Use Newton's method for polishing
This commit is contained in:
parent
d538cbf716
commit
3910b9f740
@ -51,11 +51,21 @@ end
|
||||
# the Lorentz form on 5-component vectors: the last coordinate enters with a
# minus sign, so `L' * Q * L` collects the Lorentz inner products of the
# columns of `L`. declared `const` so uses in hot loops stay type-stable
const Q = diagm([1, 1, 1, 1, -1])
|
||||
|
||||
# project a matrix onto the subspace of matrices whose entries vanish away
# from the given indices: keep `mat`'s entries at the listed (row, col)
# positions and zero out every other entry
function proj_to_entries(mat, indices)
    projected = zeros(size(mat))
    for (row, col) in indices
        projected[row, col] = mat[row, col]
    end
    return projected
end
|
||||
|
||||
# the difference between the matrices `target` and `attempt`, projected onto the
|
||||
# subspace of matrices whose entries vanish at each empty index of `target`
|
||||
function proj_diff(target::SparseMatrixCSC{T, <:Any}, attempt::Matrix{T}) where T
|
||||
J, K, values = findnz(target)
|
||||
result = zeros(size(target)...)
|
||||
result = zeros(size(target))
|
||||
for (j, k, val) in zip(J, K, values)
|
||||
result[j, k] = val - attempt[j, k]
|
||||
end
|
||||
@ -87,7 +97,7 @@ end
|
||||
|
||||
# seek a matrix `L` for which `L'QL` matches the sparse matrix `gram` at every
|
||||
# explicit entry of `gram`. use gradient descent starting from `guess`
|
||||
function realize_gram(
|
||||
function realize_gram_gradient(
|
||||
gram::SparseMatrixCSC{T, <:Any},
|
||||
guess::Matrix{T};
|
||||
scaled_tol = 1e-30,
|
||||
@ -111,7 +121,7 @@ function realize_gram(
|
||||
# do gradient descent
|
||||
Δ_proj = proj_diff(gram, L'*Q*L)
|
||||
loss = dot(Δ_proj, Δ_proj)
|
||||
for step in 1:max_descent_steps
|
||||
for _ in 1:max_descent_steps
|
||||
# stop if the loss is tolerably low
|
||||
if loss < tol
|
||||
break
|
||||
@ -160,4 +170,73 @@ function realize_gram(
|
||||
L, history
|
||||
end
|
||||
|
||||
# the standard basis matrix of element type `T` and size `dims`, with a one
# at entry (j, k) and zeros everywhere else
function basis_matrix(::Type{T}, j, k, dims) where T
    mat = zeros(T, dims)
    mat[j, k] = one(T)
    return mat
end
|
||||
|
||||
# seek a matrix `L` for which `L'QL` matches the sparse matrix `gram` at every
# explicit entry of `gram`. use Newton's method starting from `guess`
function realize_gram_newton(
    gram::SparseMatrixCSC{T, <:Any},
    guess::Matrix{T};
    scaled_tol = 1e-30,
    rate = 1,
    max_steps = 100
) where T <: Number
    # start history
    history = DescentHistory{T}()

    # find the dimension of the search space
    dims = size(guess)
    element_dim, construction_dim = dims
    total_dim = element_dim * construction_dim

    # list the constrained entries of the gram matrix
    J, K, _ = findnz(gram)
    constrained = zip(J, K)

    # scale the tolerance to the number of constrained entries
    scale_adjustment = sqrt(T(length(constrained)))
    tol = scale_adjustment * scaled_tol

    # use Newton's method. NOTE(review): the range 0:max_steps allows up to
    # max_steps + 1 updates; kept as-is to preserve existing behavior
    L = copy(guess)
    for step in 0:max_steps
        # evaluate the loss function
        Δ_proj = proj_diff(gram, L'*Q*L)
        loss = dot(Δ_proj, Δ_proj)

        # store the current loss
        push!(history.scaled_loss, loss / scale_adjustment)

        # stop if the loss is tolerably low. [the former `step > max_steps`
        # test was dead code: `step` never exceeds `max_steps` in this range]
        if loss < tol
            break
        end

        # find the negative gradient of the loss function
        neg_grad = 4*Q*L*Δ_proj

        # find the negative Hessian of the loss function, one column per entry
        # of `L`, laid out in column-major order to match `reshape`
        hess = Matrix{T}(undef, total_dim, total_dim)
        indices = [(j, k) for k in 1:construction_dim for j in 1:element_dim]
        for (j, k) in indices
            basis_mat = basis_matrix(T, j, k, dims)
            neg_dΔ = basis_mat'*Q*L + L'*Q*basis_mat
            neg_dΔ_proj = proj_to_entries(neg_dΔ, constrained)
            deriv_grad = 4*Q*(-basis_mat*Δ_proj + L*neg_dΔ_proj)
            hess[:, (k-1)*element_dim + j] = reshape(deriv_grad, total_dim)
        end

        # compute and take the Newton step. [renamed from `step`, which
        # shadowed the loop counter above]
        newton_step = hess \ reshape(neg_grad, total_dim)
        L += rate * reshape(newton_step, dims)
    end

    # return the factorization and its history
    L, history
end
|
||||
|
||||
end
|
@ -55,7 +55,6 @@ gram = sparse(J, K, values)
|
||||
## guess = Engine.rand_on_shell(fill(BigFloat(-1), 8))
|
||||
|
||||
# set initial guess
|
||||
#=
|
||||
guess = hcat(
|
||||
Engine.plane(BigFloat[0, 0, 1], BigFloat(0)),
|
||||
Engine.sphere(BigFloat[0, 0, 0], BigFloat(1//2)),
|
||||
@ -67,7 +66,7 @@ guess = hcat(
|
||||
Engine.sphere(BigFloat[cos(pi/3), sin(pi/3), 0], BigFloat(1//5)),
|
||||
BigFloat[0, 0, 0, 1, 1]
|
||||
)
|
||||
=#
|
||||
#=
|
||||
guess = hcat(
|
||||
Engine.plane(BigFloat[0, 0, 1], BigFloat(0)),
|
||||
Engine.sphere(BigFloat[0, 0, 0], BigFloat(0.9)),
|
||||
@ -79,11 +78,19 @@ guess = hcat(
|
||||
Engine.sphere(4//3*BigFloat[cos(pi/3), sin(pi/3), 0], BigFloat(1//3)),
|
||||
BigFloat[0, 0, 0, 1, 1]
|
||||
)
|
||||
=#
|
||||
|
||||
# complete the gram matrix using gradient descent
|
||||
L, history = Engine.realize_gram(gram, guess, max_descent_steps = 200)
|
||||
# complete the gram matrix using gradient descent followed by Newton's method
|
||||
L, history = Engine.realize_gram_gradient(gram, guess, scaled_tol = 0.01)
|
||||
L_pol, history_pol = Engine.realize_gram_newton(gram, L, rate = 0.3, scaled_tol = 1e-9)
|
||||
L_pol2, history_pol2 = Engine.realize_gram_newton(gram, L_pol)
|
||||
completed_gram = L'*Engine.Q*L
|
||||
println("Completed Gram matrix:\n")
|
||||
display(completed_gram)
|
||||
println("\nSteps: ", size(history.stepsize, 1))
|
||||
println("Loss: ", history.scaled_loss[end], "\n")
|
||||
println(
|
||||
"\nSteps: ",
|
||||
size(history.scaled_loss, 1),
|
||||
" + ", size(history_pol.scaled_loss, 1),
|
||||
" + ", size(history_pol2.scaled_loss, 1)
|
||||
)
|
||||
println("Loss: ", history_pol2.scaled_loss[end], "\n")
|
@ -47,13 +47,14 @@ guess = hcat(
|
||||
Engine.rand_on_shell(fill(BigFloat(-1), 2))
|
||||
)
|
||||
|
||||
# complete the gram matrix using gradient descent
|
||||
L, history = Engine.realize_gram(gram, guess)
|
||||
# complete the gram matrix using gradient descent followed by Newton's method
|
||||
L, history = Engine.realize_gram_gradient(gram, guess, scaled_tol = 0.01)
|
||||
L_pol, history_pol = Engine.realize_gram_newton(gram, L)
|
||||
completed_gram = L'*Engine.Q*L
|
||||
println("Completed Gram matrix:\n")
|
||||
display(completed_gram)
|
||||
println("\nSteps: ", size(history.stepsize, 1))
|
||||
println("Loss: ", history.scaled_loss[end], "\n")
|
||||
println("\nSteps: ", size(history.scaled_loss, 1), " + ", size(history_pol.scaled_loss, 1))
|
||||
println("Loss: ", history_pol.scaled_loss[end], "\n")
|
||||
|
||||
# === algebraic check ===
|
||||
|
||||
|
@ -1,8 +1,6 @@
|
||||
include("Engine.jl")
|
||||
|
||||
using SparseArrays
|
||||
using AbstractAlgebra
|
||||
using PolynomialRoots
|
||||
using Random
|
||||
|
||||
# initialize the partial gram matrix for a sphere inscribed in a regular
|
||||
@ -35,10 +33,10 @@ guess = sqrt(1/BigFloat(3)) * BigFloat[
|
||||
1 1 1 1 1
|
||||
] + 0.2*Engine.rand_on_shell(fill(BigFloat(-1), 5))
|
||||
|
||||
# complete the gram matrix using gradient descent
|
||||
L, history = Engine.realize_gram(gram, guess)
|
||||
# complete the gram matrix using Newton's method
|
||||
L, history = Engine.realize_gram_newton(gram, guess)
|
||||
completed_gram = L'*Engine.Q*L
|
||||
println("Completed Gram matrix:\n")
|
||||
display(completed_gram)
|
||||
println("\nSteps: ", size(history.stepsize, 1))
|
||||
println("\nSteps: ", size(history.scaled_loss, 1))
|
||||
println("Loss: ", history.scaled_loss[end], "\n")
|
Loading…
Reference in New Issue
Block a user