Encapsulate gradient descent code
The completed Gram matrix from this commit matches the one from commit
e7dde58 to six decimal places.
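A sketch of how that agreement could be checked, assuming the completed matrices from both runs are loaded under hypothetical names:

# hypothetical names: `completed_gram` from this commit, `completed_gram_old`
# from e7dde58, each produced by running that version of the test script
println(maximum(abs.(completed_gram - completed_gram_old)) < 1e-6)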
parent e7dde5800c
commit 133519cacb

engine-proto/gram-test/Engine.jl (new file, 100 lines)
@@ -0,0 +1,100 @@
module Engine

using LinearAlgebra
using SparseArrays

export Q, DescentHistory, realize_gram

# the Lorentz form
Q = diagm([1, 1, 1, 1, -1])

# the difference between the matrices `target` and `attempt`, projected onto the
# subspace of matrices whose entries vanish at each empty index of `target`
function proj_diff(target::SparseMatrixCSC{T, <:Any}, attempt::Matrix{T}) where T
    J, K, values = findnz(target)
    result = zeros(size(target)...)
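    # note: with no element type argument, `zeros` allocates a Float64 matrix
    # even when T is BigFloat, so the entries stored here are rounded to double
    # precision; `zeros(T, size(target)...)` would keep the full working precision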
    for (j, k, val) in zip(J, K, values)
        result[j, k] = val - attempt[j, k]
    end
    result
end

# a type for keeping track of gradient descent history
struct DescentHistory{T}
    scaled_loss::Array{T}
    stepsize::Array{T}
    backoff_steps::Array{Int64}

    function DescentHistory{T}(
        scaled_loss = Array{T}(undef, 0),
        stepsize = Array{T}(undef, 0),
        backoff_steps = Int64[]
    ) where T
        new(scaled_loss, stepsize, backoff_steps)
    end
end
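# note: `realize_gram` below pushes one final entry onto `scaled_loss` after
# its descent loop, so that array ends up one longer than the other two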

# seek a matrix `L` for which `L'QL` matches the sparse matrix `gram` at every
# explicit entry of `gram`. use gradient descent starting from `guess`
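# (here the loss is the Frobenius norm of `proj_diff(gram, L'*Q*L)`, so it
# only penalizes mismatches at the explicit entries of `gram`)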
function realize_gram(
    gram::SparseMatrixCSC{T, <:Any},
    guess::Matrix{T};
    scaled_tol = 1e-30,
    target_improvement = 0.5,
    init_stepsize = 1.0,
    backoff = 0.9,
    max_descent_steps = 600,
    max_backoff_steps = 110
) where T <: Number
    # start history
    history = DescentHistory{T}()

    # scale tolerance
    scale_adjustment = sqrt(T(nnz(gram)))
    tol = scale_adjustment * scaled_tol

    # initialize variables
    stepsize = init_stepsize
    L = copy(guess)

    # do gradient descent
    Δ_proj = proj_diff(gram, L'*Q*L)
    loss = norm(Δ_proj)
    for step in 1:max_descent_steps
        # stop if the loss is tolerably low
        if loss < tol
            break
        end

        # find negative gradient of loss function
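        # (strictly, of the squared Frobenius loss: for symmetric Q and a
        # symmetric sparsity pattern in `gram`, d‖Δ_proj‖²/dL = -4*Q*L*Δ_proj)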
        neg_grad = 4*Q*L*Δ_proj
        slope = norm(neg_grad)

        # store current position and loss
        L_last = L
        loss_last = loss
        push!(history.scaled_loss, loss / scale_adjustment)

        # find a good step size using backtracking line search
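        # (a sufficient-decrease test in the spirit of the Armijo condition,
        # with `target_improvement` playing the role of the Armijo constant)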
        push!(history.stepsize, 0)
        push!(history.backoff_steps, max_backoff_steps)
        for backoff_steps in 0:max_backoff_steps
            history.stepsize[end] = stepsize
            L = L_last + stepsize * neg_grad
            Δ_proj = proj_diff(gram, L'*Q*L)
            loss = norm(Δ_proj)
            improvement = loss_last - loss
            if improvement >= target_improvement * stepsize * slope
                history.backoff_steps[end] = backoff_steps
                break
            end
            stepsize *= backoff
        end
    end

    # return the factorization and its history
    push!(history.scaled_loss, loss / scale_adjustment)
    L, history
end

end
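For reference, a minimal sketch of how the new module could be exercised on its own; the 2x2 example below is made up for illustration and is not part of the commit:

include("Engine.jl")
using SparseArrays

# prescribe only the two diagonal entries; the off-diagonal entry is left free
gram = sparse([1, 2], [1, 2], BigFloat[1, 1])
guess = BigFloat[1 0; 0 1; 0 0; 0 0; 0 0]
L, history = Engine.realize_gram(gram, guess)
display(L'*Engine.Q*L)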
@@ -1,28 +1,9 @@
using LinearAlgebra
include("Engine.jl")

using SparseArrays
using AbstractAlgebra
using PolynomialRoots

# testing Gram matrix recovery using a homemade gradient descent routine

# === gradient descent ===

# the difference between the matrices `target` and `attempt`, projected onto the
# subspace of matrices whose entries vanish at each empty index of `target`
function proj_diff(target, attempt)
    J, K, values = findnz(target)
    result = zeros(BigFloat, size(target)...)
    for (j, k, val) in zip(J, K, values)
        result[j, k] = val - attempt[j, k]
    end
    result
end

# === example ===

# the Lorentz form
Q = diagm([1, 1, 1, 1, -1])

# initialize the partial gram matrix for an arrangement of seven spheres in
# which spheres 1 through 5 are mutually tangent, and spheres 3 through 7 are
# also mutually tangent

@@ -61,50 +42,13 @@ guess = sqrt(0.5) * BigFloat[
    1 1 1 1 2 0.2 0.1;
]

# search parameters
steps = 600
line_search_max_steps = 100
init_stepsize = BigFloat(1)
step_shrink_factor = BigFloat(0.9)
target_improvement_factor = BigFloat(0.5)

# complete the gram matrix using gradient descent
loss_history = Array{BigFloat}(undef, steps + 1)
stepsize_history = Array{BigFloat}(undef, steps)
line_search_depth_history = fill(line_search_max_steps, steps)
stepsize = init_stepsize
L = copy(guess)
Δ_proj = proj_diff(gram, L'*Q*L)
loss = norm(Δ_proj)
for step in 1:steps
    # find negative gradient of loss function
    neg_grad = 4*Q*L*Δ_proj
    slope = norm(neg_grad)

    # store current position and loss
    L_last = L
    loss_last = loss
    loss_history[step] = loss

    # find a good step size using backtracking line search
    for line_search_depth in 1:line_search_max_steps
        stepsize_history[step] = stepsize
        global L = L_last + stepsize * neg_grad
        global Δ_proj = proj_diff(gram, L'*Q*L)
        global loss = norm(Δ_proj)
        improvement = loss_last - loss
        if improvement >= target_improvement_factor * stepsize * slope
            line_search_depth_history[step] = line_search_depth
            break
        end
        global stepsize *= step_shrink_factor
    end
end
completed_gram = L'*Q*L
loss_history[steps + 1] = loss
L, history = Engine.realize_gram(gram, guess)
completed_gram = L'*Engine.Q*L
println("Completed Gram matrix:\n")
display(completed_gram)
println("\nLoss: ", loss, "\n")
println("\nSteps: ", size(history.stepsize, 1))
println("Loss: ", history.scaled_loss[end], "\n")

# === algebraic check ===