dyna3/engine-proto/gram-test/descent-test.jl

using LinearAlgebra
using SparseArrays
using AbstractAlgebra
using PolynomialRoots
# testing Gram matrix recovery using a homemade gradient descent routine
# === gradient descent ===
# the difference between the matrices `target` and `attempt`, projected onto the
# subspace of matrices whose entries vanish at each empty index of `target`
function proj_diff(target, attempt)
  J, K, values = findnz(target)
  # match the target's element type so BigFloat targets keep full precision
  result = zeros(eltype(target), size(target)...)
  for (j, k, val) in zip(J, K, values)
    result[j, k] = val - attempt[j, k]
  end
  result
end
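
# a quick sanity check of `proj_diff` (illustrative): the difference should
# only show up at the entries that are filled in the sparse target
let tiny_target = sparse([1, 2], [2, 1], BigFloat[3, 3], 2, 2)
  @assert proj_diff(tiny_target, ones(BigFloat, 2, 2)) == [0 2; 2 0]
end
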
# === example ===
# the Lorentz form
Q = diagm([1, 1, 1, 1, -1])
# initialize the partial gram matrix for an arrangement of seven spheres in
# which spheres 1 through 5 are mutually tangent, and spheres 3 through 7 are
# also mutually tangent
J = Int64[]
K = Int64[]
values = BigFloat[]
for j in 1:7
  for k in 1:7
    if (j <= 5 && k <= 5) || (j >= 3 && k >= 3)
      push!(J, j)
      push!(K, k)
      push!(values, j == k ? 1 : -1)
    end
  end
end
gram = sparse(J, K, values)
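
# a quick look at the fill pattern (illustrative): the only entries left
# unconstrained should be the ones pairing spheres 1 and 2 with spheres 6 and 7
unconstrained = [(j, k) for j in 1:7 for k in 1:7 if iszero(gram[j, k])]
@assert unconstrained == [(1, 6), (1, 7), (2, 6), (2, 7), (6, 1), (6, 2), (7, 1), (7, 2)]
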
# set the independent variable
#
# using gram[6, 2] or gram[7, 1] as the independent variable seems to stall
# convergence, even if its value comes from a known solution, like
#
# gram[6, 2] = 0.9936131705272925
#
indep_val = -9//5
gram[6, 1] = BigFloat(indep_val)
gram[1, 6] = gram[6, 1]
# in this initial guess, the mutual tangency condition is satisfied for spheres
# 1 through 5
guess = sqrt(0.5) * BigFloat[
  1  1 -1 -1  0        -0.1  0.3;
  1 -1  1 -1  0        -0.5  0.4;
  1 -1 -1  1  0         0.1 -0.2;
  0  0  0  0 -sqrt(6)   0.3 -0.2;
  1  1  1  1  2         0.2  0.1;
]
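
# an illustrative check: the Lorentz products of the first five columns of the
# guess should come out to 1 on the diagonal and -1 off it, matching the
# tangency entries of `gram`; the tolerance is loose because the guess is only
# built to Float64 accuracy
@assert isapprox(guess[:, 1:5]' * Q * guess[:, 1:5], 2I - ones(5, 5), atol = 1e-12)
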
# search parameters
steps = 600
line_search_max_steps = 100
init_stepsize = BigFloat(1)
step_shrink_factor = BigFloat(0.5)
target_improvement_factor = BigFloat(0.5)
# complete the gram matrix using gradient descent
loss_history = Array{BigFloat}(undef, steps + 1)
stepsize_history = Array{BigFloat}(undef, steps)
line_search_depth_history = fill(line_search_max_steps, steps)
stepsize = init_stepsize
L = copy(guess)
Δ_proj = proj_diff(gram, L'*Q*L)
loss = norm(Δ_proj)
for step in 1:steps
  # find the negative gradient of the loss function
  neg_grad = 4*Q*L*Δ_proj
  slope = norm(neg_grad)

  # store the current position and loss
  L_last = L
  loss_last = loss
  loss_history[step] = loss

  # find a good step size using backtracking line search
  for line_search_depth in 1:line_search_max_steps
    stepsize_history[step] = stepsize
    global L = L_last + stepsize * neg_grad
    global Δ_proj = proj_diff(gram, L'*Q*L)
    global loss = norm(Δ_proj)
    improvement = loss_last - loss
    # accept the step once the loss drops by at least `target_improvement_factor`
    # times the step size times the gradient norm; otherwise shrink the step
    if improvement >= target_improvement_factor * stepsize * slope
      line_search_depth_history[step] = line_search_depth
      break
    end
    global stepsize *= step_shrink_factor
  end
end
completed_gram = L'*Q*L
loss_history[steps + 1] = loss
println("Completed Gram matrix:\n")
display(completed_gram)
println("\nLoss: ", loss, "\n")
# === algebraic check ===
R, gens = polynomial_ring(AbstractAlgebra.Rationals{BigInt}(), ["x", "t₁", "t₂", "t₃"])
x = gens[1]
t = gens[2:4]
S, u = polynomial_ring(AbstractAlgebra.Rationals{BigInt}(), "u")
M = matrix_space(R, 7, 7)
gram_symb = M(R[
  1    -1   -1 -1 -1 t[1] t[2];
  -1    1   -1 -1 -1 x    t[3];
  -1   -1    1 -1 -1 -1   -1;
  -1   -1   -1  1 -1 -1   -1;
  -1   -1   -1 -1  1 -1   -1;
  t[1]  x   -1 -1 -1  1   -1;
  t[2] t[3] -1 -1 -1 -1    1
])
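
# the completed Gram matrix should have rank at most 5, since it's the Gram
# matrix of seven vectors in a 5-dimensional space, so every 6×6 minor of
# `gram_symb` has to vanish; the determinants below are three such minors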
rank_constraints = det.([
  gram_symb[1:6, 1:6],
  gram_symb[2:7, 2:7],
  gram_symb[[1, 3, 4, 5, 6, 7], [1, 3, 4, 5, 6, 7]]
])
# solve for x and t₂ at the chosen value of the independent variable
x_constraint = 25//16 * to_univariate(S, evaluate(rank_constraints[1], [2], [indep_val]))
t₂_constraint = 25//16 * to_univariate(S, evaluate(rank_constraints[3], [2], [indep_val]))
x_vals = PolynomialRoots.roots(x_constraint.coeffs)
t₂_vals = PolynomialRoots.roots(t₂_constraint.coeffs)
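
# an illustrative follow-up: the entry the descent found for gram[6, 2] should
# land near one of the real roots of `x_constraint`, up to the loss the
# descent reached
println("Real x candidates: ", [real(z) for z in x_vals if abs(imag(z)) < 1e-12])
println("Descent value of gram[6, 2]: ", completed_gram[6, 2])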