Try numerical low-rank factorization
The best technique I've found so far is the homemade gradient descent routine in `descent-test.jl`.
This commit is contained in:
parent
ef33b8ee10
commit
58a5c38e62
137
engine-proto/gram-test/descent-test.jl
Normal file
137
engine-proto/gram-test/descent-test.jl
Normal file
@@ -0,0 +1,137 @@
|
|||||||
|
using LinearAlgebra
|
||||||
|
using SparseArrays
|
||||||
|
using AbstractAlgebra
|
||||||
|
using PolynomialRoots
|
||||||
|
|
||||||
|
# testing Gram matrix recovery using a homemade gradient descent routine
|
||||||
|
|
||||||
|
# === gradient descent ===
|
||||||
|
|
||||||
|
# the difference between the matrices `target` and `attempt`, projected onto the
# subspace of matrices whose entries vanish at each empty index of `target`
#
# The result's element type is the promotion of the two inputs' element types,
# so BigFloat arithmetic keeps its full precision. (The previous
# `zeros(size(target)...)` allocated a Float64 matrix, silently truncating
# every BigFloat difference on assignment.)
function proj_diff(target, attempt)
    # stored indices and values of the sparse target
    I, J, values = findnz(target)
    result = zeros(promote_type(eltype(target), eltype(attempt)), size(target)...)
    for (i, j, val) in zip(I, J, values)
        result[i, j] = val - attempt[i, j]
    end
    result
end
|
||||||
|
|
||||||
|
# === example ===

# the Lorentz form
Q = diagm([-1, 1, 1, 1, 1])

# initialize the partial gram matrix for an arrangement of seven spheres in
# which spheres 1 through 5 are mutually tangent, and spheres 3 through 7 are
# also mutually tangent. an entry is known exactly when both spheres belong
# to one of the two mutually tangent cliques
known_indices = [
    (i, j)
    for i in 1:7 for j in 1:7
    if (i <= 5 && j <= 5) || (i >= 3 && j >= 3)
]
I = Int64[i for (i, _) in known_indices]
J = Int64[j for (_, j) in known_indices]
# 1 on the diagonal, -1 for each known tangency
values = BigFloat[i == j ? 1 : -1 for (i, j) in known_indices]
gram = sparse(I, J, values)
|
||||||
|
|
||||||
|
# set the independent variable
#
# using gram[6, 2] or gram[7, 1] as the independent variable seems to stall
# convergence, even if its value comes from a known solution, like
#
# gram[6, 2] = 0.9936131705272925
#
indep_val = -9//5
gram[6, 1] = BigFloat(indep_val)
gram[1, 6] = gram[6, 1]

# this initial guess satisfies the mutual tangency condition for spheres
# 1 through 5; the last two columns are rough starting points for spheres
# 6 and 7
guess = sqrt(0.5) * BigFloat[
    1  1  1  1  2        0.2  0.1;
    0  0  0  0 -sqrt(6)  0.3 -0.2;
    1  1 -1 -1  0       -0.1  0.3;
    1 -1  1 -1  0       -0.5  0.4;
    1 -1 -1  1  0        0.1 -0.2
]

# search parameters
steps = 600                                # number of gradient steps
line_search_max_steps = 100                # backtracking tries per step
init_stepsize = one(BigFloat)              # starting step size
step_shrink_factor = BigFloat(1)/2         # backtracking shrink factor
target_improvement_factor = BigFloat(1)/2  # sufficient-decrease threshold
|
||||||
|
|
||||||
|
# complete the gram matrix using gradient descent

# per-step records, for inspecting convergence afterward
loss_history = Array{BigFloat}(undef, steps + 1)
stepsize_history = Array{BigFloat}(undef, steps)
line_search_depth_history = fill(line_search_max_steps, steps)

stepsize = init_stepsize
L = copy(guess)
Δ_proj = proj_diff(gram, L'*Q*L)
loss = norm(Δ_proj)
for step in 1:steps
    # negative gradient, with respect to L, of the squared projected
    # difference ‖Δ_proj‖², used as the descent direction
    downhill = 4*Q*L*Δ_proj
    grad_size = norm(downhill)

    # remember where this step started
    L_prev = L
    loss_prev = loss
    loss_history[step] = loss

    # backtracking line search: shrink the step until the loss drops by at
    # least target_improvement_factor * stepsize * grad_size. the step size
    # carries over between outer steps and is never grown back, so it can
    # only shrink over the whole run
    for depth in 1:line_search_max_steps
        stepsize_history[step] = stepsize
        global L = L_prev + stepsize * downhill
        global Δ_proj = proj_diff(gram, L'*Q*L)
        global loss = norm(Δ_proj)
        if loss_prev - loss >= target_improvement_factor * stepsize * grad_size
            line_search_depth_history[step] = depth
            break
        end
        global stepsize *= step_shrink_factor
    end
end
completed_gram = L'*Q*L
loss_history[steps + 1] = loss

# report the result
println("Completed Gram matrix:\n")
display(completed_gram)
println("\nLoss: ", loss, "\n")
|
||||||
|
|
||||||
|
# === algebraic check ===

# polynomial ring in the unknown gram entries: x sits at positions
# (2, 6)/(6, 2), and t₁, t₂, t₃ at (1, 6), (1, 7), (2, 7) and their
# transposes
R, gens = polynomial_ring(Generic.Rationals{BigInt}(), ["x", "t₁", "t₂", "t₃"])
x = first(gens)
t = gens[2:4]

# a univariate ring for extracting single-variable constraints
S, u = polynomial_ring(Generic.Rationals{BigInt}(), "u")

M = matrix_space(R, 7, 7)
gram_symb = M(R[
    1    -1   -1 -1 -1 t[1] t[2];
    -1   1    -1 -1 -1 x    t[3];
    -1   -1   1  -1 -1 -1   -1;
    -1   -1   -1 1  -1 -1   -1;
    -1   -1   -1 -1 1  -1   -1;
    t[1] x    -1 -1 -1 1    -1;
    t[2] t[3] -1 -1 -1 -1   1
])

# 6×6 minors of the symbolic gram matrix; presumably these must vanish for
# the completed matrix to have the right rank
rank_constraints = [
    det(gram_symb[1:6, 1:6]),
    det(gram_symb[2:7, 2:7]),
    det(gram_symb[[1, 3, 4, 5, 6, 7], [1, 3, 4, 5, 6, 7]]),
]

# solve for x and t₂ along the slice t₁ = indep_val: after substituting
# variable 2 (t₁), the first minor involves only x and the third only t₂
x_constraint = 25//16 * to_univariate(S, evaluate(rank_constraints[1], [2], [indep_val]))
t₂_constraint = 25//16 * to_univariate(S, evaluate(rank_constraints[3], [2], [indep_val]))

# NOTE(review): `.coeffs` reaches into the polynomial's internal field;
# confirm against the AbstractAlgebra API (e.g. `coefficients`) on upgrade
x_vals = PolynomialRoots.roots(x_constraint.coeffs)
t₂_vals = PolynomialRoots.roots(t₂_constraint.coeffs)
|
49
engine-proto/gram-test/low-rank-test.jl
Normal file
49
engine-proto/gram-test/low-rank-test.jl
Normal file
@@ -0,0 +1,49 @@
|
|||||||
|
using LowRankModels
|
||||||
|
using LinearAlgebra
|
||||||
|
using SparseArrays
|
||||||
|
|
||||||
|
# testing Gram matrix recovery using the LowRankModels package
|
||||||
|
|
||||||
|
# initialize the partial gram matrix for an arrangement of seven spheres in
# which spheres 1 through 5 are mutually tangent, and spheres 3 through 7 are
# also mutually tangent
I = Int64[]
J = Int64[]
values = Float64[]
for i in 1:7, j in 1:7
    # an inner product is known exactly when both spheres belong to one of
    # the two mutually tangent cliques
    in_first_clique = i <= 5 && j <= 5
    in_second_clique = i >= 3 && j >= 3
    if in_first_clique || in_second_clique
        push!(I, i)
        push!(J, j)
        # 1 on the diagonal, -1 for each known tangency
        push!(values, i == j ? 1.0 : -1.0)
    end
end
gram = sparse(I, J, values)
|
||||||
|
|
||||||
|
# in this initial guess, the mutual tangency condition is satisfied for
# spheres 1 through 5; the rows below are transposed so each sphere becomes
# a column of X₀
X₀ = sqrt(0.5) * [
    1    0        1    1    1;
    1    0        1   -1   -1;
    1    0       -1    1   -1;
    1    0       -1   -1    1;
    2   -sqrt(6)  0    0    0;
    0.2  0.3     -0.1 -0.2  0.1;
    0.1 -0.2      0.3  0.4 -0.1
]'
# apply the Lorentz form to the initial factor to get the paired factor
lorentz = diagm([-1, 1, 1, 1, 1])
Y₀ = lorentz * X₀

# search parameters
# NOTE(review): first positional argument is presumably the initial step
# size — confirm against the ProxGradParams docs
search_params = ProxGradParams(
    1.0;
    max_iter = 100,
    inner_iter = 1,
    abs_tol = 1e-16,
    rel_tol = 1e-9,
    min_stepsize = 0.01
)

# complete the gram matrix: rank-5 factorization with quadratic loss and no
# regularization, warm-started from the hand-built guess
glrm_model = GLRM(gram, QuadLoss(), ZeroReg(), ZeroReg(), 5, X = X₀, Y = Y₀)
X, Y, history = fit!(glrm_model, search_params)
|
37
engine-proto/gram-test/overlap-test.jl
Normal file
37
engine-proto/gram-test/overlap-test.jl
Normal file
@@ -0,0 +1,37 @@
|
|||||||
|
using LinearAlgebra
|
||||||
|
using AbstractAlgebra
|
||||||
|
|
||||||
|
# print `message` to stdout behind a green check mark
function printgood(message)
    printstyled('✓'; color = :green)
    println(' ', message)
end
|
||||||
|
|
||||||
|
# print `message` to stdout behind a red cross mark
function printbad(message)
    printstyled('✗'; color = :red)
    println(' ', message)
end
|
||||||
|
|
||||||
|
# work over the field of rational functions in the unknown gram entries:
# x sits at positions (2, 6)/(6, 2), and t₁, t₂, t₃ at (1, 6), (1, 7),
# (2, 7) and their transposes
F, gens = rational_function_field(Generic.Rationals{BigInt}(), ["x", "t₁", "t₂", "t₃"])
x = first(gens)
t = gens[2:4]

# the symbolic gram matrix: 1 on the diagonal, -1 for each known tangency,
# unknowns elsewhere
# NOTE(review): the original comment described "three mutually tangent
# spheres ... perpendicular to the x, y plane" — confirm which arrangement
# this 7×7 matrix actually encodes
M = matrix_space(F, 7, 7)
gram = M(F[
    1    -1   -1 -1 -1 t[1] t[2];
    -1   1    -1 -1 -1 x    t[3];
    -1   -1   1  -1 -1 -1   -1;
    -1   -1   -1 1  -1 -1   -1;
    -1   -1   -1 -1 1  -1   -1;
    t[1] x    -1 -1 -1 1    -1;
    t[2] t[3] -1 -1 -1 -1   1
])

# LU-factor the symbolic gram matrix; the four return values are unpacked as
# rank, pivot permutation, and the L and U factors
r, p, L, U = lu(gram)
isone(p) ? printgood("Found a solution") : printbad("Didn't find a solution")

# take the transpose of the lower factor as the candidate solution, and
# recover the form matrix from the upper factor
solution = transpose(L)
mform = U * inv(solution)
|
Loading…
Reference in New Issue
Block a user