Preserve explicit zeros in Gram matrix conversion

In previous commits, the `circles-in-triangle` example converged much
more slowly in BigFloat precision than in Float64 precision. This
turned out to be a sign of a bug in the Float64 computation: converting
the Gram matrix using `Float64.()` dropped the explicit zeros, removing
many constraints and making the problem much easier to solve. This
commit corrects the Gram matrix conversion. The Float64 search now
solves the same problem as the BigFloat search, with comparable
performance.
Author: Aaron Fenyes
Date:   2024-07-15 14:08:57 -07:00
Parent: 7b3efbc385
Commit: 53d8c38047

2 changed files with 6 additions and 1 deletion
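
To see the failure mode in isolation, here is a minimal sketch (an illustration of the bug described above, separate from the changed files below): an explicitly stored zero in the sparse Gram matrix encodes a constraint, broadcasting with `Float64.()` silently drops it, and `convertnz` keeps it.

    using SparseArrays

    # Toy Gram matrix with an explicitly stored zero at (1, 2),
    # standing in for a constraint like <v_1, v_2> = 0.
    gram = sparse([1, 1, 2], [1, 2, 2], BigFloat[1, 0, 1])
    nnz(gram)                      # 3 stored entries, explicit zero included

    # The bug: broadcasting the conversion drops the stored zero.
    nnz(Float64.(gram))            # 2 -- the constraint entry is gone

    # The fix: convert only the stored values, keeping the sparsity pattern.
    function convertnz(type, mat)
      J, K, values = findnz(mat)
      sparse(J, K, type.(values))
    end
    nnz(convertnz(Float64, gram))  # 3 -- explicit zero preserved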


@@ -254,6 +254,11 @@ end
 LinearAlgebra.eigen!(A::Symmetric{BigFloat, Matrix{BigFloat}}; sortby::Nothing) =
   eigen!(Hermitian(A))
 
+function convertnz(type, mat)
+  J, K, values = findnz(mat)
+  sparse(J, K, type.(values))
+end
+
 function realize_gram_optim(
   gram::SparseMatrixCSC{T, <:Any},
   guess::Matrix{T}

@@ -86,7 +86,7 @@ L, history = Engine.realize_gram_gradient(gram, guess, scaled_tol = 0.01)
 L_pol, history_pol = Engine.realize_gram_newton(gram, L, rate = 0.3, scaled_tol = 1e-9)
 L_pol2, history_pol2 = Engine.realize_gram_newton(gram, L_pol)
 =#
-L, success, history = Engine.realize_gram(Float64.(gram), Float64.(guess))
+L, success, history = Engine.realize_gram(Engine.convertnz(Float64, gram), Float64.(guess))
 completed_gram = L'*Engine.Q*L
 println("Completed Gram matrix:\n")
 display(completed_gram)