Clean up backtracking gradient descent code

Drop experimental singularity handling strategies. Reduce the default
tolerance to within 64-bit floating point precision. Report success.
This commit is contained in:
Aaron Fenyes 2024-07-15 13:15:15 -07:00
parent 25b09ebf92
commit 7b3efbc385
4 changed files with 41 additions and 82 deletions

View file

@@ -52,7 +52,7 @@ guess = hcat(
L, history = Engine.realize_gram_gradient(gram, guess, scaled_tol = 0.01)
L_pol, history_pol = Engine.realize_gram_newton(gram, L)
=#
L, history = Engine.realize_gram(Float64.(gram), Float64.(guess))
L, success, history = Engine.realize_gram(Float64.(gram), Float64.(guess))
completed_gram = L'*Engine.Q*L
println("Completed Gram matrix:\n")
display(completed_gram)
@@ -60,7 +60,12 @@ display(completed_gram)
println("\nSteps: ", size(history.scaled_loss, 1), " + ", size(history_pol.scaled_loss, 1))
println("Loss: ", history_pol.scaled_loss[end], "\n")
=#
println("\nSteps: ", size(history.scaled_loss, 1))
if success
println("\nTarget accuracy achieved!")
else
println("\nFailed to reach target accuracy")
end
println("Steps: ", size(history.scaled_loss, 1))
println("Loss: ", history.scaled_loss[end], "\n")
# === algebraic check ===