Clean up backtracking gradient descent code

Drop the experimental singularity handling strategies. Reduce the default
tolerance to within 64-bit floating point precision. Report whether the
target accuracy was reached.
Aaron Fenyes 2024-07-15 13:15:15 -07:00
parent 25b09ebf92
commit 7b3efbc385
4 changed files with 41 additions and 82 deletions


@@ -86,7 +86,7 @@ L, history = Engine.realize_gram_gradient(gram, guess, scaled_tol = 0.01)
 L_pol, history_pol = Engine.realize_gram_newton(gram, L, rate = 0.3, scaled_tol = 1e-9)
 L_pol2, history_pol2 = Engine.realize_gram_newton(gram, L_pol)
 =#
-L, history = Engine.realize_gram(Float64.(gram), Float64.(guess))
+L, success, history = Engine.realize_gram(Float64.(gram), Float64.(guess))
 completed_gram = L'*Engine.Q*L
 println("Completed Gram matrix:\n")
 display(completed_gram)
@@ -99,5 +99,10 @@ println(
 )
 println("Loss: ", history_pol2.scaled_loss[end], "\n")
 =#
-println("\nSteps: ", size(history.scaled_loss, 1))
+if success
+    println("\nTarget accuracy achieved!")
+else
+    println("\nFailed to reach target accuracy")
+end
+println("Steps: ", size(history.scaled_loss, 1))
 println("Loss: ", history.scaled_loss[end], "\n")