From e7dde5800c79988aa0225d84822d55339b83978f Mon Sep 17 00:00:00 2001
From: Aaron Fenyes
Date: Tue, 2 Jul 2024 12:35:12 -0700
Subject: [PATCH] Do gradient descent entirely in BigFloat

The previous version accidentally returned steps in Float64.
---
 engine-proto/gram-test/descent-test.jl | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/engine-proto/gram-test/descent-test.jl b/engine-proto/gram-test/descent-test.jl
index 2f20143..168de5d 100644
--- a/engine-proto/gram-test/descent-test.jl
+++ b/engine-proto/gram-test/descent-test.jl
@@ -11,7 +11,7 @@ using PolynomialRoots
 # subspace of matrices whose entries vanish at each empty index of `target`
 function proj_diff(target, attempt)
   J, K, values = findnz(target)
-  result = zeros(size(target)...)
+  result = zeros(BigFloat, size(target)...)
   for (j, k, val) in zip(J, K, values)
     result[j, k] = val - attempt[j, k]
   end
@@ -65,7 +65,7 @@ guess = sqrt(0.5) * BigFloat[
 steps = 600
 line_search_max_steps = 100
 init_stepsize = BigFloat(1)
-step_shrink_factor = BigFloat(0.5)
+step_shrink_factor = BigFloat(0.9)
 target_improvement_factor = BigFloat(0.5)
 
 # complete the gram matrix using gradient descent
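
Note: a minimal Julia sketch of the precision leak this patch fixes. In Julia,
zeros(size(target)...) returns a Matrix{Float64}, so any BigFloat assigned into
it is silently demoted to Float64; passing BigFloat as the element type keeps
full precision. The 2x2 `target` below is a hypothetical stand-in for the Gram
matrix used in descent-test.jl.

  # hypothetical stand-in for the Gram matrix in descent-test.jl
  target = BigFloat[1 0; 0 1]

  bad = zeros(size(target)...)             # element type defaults to Float64
  bad[1, 1] = big"1"/3                     # BigFloat silently demoted on assignment
  typeof(bad[1, 1])                        # Float64: extended precision lost

  good = zeros(BigFloat, size(target)...)  # the patched call pins the element type
  good[1, 1] = big"1"/3
  typeof(good[1, 1])                       # BigFloat: full precision kept

The second hunk, changing BigFloat(0.5) to BigFloat(0.9), makes the backtracking
line search shrink each failed trial step by a factor of 0.9 instead of halving
it, presumably so the search does not overshoot past usable step sizes.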