Do gradient descent entirely in BigFloat

The previous version accidentally returned steps in Float64.
Aaron Fenyes 2024-07-02 12:35:12 -07:00
parent 242d630cc6
commit e7dde5800c


@@ -11,7 +11,7 @@ using PolynomialRoots
 # subspace of matrices whose entries vanish at each empty index of `target`
 function proj_diff(target, attempt)
     J, K, values = findnz(target)
-    result = zeros(size(target)...)
+    result = zeros(BigFloat, size(target)...)
     for (j, k, val) in zip(J, K, values)
         result[j, k] = val - attempt[j, k]
     end
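
Why the one-line change matters: in Julia, `zeros(dims...)` allocates a Float64 array, and assigning a BigFloat into it silently rounds the value to 64-bit precision, so every step computed from `result` lost its extended precision. A minimal illustration (the matrix size here is arbitrary):

# zeros(dims...) defaults to Float64; storing a BigFloat in it
# rounds the value down to 64-bit precision
a = zeros(2, 2)
a[1, 1] = BigFloat(1) / 3
typeof(a[1, 1])  # Float64

# passing the element type first keeps full precision
b = zeros(BigFloat, 2, 2)
b[1, 1] = BigFloat(1) / 3
typeof(b[1, 1])  # BigFloat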
@@ -65,7 +65,7 @@ guess = sqrt(0.5) * BigFloat[
 steps = 600
 line_search_max_steps = 100
 init_stepsize = BigFloat(1)
-step_shrink_factor = BigFloat(0.5)
+step_shrink_factor = BigFloat(0.9)
 target_improvement_factor = BigFloat(0.5)
 # complete the gram matrix using gradient descent
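
For context, here is a minimal sketch of how these parameters plausibly drive a backtracking (Armijo) line search; it is an assumption, not the script's actual implementation, and `loss` and `grad` are hypothetical stand-ins for the objective, which this hunk doesn't show. Raising `step_shrink_factor` from 0.5 to 0.9 makes each backtracking round shrink the stepsize more gently, so the search probes candidate stepsizes on a finer grid:

# A sketch under stated assumptions, not the script's actual line search.
function backtrack_step(loss, grad, x;
        init_stepsize = BigFloat(1),
        step_shrink_factor = BigFloat(0.9),
        target_improvement_factor = BigFloat(0.5),
        line_search_max_steps = 100)
    g = grad(x)
    base = loss(x)
    stepsize = init_stepsize
    for _ in 1:line_search_max_steps
        # accept once the loss drops by the target fraction of the
        # first-order prediction stepsize * ||g||^2
        if loss(x - stepsize * g) <= base - target_improvement_factor * stepsize * sum(abs2, g)
            break
        end
        # gentler shrink (0.9 vs. the old 0.5) probes more stepsizes
        # before the iteration budget runs out
        stepsize *= step_shrink_factor
    end
    x - stepsize * g
end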