Do gradient descent entirely in BigFloat
The previous version accidentally returned descent steps in Float64, silently discarding the working precision.
parent 242d630cc6
commit e7dde5800c
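In Julia, `zeros(dims...)` without an explicit element type allocates a `Matrix{Float64}`, so any BigFloat value assigned into it is rounded to 64-bit precision on the spot; passing `BigFloat` as the first argument is the fix this commit applies. A minimal illustration:

big_val = BigFloat(1) / 3

# before the fix: element type defaults to Float64
old_result = zeros(2, 2)
old_result[1, 1] = big_val
println(typeof(old_result[1, 1]))  # Float64 -- precision lost

# after the fix: the matrix holds BigFloat entries
new_result = zeros(BigFloat, 2, 2)
new_result[1, 1] = big_val
println(typeof(new_result[1, 1]))  # BigFloat -- full precision kept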
@@ -11,7 +11,7 @@ using PolynomialRoots
 # subspace of matrices whose entries vanish at each empty index of `target`
 function proj_diff(target, attempt)
     J, K, values = findnz(target)
-    result = zeros(size(target)...)
+    result = zeros(BigFloat, size(target)...)
     for (j, k, val) in zip(J, K, values)
         result[j, k] = val - attempt[j, k]
     end
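With the patched line, `proj_diff` now propagates BigFloat end to end. A sketch of the function in isolation; the explicit return and the toy matrices are added here so it runs standalone (the function's trailing lines fall outside the hunk):

using SparseArrays

# the patched `proj_diff`, reproduced with an explicit return
function proj_diff(target, attempt)
    J, K, values = findnz(target)
    result = zeros(BigFloat, size(target)...)
    for (j, k, val) in zip(J, K, values)
        result[j, k] = val - attempt[j, k]
    end
    return result
end

# hypothetical 2x2 example: `target` only constrains the off-diagonal
target = sparse([1, 2], [2, 1], BigFloat[1, 1])
attempt = BigFloat[0 0.5; 0.5 0]
println(eltype(proj_diff(target, attempt)))  # BigFloat, not Float64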
@@ -65,7 +65,7 @@ guess = sqrt(0.5) * BigFloat[
 steps = 600
 line_search_max_steps = 100
 init_stepsize = BigFloat(1)
-step_shrink_factor = BigFloat(0.5)
+step_shrink_factor = BigFloat(0.9)
 target_improvement_factor = BigFloat(0.5)
 
 # complete the gram matrix using gradient descent
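The descent loop itself falls outside this hunk, but the constant names read like a standard backtracking (Armijo) line search. A hedged sketch, assuming that is how the script consumes them; `loss` and `grad` are hypothetical stand-ins, not functions from this repo:

# hypothetical backtracking line search over the constants above
function backtrack_step(loss, grad, x)
    g = grad(x)
    base = loss(x)
    stepsize = init_stepsize
    for _ in 1:line_search_max_steps
        candidate = x - stepsize * g
        # accept once the loss drops by the target fraction of the
        # first-order prediction (Armijo condition)
        if loss(candidate) <= base - target_improvement_factor * stepsize * sum(abs2, g)
            return candidate
        end
        stepsize *= step_shrink_factor  # shrink and retry
    end
    return x - stepsize * g  # give up after line_search_max_steps tries
end

Because `init_stepsize` and the shrink factors are BigFloat, every candidate step stays in BigFloat, which is the point of the commit.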