Change loss function to match gradient
parent 023759a267
commit 77bc124170
@@ -104,7 +104,7 @@ function realize_gram(
     # do gradient descent
     Δ_proj = proj_diff(gram, L'*Q*L)
-    loss = norm(Δ_proj)
+    loss = dot(Δ_proj, Δ_proj)
     for step in 1:max_descent_steps
         # stop if the loss is tolerably low
         if loss < tol
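Note: the change above makes the reported loss the squared Frobenius norm dot(Δ_proj, Δ_proj) = ‖Δ_proj‖², which is the quantity the descent gradient is presumably derived from; the gradient of norm(Δ_proj) differs from it by a factor of 2‖Δ_proj‖. A minimal sketch of that scaling, using toy stand-in matrices and a finite-difference helper (not the project's realize_gram code):

using LinearAlgebra

G = [2.0 1.0; 1.0 2.0]            # stand-in for the target Gram matrix
Q = [1.0 0.0; 0.0 -1.0]           # stand-in for the quadratic form
Δ(L) = G - L'*Q*L                 # residual, playing the role of proj_diff(gram, L'*Q*L)

sq_loss(L) = dot(Δ(L), Δ(L))      # loss after this commit: ‖Δ‖²
nm_loss(L) = norm(Δ(L))           # loss before this commit: ‖Δ‖

# central finite-difference gradient of a scalar function of a matrix
function fd_grad(f, L; h = 1e-6)
    g = zero(L)
    for i in eachindex(L)
        E = zero(L); E[i] = h
        g[i] = (f(L + E) - f(L - E)) / (2h)
    end
    g
end

L = [1.0 0.5; 0.2 1.0]
g_sq = fd_grad(sq_loss, L)
g_nm = fd_grad(nm_loss, L)

# the two gradients are parallel but scaled by 2‖Δ‖, so a slope derived from the
# squared-loss gradient only predicts decrease of dot(Δ_proj, Δ_proj)
println(norm(g_sq - 2 * nm_loss(L) * g_nm))   # ≈ 0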
@@ -128,7 +128,7 @@ function realize_gram(
         history.stepsize[end] = stepsize
         L = L_last + stepsize * neg_grad
         Δ_proj = proj_diff(gram, L'*Q*L)
-        loss = norm(Δ_proj)
+        loss = dot(Δ_proj, Δ_proj)
         improvement = loss_last - loss
         if improvement >= target_improvement * stepsize * slope
             history.backoff_steps[end] = backoff_steps
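Note: this hunk sits inside a backoff (sufficient-decrease) test, where improvement, stepsize, and slope must all refer to the same loss as the gradient. A hypothetical sketch of such a loop, with names mirroring the diff but the surrounding code assumed rather than taken from the project:

using LinearAlgebra

# loss_fn would be L -> dot(proj_diff(gram, L'*Q*L), proj_diff(gram, L'*Q*L)) in context
function backtrack(loss_fn, L_last, neg_grad; base_step = 1.0, backoff = 0.5,
                   target_improvement = 0.5, max_backoff_steps = 30)
    loss_last = loss_fn(L_last)
    slope = norm(neg_grad)^2              # expected decrease per unit step along neg_grad
    stepsize = base_step
    for backoff_steps in 1:max_backoff_steps
        L = L_last + stepsize * neg_grad
        loss = loss_fn(L)
        improvement = loss_last - loss
        # accept the step once the actual decrease meets the Armijo-style target,
        # which is only meaningful when loss and neg_grad describe the same function
        if improvement >= target_improvement * stepsize * slope
            return L, loss, stepsize, backoff_steps
        end
        stepsize *= backoff
    end
    return L_last, loss_last, 0.0, max_backoff_steps   # no acceptable step found
end

With the old loss norm(Δ_proj), the right-hand side of the acceptance test would be calibrated for the squared loss while the left-hand side measured decrease in the unsquared one; reporting dot(Δ_proj, Δ_proj) removes that mismatch, which is presumably what the commit title refers to.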