Reorganize search state; remove unused variables
parent 16df161fe7
commit e59d60bf77
@@ -324,7 +324,6 @@ function realize_gram_alt_proj(
   frozen = CartesianIndex[];
   scaled_tol = 1e-30,
   min_efficiency = 0.5,
-  init_rate = 1.0,
   backoff = 0.9,
   reg_scale = 1.1,
   max_descent_steps = 200,
@@ -349,13 +348,12 @@ function realize_gram_alt_proj(
   # convert the frozen indices to stacked format
   frozen_stacked = [(index[2]-1)*element_dim + index[1] for index in frozen]
 
-  # initialize variables
-  grad_rate = init_rate
+  # initialize search state
   L = copy(guess)
-
-  # use Newton's method with backtracking and gradient descent backup
   Δ_proj = proj_diff(gram, L'*Q*L)
   loss = dot(Δ_proj, Δ_proj)
+
+  # use Newton's method with backtracking and gradient descent backup
   for step in 1:max_descent_steps
     # stop if the loss is tolerably low
     if loss < tol
@@ -411,6 +409,7 @@ function realize_gram_alt_proj(
     empty!(history.last_line_loss)
     rate = one(T)
     step_success = false
+    base_target_improvement = dot(neg_grad, base_step)
     for backoff_steps in 0:max_backoff_steps
       history.stepsize[end] = rate
       L = L_last + rate * base_step
@@ -419,7 +418,7 @@ function realize_gram_alt_proj(
       improvement = loss_last - loss
       push!(history.last_line_L, L)
       push!(history.last_line_loss, loss / scale_adjustment)
-      if improvement >= min_efficiency * rate * dot(neg_grad, base_step)
+      if improvement >= min_efficiency * rate * base_target_improvement
         history.backoff_steps[end] = backoff_steps
         step_success = true
         break
@@ -446,7 +445,6 @@ function realize_gram(
   frozen = nothing;
   scaled_tol = 1e-30,
   min_efficiency = 0.5,
-  init_rate = 1.0,
   backoff = 0.9,
   reg_scale = 1.1,
   max_descent_steps = 200,
@@ -477,13 +475,12 @@ function realize_gram(
     unfrozen_stacked = reshape(is_unfrozen, total_dim)
   end
 
-  # initialize variables
-  grad_rate = init_rate
+  # initialize search state
   L = copy(guess)
-
-  # use Newton's method with backtracking and gradient descent backup
   Δ_proj = proj_diff(gram, L'*Q*L)
   loss = dot(Δ_proj, Δ_proj)
+
+  # use Newton's method with backtracking and gradient descent backup
   for step in 1:max_descent_steps
     # stop if the loss is tolerably low
     if loss < tol
@@ -545,6 +542,7 @@ function realize_gram(
     empty!(history.last_line_loss)
     rate = one(T)
     step_success = false
+    base_target_improvement = dot(neg_grad, base_step)
     for backoff_steps in 0:max_backoff_steps
       history.stepsize[end] = rate
       L = L_last + rate * base_step
@@ -553,7 +551,7 @@ function realize_gram(
      improvement = loss_last - loss
      push!(history.last_line_L, L)
      push!(history.last_line_loss, loss / scale_adjustment)
-      if improvement >= min_efficiency * rate * dot(neg_grad, base_step)
+      if improvement >= min_efficiency * rate * base_target_improvement
        history.backoff_steps[end] = backoff_steps
        step_success = true
        break
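
The backoff loop in both functions applies an Armijo-style sufficient-decrease test, and the new base_target_improvement simply hoists the constant dot(neg_grad, base_step) out of that loop so it is computed once per descent step. A minimal sketch of the pattern in isolation (the helper name backtrack_step and the toy quadratic loss are illustrative assumptions, not code from this repository):

# Illustrative sketch only: a backtracking line search with the Armijo-style
# test used in the diff. backtrack_step and the toy loss are not project code.
using LinearAlgebra

function backtrack_step(f, x, neg_grad, base_step;
                        min_efficiency = 0.5, backoff = 0.9, max_backoff_steps = 110)
  loss_last = f(x)
  rate = 1.0
  # hoisted out of the loop, as in the commit: constant for a given descent step
  base_target_improvement = dot(neg_grad, base_step)
  for _ in 0:max_backoff_steps
    x_trial = x + rate * base_step
    improvement = loss_last - f(x_trial)
    # accept the step once it achieves a set fraction of the linear prediction
    if improvement >= min_efficiency * rate * base_target_improvement
      return x_trial, true
    end
    rate *= backoff
  end
  return x, false  # no acceptable step length found
end

# toy usage: one backtracked gradient step on f(x) = |x|^2
f(x) = dot(x, x)
x0 = [1.0, 2.0]
neg_grad = -2 .* x0
x1, ok = backtrack_step(f, x0, neg_grad, neg_grad)

Hoisting the dot product does not change which steps are accepted; it only avoids recomputing the same target improvement on every backoff trial.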