From e59d60bf7745dfb25ca054a5beecb08bc8f29b2f Mon Sep 17 00:00:00 2001
From: Aaron Fenyes
Date: Fri, 25 Oct 2024 17:17:49 -0700
Subject: [PATCH] Reorganize search state; remove unused variables

---
 engine-proto/gram-test/Engine.jl | 22 ++++++++++------------
 1 file changed, 10 insertions(+), 12 deletions(-)

diff --git a/engine-proto/gram-test/Engine.jl b/engine-proto/gram-test/Engine.jl
index 1eb72f5..6dfb6e9 100644
--- a/engine-proto/gram-test/Engine.jl
+++ b/engine-proto/gram-test/Engine.jl
@@ -324,7 +324,6 @@ function realize_gram_alt_proj(
   frozen = CartesianIndex[];
   scaled_tol = 1e-30,
   min_efficiency = 0.5,
-  init_rate = 1.0,
   backoff = 0.9,
   reg_scale = 1.1,
   max_descent_steps = 200,
@@ -349,13 +348,12 @@ function realize_gram_alt_proj(
   # convert the frozen indices to stacked format
   frozen_stacked = [(index[2]-1)*element_dim + index[1] for index in frozen]
   
-  # initialize variables
-  grad_rate = init_rate
+  # initialize search state
   L = copy(guess)
-  
-  # use Newton's method with backtracking and gradient descent backup
   Δ_proj = proj_diff(gram, L'*Q*L)
   loss = dot(Δ_proj, Δ_proj)
+  
+  # use Newton's method with backtracking and gradient descent backup
   for step in 1:max_descent_steps
     # stop if the loss is tolerably low
     if loss < tol
@@ -411,6 +409,7 @@
     empty!(history.last_line_loss)
     rate = one(T)
     step_success = false
+    base_target_improvement = dot(neg_grad, base_step)
     for backoff_steps in 0:max_backoff_steps
       history.stepsize[end] = rate
       L = L_last + rate * base_step
@@ -419,7 +418,7 @@
       improvement = loss_last - loss
       push!(history.last_line_L, L)
       push!(history.last_line_loss, loss / scale_adjustment)
-      if improvement >= min_efficiency * rate * dot(neg_grad, base_step)
+      if improvement >= min_efficiency * rate * base_target_improvement
         history.backoff_steps[end] = backoff_steps
         step_success = true
         break
@@ -446,7 +445,6 @@ function realize_gram(
   frozen = nothing;
   scaled_tol = 1e-30,
   min_efficiency = 0.5,
-  init_rate = 1.0,
   backoff = 0.9,
   reg_scale = 1.1,
   max_descent_steps = 200,
@@ -477,13 +475,12 @@ function realize_gram(
     unfrozen_stacked = reshape(is_unfrozen, total_dim)
   end
   
-  # initialize variables
-  grad_rate = init_rate
+  # initialize search state
   L = copy(guess)
-  
-  # use Newton's method with backtracking and gradient descent backup
   Δ_proj = proj_diff(gram, L'*Q*L)
   loss = dot(Δ_proj, Δ_proj)
+  
+  # use Newton's method with backtracking and gradient descent backup
   for step in 1:max_descent_steps
     # stop if the loss is tolerably low
     if loss < tol
@@ -545,6 +542,7 @@
     empty!(history.last_line_loss)
     rate = one(T)
     step_success = false
+    base_target_improvement = dot(neg_grad, base_step)
     for backoff_steps in 0:max_backoff_steps
       history.stepsize[end] = rate
       L = L_last + rate * base_step
@@ -553,7 +551,7 @@
       improvement = loss_last - loss
      push!(history.last_line_L, L)
       push!(history.last_line_loss, loss / scale_adjustment)
-      if improvement >= min_efficiency * rate * dot(neg_grad, base_step)
+      if improvement >= min_efficiency * rate * base_target_improvement
         history.backoff_steps[end] = backoff_steps
         step_success = true
         break
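
For context, not part of the patch: a minimal, self-contained Julia sketch of the backtracking line search these hunks adjust. The toy quadratic loss and the helper name `backtrack` are illustrative assumptions, but the hoisted `base_target_improvement = dot(neg_grad, base_step)` and the sufficient-decrease test mirror the patched condition.

  using LinearAlgebra

  # toy loss standing in for the Gram-matrix loss in Engine.jl
  loss_at(L) = 0.5 * dot(L, L)

  # backtracking line search: compute the predicted improvement once,
  # before the backoff loop, and reuse it in the acceptance test
  function backtrack(L_last; min_efficiency = 0.5, backoff = 0.9, max_backoff_steps = 110)
    loss_last = loss_at(L_last)
    neg_grad = -L_last                # negative gradient of the toy loss
    base_step = neg_grad              # descent direction (plain gradient step here)
    base_target_improvement = dot(neg_grad, base_step)
    rate = 1.0
    for _ in 0:max_backoff_steps
      L = L_last + rate * base_step
      improvement = loss_last - loss_at(L)
      # accept the step once it achieves the required fraction of the
      # linearly predicted decrease
      if improvement >= min_efficiency * rate * base_target_improvement
        return L, rate, true
      end
      rate *= backoff
    end
    return L_last, rate, false
  end

  L, rate, ok = backtrack([3.0, -4.0])
  println(ok ? "accepted stepsize $rate" : "line search failed")

Precomputing the target improvement does not change the accepted steps, since `neg_grad` and `base_step` are fixed for the duration of each backoff loop; it only avoids recomputing the same dot product on every backoff iteration.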