Integrate engine into application prototype #15

Merged
glen merged 24 commits from engine-integration into main 2024-11-12 00:46:16 +00:00
Showing only changes of commit e59d60bf77

@@ -324,7 +324,6 @@ function realize_gram_alt_proj(
     frozen = CartesianIndex[];
     scaled_tol = 1e-30,
     min_efficiency = 0.5,
-    init_rate = 1.0,
     backoff = 0.9,
     reg_scale = 1.1,
     max_descent_steps = 200,
@@ -349,13 +348,12 @@ function realize_gram_alt_proj(
     # convert the frozen indices to stacked format
     frozen_stacked = [(index[2]-1)*element_dim + index[1] for index in frozen]
 
-    # initialize variables
-    grad_rate = init_rate
+    # initialize search state
     L = copy(guess)
-    # use Newton's method with backtracking and gradient descent backup
     Δ_proj = proj_diff(gram, L'*Q*L)
     loss = dot(Δ_proj, Δ_proj)
 
+    # use Newton's method with backtracking and gradient descent backup
     for step in 1:max_descent_steps
         # stop if the loss is tolerably low
         if loss < tol
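
For orientation: the `loss` initialized here is the squared magnitude of `Δ_proj`, the difference between the target Gram matrix and the realized one, `L'*Q*L`. A minimal sketch under the assumption that `proj_diff` masks that difference down to the constrained entries; the mask, matrices, and helper name below are illustrative, not the project's.

```julia
using LinearAlgebra

# Stand-in for the project's proj_diff: assumed to compare the target Gram
# matrix with the realized one on the constrained entries only.
proj_diff_sketch(gram, realized, mask) = mask .* (gram - realized)

gram = [1.0 -1.0; -1.0 1.0]   # hypothetical target Gram matrix
Q = Diagonal([1.0, -1.0])     # hypothetical ambient form
L = [1.0 0.0; 0.0 1.0]        # hypothetical configuration
mask = trues(2, 2)            # constrain every entry

Δ_proj = proj_diff_sketch(gram, L' * Q * L, mask)
loss = dot(Δ_proj, Δ_proj)    # squared Frobenius norm of the residual
```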
@@ -411,6 +409,7 @@ function realize_gram_alt_proj(
         empty!(history.last_line_loss)
         rate = one(T)
         step_success = false
+        base_target_improvement = dot(neg_grad, base_step)
         for backoff_steps in 0:max_backoff_steps
             history.stepsize[end] = rate
             L = L_last + rate * base_step
@@ -419,7 +418,7 @@ function realize_gram_alt_proj(
             improvement = loss_last - loss
             push!(history.last_line_L, L)
             push!(history.last_line_loss, loss / scale_adjustment)
-            if improvement >= min_efficiency * rate * dot(neg_grad, base_step)
+            if improvement >= min_efficiency * rate * base_target_improvement
                 history.backoff_steps[end] = backoff_steps
                 step_success = true
                 break
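
These two hunks compute the line search's target improvement once per descent step instead of once per backoff iteration: `neg_grad` and `base_step` are fixed while `rate` shrinks, so `dot(neg_grad, base_step)` is loop-invariant and hoisting it is purely a refactor. A minimal standalone sketch of a backtracking search in this style, with illustrative names and a toy quadratic, none of it taken from the project:

```julia
using LinearAlgebra

function backtrack_sketch(loss_fn, L, neg_grad, base_step;
    min_efficiency = 0.5, backoff = 0.9, max_backoff_steps = 110
)
    loss_last = loss_fn(L)
    rate = 1.0
    # hoisted: this dot product does not depend on rate
    base_target_improvement = dot(neg_grad, base_step)
    for _ in 0:max_backoff_steps
        L_next = L + rate * base_step
        improvement = loss_last - loss_fn(L_next)
        # Armijo-style test: accept once the step achieves at least
        # min_efficiency of the improvement the linear model predicts
        if improvement >= min_efficiency * rate * base_target_improvement
            return L_next, true
        end
        rate *= backoff
    end
    return L, false
end

# toy usage: back off along the negative gradient of a quadratic
f(x) = dot(x, x)
x = [1.0, 1.0]
g = -2x
x_next, ok = backtrack_sketch(f, x, g, g)
```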
@@ -446,7 +445,6 @@ function realize_gram(
     frozen = nothing;
     scaled_tol = 1e-30,
     min_efficiency = 0.5,
-    init_rate = 1.0,
     backoff = 0.9,
     reg_scale = 1.1,
     max_descent_steps = 200,
@@ -477,13 +475,12 @@ function realize_gram(
         unfrozen_stacked = reshape(is_unfrozen, total_dim)
     end
 
-    # initialize variables
-    grad_rate = init_rate
+    # initialize search state
     L = copy(guess)
-    # use Newton's method with backtracking and gradient descent backup
     Δ_proj = proj_diff(gram, L'*Q*L)
     loss = dot(Δ_proj, Δ_proj)
 
+    # use Newton's method with backtracking and gradient descent backup
     for step in 1:max_descent_steps
         # stop if the loss is tolerably low
         if loss < tol
@@ -545,6 +542,7 @@ function realize_gram(
         empty!(history.last_line_loss)
         rate = one(T)
         step_success = false
+        base_target_improvement = dot(neg_grad, base_step)
         for backoff_steps in 0:max_backoff_steps
             history.stepsize[end] = rate
             L = L_last + rate * base_step
@@ -553,7 +551,7 @@ function realize_gram(
             improvement = loss_last - loss
             push!(history.last_line_L, L)
             push!(history.last_line_loss, loss / scale_adjustment)
-            if improvement >= min_efficiency * rate * dot(neg_grad, base_step)
+            if improvement >= min_efficiency * rate * base_target_improvement
                 history.backoff_steps[end] = backoff_steps
                 step_success = true
                 break
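
The same hoist lands in `realize_gram`, whose line search is identical. Since the hoisted dot product does not depend on `rate`, the acceptance test is unchanged at every backoff step; a quick illustrative check (assumed shapes, not from the PR):

```julia
using LinearAlgebra, Test

neg_grad = randn(6)
base_step = randn(6)
base_target_improvement = dot(neg_grad, base_step)
for rate in (1.0, 0.9, 0.9^2, 0.9^3)
    # inline and hoisted forms of the acceptance threshold agree
    @test rate * dot(neg_grad, base_step) ≈ rate * base_target_improvement
end
```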