Keep optimizing until the total loss is stationary

Aaron Fenyes 2025-09-18 13:20:04 -07:00
parent 3664ea73b1
commit a203f6bc1b

@@ -413,7 +413,9 @@ pub fn realize_gram(
     let scale_adjustment = (gram.0.len() as f64).sqrt();
     let tol = scale_adjustment * scaled_tol;
-    // initialize the softness parameter
+    // set up constants and variables related to minimizing the soft loss
+    const GRAD_TOL: f64 = 1e-9;
+    let mut grad_size = f64::INFINITY;
     let mut softness = 1.0;
     // convert the frozen indices to stacked format
@@ -468,10 +470,13 @@ pub fn realize_gram(
             hess[(k, k)] = 1.0;
         }
-        // stop if the hard loss is tolerably low
+        // stop if the hard loss is tolerably low and the total loss is close to
+        // stationary. we use `neg_grad_stacked` to measure the size of the
+        // gradient because it's been projected onto the frozen subspace
         history.config.push(state.config.clone());
         history.scaled_loss.push(state.loss_hard / scale_adjustment);
-        if state.loss_hard < tol { break; }
+        grad_size = neg_grad_stacked.norm_squared();
+        if state.loss_hard < tol && grad_size < GRAD_TOL { break; }
         // compute the Newton step
         /* TO DO */
@@ -509,15 +514,14 @@ pub fn realize_gram(
-        const GRAD_TOL: f64 = 1e-4;
+        // if we're near a minimum of the total loss, but the hard loss still
+        // isn't tolerably low, make the soft constraints softer
+        const SOFTNESS_BACKOFF_THRESHOLD: f64 = 1e-6;
         const SOFTNESS_BACKOFF: f64 = 0.5;
-        if neg_grad.norm_squared() < GRAD_TOL {
-            // if we're close to a minimum, make the soft constraints softer
+        if state.loss_hard >= tol && grad_size < SOFTNESS_BACKOFF_THRESHOLD {
             softness *= SOFTNESS_BACKOFF;
             console_log!("Softness decreased to {softness}");
         }
     }
-    let result = if state.loss_hard < tol {
+    let result = if state.loss_hard < tol && grad_size < GRAD_TOL {
         // express the uniform basis in the standard basis
         const UNIFORM_DIM: usize = 4;
         let total_dim_unif = UNIFORM_DIM * assembly_dim;
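
In isolation, the convergence logic this commit introduces looks roughly like the sketch below. It is a minimal illustration, not the real realize_gram code: SolverState, grad_size(), take_step(), and optimize() are hypothetical stand-ins for the solver's actual state and Newton step, and only the constants and the two conditions mirror the diff above.

// Minimal sketch of the stopping and softness-backoff pattern from this
// commit. `SolverState`, `grad_size`, and `take_step` are hypothetical
// stand-ins, not the real dyna3 solver API.

const GRAD_TOL: f64 = 1e-9;
const SOFTNESS_BACKOFF_THRESHOLD: f64 = 1e-6;
const SOFTNESS_BACKOFF: f64 = 0.5;

struct SolverState {
    loss_hard: f64,
    neg_grad_stacked: Vec<f64>,
}

impl SolverState {
    // squared norm of the negative gradient, already projected onto the
    // subspace where the frozen entries stay fixed
    fn grad_size(&self) -> f64 {
        self.neg_grad_stacked.iter().map(|g| g * g).sum()
    }

    // placeholder for one damped Newton step on the softened loss
    fn take_step(&mut self, _softness: f64) {}
}

fn optimize(state: &mut SolverState, tol: f64, max_steps: usize) -> bool {
    let mut softness = 1.0;
    let mut grad_size = f64::INFINITY;
    for _ in 0..max_steps {
        grad_size = state.grad_size();

        // stop only when the hard loss is tolerably low *and* the total
        // loss is close to stationary
        if state.loss_hard < tol && grad_size < GRAD_TOL {
            break;
        }

        // near a stationary point with the hard loss still too high:
        // make the soft constraints softer and keep optimizing
        if state.loss_hard >= tol && grad_size < SOFTNESS_BACKOFF_THRESHOLD {
            softness *= SOFTNESS_BACKOFF;
        }

        state.take_step(softness);
    }

    // success means both conditions held, matching the `result` check above
    state.loss_hard < tol && grad_size < GRAD_TOL
}

fn main() {
    // tiny usage example with made-up numbers; the placeholder step does
    // nothing, so this simply exercises the loop and the exit conditions
    let mut state = SolverState {
        loss_hard: 1.0,
        neg_grad_stacked: vec![0.1, -0.2, 0.3],
    };
    let converged = optimize(&mut state, 1e-12, 100);
    println!("converged: {converged}");
}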