Introduce soft constraints
Use a penalty method as a quick & dirty way to get started.
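The scheme, in outline: each soft constraint contributes a residual to the loss with weight `softness`, so the total loss is loss_hard + softness * loss_soft. Only the hard loss gates convergence. When descent reaches a near-stationary point of the total loss while the hard loss is still above tolerance, the softness is halved, letting the hard constraints dominate. A minimal sketch of that control flow follows; `descent_step` is a hypothetical stand-in for one regularized Newton iteration, not part of the engine:

    // hypothetical stand-in for one Newton-with-backtracking iteration;
    // returns the hard loss and the squared norm of the loss gradient
    fn descent_step(_softness: f64) -> (f64, f64) {
        (0.0, 0.0) // placeholder
    }

    fn outline(tol: f64, max_descent_steps: usize) {
        const GRAD_TOL: f64 = 1e-4;
        const SOFTNESS_BACKOFF: f64 = 0.5;
        let mut softness = 1.0;
        for _ in 0..max_descent_steps {
            let (loss_hard, grad_norm_sq) = descent_step(softness);
            // only the hard loss decides convergence
            if loss_hard < tol { break; }
            // near a minimum of the total loss but not converged:
            // de-emphasize the soft constraints
            if grad_norm_sq < GRAD_TOL {
                softness *= SOFTNESS_BACKOFF;
            }
        }
    }

In the real code below, these roles are played by `SearchState::loss_hard`, the `GRAD_TOL` and `SOFTNESS_BACKOFF` constants, and the `softness` variable threaded through `seek_better_config`.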
parent 9e74d4e837
commit 3664ea73b1
3 changed files with 105 additions and 59 deletions
@@ -1,6 +1,7 @@
 use lazy_static::lazy_static;
 use nalgebra::{Const, DMatrix, DVector, DVectorView, Dyn, SymmetricEigen};
 use std::fmt::{Display, Error, Formatter};
+use sycamore::prelude::console_log; /* DEBUG */
 
 // --- elements ---
 
@@ -240,6 +241,7 @@ impl DescentHistory {
 
 pub struct ConstraintProblem {
     pub gram: PartialMatrix,
+    pub soft: PartialMatrix,
     pub frozen: PartialMatrix,
     pub guess: DMatrix<f64>,
 }
@@ -249,6 +251,7 @@ impl ConstraintProblem {
         const ELEMENT_DIM: usize = 5;
         Self {
             gram: PartialMatrix::new(),
+            soft: PartialMatrix::new(),
             frozen: PartialMatrix::new(),
             guess: DMatrix::<f64>::zeros(ELEMENT_DIM, element_count),
         }
@@ -258,6 +261,7 @@ impl ConstraintProblem {
     pub fn from_guess(guess_columns: &[DVector<f64>]) -> Self {
         Self {
             gram: PartialMatrix::new(),
+            soft: PartialMatrix::new(),
             frozen: PartialMatrix::new(),
             guess: DMatrix::from_columns(guess_columns),
         }
@@ -280,14 +284,18 @@ lazy_static! {
 struct SearchState {
     config: DMatrix<f64>,
     err_proj: DMatrix<f64>,
+    loss_hard: f64,
     loss: f64,
 }
 
 impl SearchState {
-    fn from_config(gram: &PartialMatrix, config: DMatrix<f64>) -> Self {
-        let err_proj = gram.sub_proj(&(config.tr_mul(&*Q) * &config));
+    fn from_config(gram: &PartialMatrix, soft: &PartialMatrix, softness: f64, config: DMatrix<f64>) -> Self {
+        let config_gram = &(config.tr_mul(&*Q) * &config);
+        let err_proj_hard = gram.sub_proj(config_gram);
+        let err_proj = &err_proj_hard + softness * soft.sub_proj(config_gram);
+        let loss_hard = err_proj_hard.norm_squared();
         let loss = err_proj.norm_squared();
-        Self { config, err_proj, loss }
+        Self { config, err_proj, loss_hard, loss }
     }
 }
 
@@ -331,6 +339,8 @@ pub fn local_unif_to_std(v: DVectorView<f64>) -> DMatrix<f64> {
 // use backtracking line search to find a better configuration
 fn seek_better_config(
     gram: &PartialMatrix,
+    soft: &PartialMatrix,
+    softness: f64,
     state: &SearchState,
     base_step: &DMatrix<f64>,
     base_target_improvement: f64,
@@ -341,7 +351,7 @@ fn seek_better_config(
     let mut rate = 1.0;
     for backoff_steps in 0..max_backoff_steps {
         let trial_config = &state.config + rate * base_step;
-        let trial_state = SearchState::from_config(gram, trial_config);
+        let trial_state = SearchState::from_config(gram, soft, softness, trial_config);
         let improvement = state.loss - trial_state.loss;
         if improvement >= min_efficiency * rate * base_target_improvement {
             return Some((trial_state, backoff_steps));
@@ -376,7 +386,7 @@ pub fn realize_gram(
     max_backoff_steps: i32,
 ) -> Realization {
     // destructure the problem data
-    let ConstraintProblem { gram, guess, frozen } = problem;
+    let ConstraintProblem { gram, soft, guess, frozen } = problem;
 
     // start the descent history
     let mut history = DescentHistory::new();
@@ -403,13 +413,16 @@ pub fn realize_gram(
     let scale_adjustment = (gram.0.len() as f64).sqrt();
     let tol = scale_adjustment * scaled_tol;
 
+    // initialize the softness parameter
+    let mut softness = 1.0;
+
     // convert the frozen indices to stacked format
     let frozen_stacked: Vec<usize> = frozen.into_iter().map(
         |MatrixEntry { index: (row, col), .. }| col*element_dim + row
     ).collect();
 
     // use a regularized Newton's method with backtracking
-    let mut state = SearchState::from_config(gram, frozen.freeze(guess));
+    let mut state = SearchState::from_config(gram, soft, softness, frozen.freeze(guess));
     let mut hess = DMatrix::zeros(element_dim, assembly_dim);
     for _ in 0..max_descent_steps {
         // find the negative gradient of the loss function
@@ -426,7 +439,7 @@ pub fn realize_gram(
         let neg_d_err =
             basis_mat.tr_mul(&*Q) * &state.config
             + state.config.tr_mul(&*Q) * &basis_mat;
-        let neg_d_err_proj = gram.proj(&neg_d_err);
+        let neg_d_err_proj = gram.proj(&neg_d_err) + softness * soft.proj(&neg_d_err);
         let deriv_grad = 4.0 * &*Q * (
             -&basis_mat * &state.err_proj
             + &state.config * &neg_d_err_proj
@@ -455,10 +468,10 @@ pub fn realize_gram(
             hess[(k, k)] = 1.0;
         }
 
-        // stop if the loss is tolerably low
+        // stop if the hard loss is tolerably low
         history.config.push(state.config.clone());
-        history.scaled_loss.push(state.loss / scale_adjustment);
-        if state.loss < tol { break; }
+        history.scaled_loss.push(state.loss_hard / scale_adjustment);
+        if state.loss_hard < tol { break; }
 
         // compute the Newton step
         /* TO DO */
@@ -482,7 +495,7 @@ pub fn realize_gram(
 
         // use backtracking line search to find a better configuration
         if let Some((better_state, backoff_steps)) = seek_better_config(
-            gram, &state, &base_step, neg_grad.dot(&base_step),
+            gram, soft, softness, &state, &base_step, neg_grad.dot(&base_step),
             min_efficiency, backoff, max_backoff_steps,
         ) {
             state = better_state;
@@ -493,8 +506,18 @@ pub fn realize_gram(
                 history,
             };
         }
+
+        // if we're near a minimum of the total loss, but the hard loss still
+        // isn't tolerably low, make the soft constraints softer
+        const GRAD_TOL: f64 = 1e-4;
+        const SOFTNESS_BACKOFF: f64 = 0.5;
+        if neg_grad.norm_squared() < GRAD_TOL {
+            // if we're close to a minimum, make the soft constraints softer
+            softness *= SOFTNESS_BACKOFF;
+            console_log!("Softness decreased to {softness}");
+        }
     }
-    let result = if state.loss < tol {
+    let result = if state.loss_hard < tol {
         // express the uniform basis in the standard basis
         const UNIFORM_DIM: usize = 4;
         let total_dim_unif = UNIFORM_DIM * assembly_dim;
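A note on the weighting in `SearchState::from_config` above: the hard and soft constraints share one residual matrix, with the soft part scaled by `softness` before it joins the error projection, so a single gradient computation drives both. A toy version with explicit 0/1 masks standing in for the engine's `PartialMatrix::sub_proj` (the masks and numbers here are made up for illustration):

    use nalgebra::DMatrix;

    // toy stand-in for the engine's `sub_proj`: keep only the residual
    // entries that a constraint set covers, via an explicit 0/1 mask
    fn masked(residual: &DMatrix<f64>, mask: &DMatrix<f64>) -> DMatrix<f64> {
        residual.component_mul(mask)
    }

    fn main() {
        let residual = DMatrix::from_row_slice(2, 2, &[0.3, -0.1, 0.2, 0.4]);
        let hard_mask = DMatrix::from_row_slice(2, 2, &[1.0, 1.0, 0.0, 0.0]);
        let soft_mask = DMatrix::from_row_slice(2, 2, &[0.0, 0.0, 1.0, 0.0]);
        let softness = 0.5;
        let err_proj_hard = masked(&residual, &hard_mask);
        let err_proj = &err_proj_hard + softness * masked(&residual, &soft_mask);
        // the hard loss alone decides convergence; the total loss drives descent
        println!("hard loss: {}", err_proj_hard.norm_squared());
        println!("total loss: {}", err_proj.norm_squared());
    }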