Engine: Find the tangent space of the solution variety
At the end of the realization routine, use the computed Hessian to find the tangent space of the solution variety, and return it alongside the realization. Since altering the constraints can change the tangent space without changing the solution, we compute the tangent space even when the guess passed to the realization routine is already a solution.
parent b490c8707f
commit 2c55a63a6f
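In brief: at a realized configuration the loss sits at its minimum of zero, so tangent directions of the solution variety show up as near-zero-eigenvalue directions of the loss Hessian, and the new `ConfigSubspace::symmetric_kernel` collects exactly those eigenvectors. A minimal standalone sketch of that thresholding step, using only nalgebra; the 3×3 matrix below is a stand-in for the Hessian, not data from this commit:

    use nalgebra::{DMatrix, DVector};

    fn main() {
        const THRESHOLD: f64 = 1.0e-9;
        // a symmetric matrix whose kernel is spanned by (1, 1, 1)
        let a = DMatrix::from_row_slice(3, 3, &[
             2.0, -1.0, -1.0,
            -1.0,  2.0, -1.0,
            -1.0, -1.0,  2.0
        ]);
        let eig = a.symmetric_eigen();
        // keep the eigenvectors whose eigenvalues are numerically zero
        let kernel_basis: Vec<DVector<f64>> = eig.eigenvalues.iter()
            .zip(eig.eigenvectors.column_iter())
            .filter(|(lambda, _)| lambda.abs() < THRESHOLD)
            .map(|(_, v)| v.into_owned())
            .collect();
        assert_eq!(kernel_basis.len(), 1); // eigenvalues are 0, 3, 3
    }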
@@ -2,7 +2,7 @@ use dyna3::engine::{Q, irisawa::realize_irisawa_hexlet};
 
 fn main() {
     const SCALED_TOL: f64 = 1.0e-12;
-    let (config, success, history) = realize_irisawa_hexlet(SCALED_TOL);
+    let (config, _, success, history) = realize_irisawa_hexlet(SCALED_TOL);
     print!("\nCompleted Gram matrix:{}", config.tr_mul(&*Q) * &config);
     if success {
         println!("Target accuracy achieved!");
@@ -18,7 +18,7 @@ fn main() {
     ]);
     let frozen = [(3, 0)];
     println!();
-    let (config, success, history) = realize_gram(
+    let (config, _, success, history) = realize_gram(
         &gram, guess, &frozen,
         1.0e-12, 0.5, 0.9, 1.1, 200, 110
     );
@@ -21,7 +21,7 @@ fn main() {
         ])
     };
     println!();
-    let (config, success, history) = realize_gram(
+    let (config, _, success, history) = realize_gram(
         &gram, guess, &[],
         1.0e-12, 0.5, 0.9, 1.1, 200, 110
     );
@@ -5,7 +5,7 @@ use std::{collections::BTreeSet, sync::atomic::{AtomicU64, Ordering}};
 use sycamore::prelude::*;
 use web_sys::{console, wasm_bindgen::JsValue}; /* DEBUG */
 
-use crate::engine::{realize_gram, PartialMatrix};
+use crate::engine::{realize_gram, ConfigSubspace, PartialMatrix};
 
 // the types of the keys we use to access an assembly's elements and constraints
 pub type ElementKey = usize;
@@ -109,7 +109,6 @@ impl Element {
         }
     }
 }
 
-
 #[derive(Clone)]
 pub struct Constraint {
@@ -127,6 +126,9 @@ pub struct Assembly {
     pub elements: Signal<Slab<Element>>,
     pub constraints: Signal<Slab<Constraint>>,
 
+    // solution variety tangent space
+    pub tangent: Signal<ConfigSubspace>,
+
     // indexing
     pub elements_by_id: Signal<FxHashMap<String, ElementKey>>
 }
@@ -136,6 +138,7 @@ impl Assembly {
         Assembly {
             elements: create_signal(Slab::new()),
             constraints: create_signal(Slab::new()),
+            tangent: create_signal(ConfigSubspace::zero()),
             elements_by_id: create_signal(FxHashMap::default())
         }
     }
@@ -247,7 +250,7 @@ impl Assembly {
         }
 
         // look for a configuration with the given Gram matrix
-        let (config, success, history) = realize_gram(
+        let (config, tangent, success, history) = realize_gram(
             &gram, guess, &[],
             1.0e-12, 0.5, 0.9, 1.1, 200, 110
         );
@@ -271,6 +274,9 @@ impl Assembly {
                     |rep| rep.set_column(0, &config.column(elt.column_index))
                 );
             }
+
+            // save the tangent space
+            self.tangent.set_silent(tangent);
         }
     }
 }
@@ -1,5 +1,5 @@
 use lazy_static::lazy_static;
-use nalgebra::{Const, DMatrix, DVector, Dyn};
+use nalgebra::{Const, DMatrix, DVector, DVectorView, Dyn, SymmetricEigen};
 use web_sys::{console, wasm_bindgen::JsValue}; /* DEBUG */
 
 // --- elements ---
@@ -85,6 +85,47 @@ impl PartialMatrix {
         }
     }
 }
 
+// --- configuration subspaces ---
+
+pub struct ConfigSubspace(Vec<DMatrix<f64>>);
+
+impl ConfigSubspace {
+    pub fn zero() -> ConfigSubspace {
+        ConfigSubspace(Vec::new())
+    }
+
+    // approximate the kernel of a symmetric endomorphism of the configuration
+    // space for `assembly_dim` elements. we consider an eigenvector to be part
+    // of the kernel if its eigenvalue is smaller than the constant `THRESHOLD`
+    fn symmetric_kernel(a: DMatrix<f64>, assembly_dim: usize) -> ConfigSubspace {
+        const ELEMENT_DIM: usize = 5;
+        const THRESHOLD: f64 = 1.0e-9;
+        let eig = SymmetricEigen::new(a);
+        let eig_vecs = eig.eigenvectors.column_iter();
+        let eig_pairs = eig.eigenvalues.iter().zip(eig_vecs);
+        let basis = eig_pairs.filter_map(
+            |(λ, v)| (λ.abs() < THRESHOLD).then_some(
+                Into::<DMatrix<f64>>::into(
+                    v.reshape_generic(Dyn(ELEMENT_DIM), Dyn(assembly_dim))
+                )
+            )
+        );
+        ConfigSubspace(basis.collect())
+    }
+
+    // find the projection onto this subspace of the motion where the element
+    // with the given column index has velocity `v`
+    /* TO DO */
+    // for the zero subspace, this method's behavior doesn't match its name: it
+    // panics rather than returning zero
+    fn proj(&self, v: &DVectorView<f64>, column_index: usize) -> DMatrix<f64> {
+        let ConfigSubspace(basis) = self;
+        basis.into_iter().map(
+            |b| b.column(column_index).dot(&v) * b
+        ).sum()
+    }
+}
+
 // --- descent history ---
 
 pub struct DescentHistory {
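Because `symmetric_kernel` takes its basis from a symmetric eigendecomposition, the basis matrices are orthonormal under the Frobenius inner product, which is what makes the sum in `proj` a genuine orthogonal projection: for a single-element motion (velocity `v` in column `k`, zero elsewhere), the Frobenius inner product with a basis matrix `b` reduces to `b.column(k).dot(&v)`. A hedged sketch of projecting a whole-configuration motion column by column, as the new `tangent_test` below does; it assumes `tangent` and `motion` already exist in scope inside engine.rs, where the private `proj` is visible:

    // assumes `tangent: ConfigSubspace` and `motion: DMatrix<f64>` are in scope,
    // e.g. from a prior call to `realize_gram`
    let motion_proj: DMatrix<f64> = motion.column_iter().enumerate().map(
        |(k, v)| tangent.proj(&v, k)
    ).sum();
    // `motion` lies in the tangent space when projection leaves it (numerically) unchanged
    let in_tangent_space = (motion - motion_proj).norm_squared() < 1.0e-12;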
@@ -181,7 +222,7 @@ pub fn realize_gram(
     reg_scale: f64,
     max_descent_steps: i32,
     max_backoff_steps: i32
-) -> (DMatrix<f64>, bool, DescentHistory) {
+) -> (DMatrix<f64>, ConfigSubspace, bool, DescentHistory) {
     // start the descent history
     let mut history = DescentHistory::new();
 
@@ -201,12 +242,8 @@ pub fn realize_gram(
 
     // use Newton's method with backtracking and gradient descent backup
     let mut state = SearchState::from_config(gram, guess);
+    let mut hess = DMatrix::zeros(element_dim, assembly_dim);
     for _ in 0..max_descent_steps {
-        // stop if the loss is tolerably low
-        history.config.push(state.config.clone());
-        history.scaled_loss.push(state.loss / scale_adjustment);
-        if state.loss < tol { break; }
-
         // find the negative gradient of the loss function
         let neg_grad = 4.0 * &*Q * &state.config * &state.err_proj;
         let mut neg_grad_stacked = neg_grad.clone().reshape_generic(Dyn(total_dim), Const::<1>);
@@ -229,7 +266,7 @@ pub fn realize_gram(
                 hess_cols.push(deriv_grad.reshape_generic(Dyn(total_dim), Const::<1>));
             }
         }
-        let mut hess = DMatrix::from_columns(hess_cols.as_slice());
+        hess = DMatrix::from_columns(hess_cols.as_slice());
 
         // regularize the Hessian
         let min_eigval = hess.symmetric_eigenvalues().min();
@@ -249,6 +286,11 @@ pub fn realize_gram(
             hess[(k, k)] = 1.0;
         }
 
+        // stop if the loss is tolerably low
+        history.config.push(state.config.clone());
+        history.scaled_loss.push(state.loss / scale_adjustment);
+        if state.loss < tol { break; }
+
         // compute the Newton step
         /*
         we need to either handle or eliminate the case where the minimum
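Taken together, the previous three hunks reorder the descent loop: `hess` is hoisted out of the loop, and the convergence check now runs after the Hessian has been rebuilt and regularized. The effect is that whenever the loop exits via `break`, `hess` holds the Hessian at the final configuration, ready for the kernel computation after the loop. In outline (the helper names here are hypothetical, not from the diff):

    let mut hess = DMatrix::zeros(element_dim, assembly_dim);
    for _ in 0..max_descent_steps {
        hess = assemble_hessian(&state);           // hypothetical helper
        if state.loss < tol { break; }             // check *after* rebuilding `hess`
        state = take_descent_step(state, &hess);   // hypothetical helper
    }
    // `hess` is now the Hessian at the configuration we return

The `hess.clone()` in the next hunk exists for the same reason: nalgebra's `cholesky()` consumes its receiver, and the matrix must stay alive for use after the loop.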
@@ -256,7 +298,7 @@ pub fn realize_gram(
         singular. right now, this causes the Cholesky decomposition to return
         `None`, leading to a panic when we unrap
         */
-        let base_step_stacked = hess.cholesky().unwrap().solve(&neg_grad_stacked);
+        let base_step_stacked = hess.clone().cholesky().unwrap().solve(&neg_grad_stacked);
         let base_step = base_step_stacked.reshape_generic(Dyn(element_dim), Dyn(assembly_dim));
         history.base_step.push(base_step.clone());
 
@@ -269,10 +311,16 @@ pub fn realize_gram(
                 state = better_state;
                 history.backoff_steps.push(backoff_steps);
             },
-            None => return (state.config, false, history)
+            None => return (state.config, ConfigSubspace::zero(), false, history)
         };
     }
-    (state.config, state.loss < tol, history)
+    let success = state.loss < tol;
+    let tangent = if success {
+        ConfigSubspace::symmetric_kernel(hess, assembly_dim)
+    } else {
+        ConfigSubspace::zero()
+    };
+    (state.config, tangent, success, history)
 }
 
 // --- tests ---
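Why the Hessian's kernel is the right notion of tangent space here: the loss f is nonnegative and vanishes exactly on the solution variety, so along any smooth curve of solutions x(t) through the realized configuration x_0 = x(0),

    0 = \frac{d^2}{dt^2}\Big|_{t=0} f(x(t))
      = \dot{x}(0)^\top \operatorname{Hess} f(x_0)\, \dot{x}(0) + \nabla f(x_0) \cdot \ddot{x}(0)
      = \dot{x}(0)^\top \operatorname{Hess} f(x_0)\, \dot{x}(0),

since the gradient vanishes at a minimum. The Hessian is positive semidefinite there, so \dot{x}(0)^\top \operatorname{Hess} f(x_0)\, \dot{x}(0) = 0 forces \operatorname{Hess} f(x_0)\, \dot{x}(0) = 0: every solution velocity lies in the kernel, which `symmetric_kernel` approximates by keeping eigenvectors with eigenvalues below its 1.0e-9 threshold.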
@@ -291,7 +339,7 @@ pub mod irisawa {
 
     use super::*;
 
-    pub fn realize_irisawa_hexlet(scaled_tol: f64) -> (DMatrix<f64>, bool, DescentHistory) {
+    pub fn realize_irisawa_hexlet(scaled_tol: f64) -> (DMatrix<f64>, ConfigSubspace, bool, DescentHistory) {
         let gram = {
             let mut gram_to_be = PartialMatrix::new();
             for s in 0..9 {
@@ -399,7 +447,7 @@ mod tests {
     fn irisawa_hexlet_test() {
         // solve Irisawa's problem
        const SCALED_TOL: f64 = 1.0e-12;
-        let (config, _, _) = realize_irisawa_hexlet(SCALED_TOL);
+        let (config, _, _, _) = realize_irisawa_hexlet(SCALED_TOL);
 
         // check against Irisawa's solution
         let entry_tol = SCALED_TOL.sqrt();
@@ -409,6 +457,61 @@ mod tests {
         }
     }
 
+    #[test]
+    fn tangent_test() {
+        const SCALED_TOL: f64 = 1.0e-12;
+        const ELEMENT_DIM: usize = 5;
+        const ASSEMBLY_DIM: usize = 3;
+        let gram = {
+            let mut gram_to_be = PartialMatrix::new();
+            for j in 0..3 {
+                for k in j..3 {
+                    gram_to_be.push_sym(j, k, if j == k { 1.0 } else { -1.0 });
+                }
+            }
+            gram_to_be
+        };
+        let guess = DMatrix::from_columns(&[
+            sphere(0.0, 0.0, 0.0, -2.0),
+            sphere(0.0, 0.0, 1.0, 1.0),
+            sphere(0.0, 0.0, -1.0, 1.0)
+        ]);
+        let frozen: [_; 5] = std::array::from_fn(|k| (k, 0));
+        let (config, tangent, success, history) = realize_gram(
+            &gram, guess.clone(), &frozen,
+            SCALED_TOL, 0.5, 0.9, 1.1, 200, 110
+        );
+        assert_eq!(config, guess);
+        assert_eq!(success, true);
+        assert_eq!(history.scaled_loss.len(), 1);
+
+        // confirm that the tangent space has dimension five or less
+        let ConfigSubspace(ref tangent_basis) = tangent;
+        assert_eq!(tangent_basis.len(), 5);
+
+        // confirm that the tangent space contains all the motions we expect it
+        // to. since we've already bounded the dimension of the tangent space,
+        // this confirms that the tangent space is what we expect it to be
+        let tangent_motions = vec![
+            basis_matrix((0, 1), ELEMENT_DIM, ASSEMBLY_DIM),
+            basis_matrix((1, 1), ELEMENT_DIM, ASSEMBLY_DIM),
+            basis_matrix((0, 2), ELEMENT_DIM, ASSEMBLY_DIM),
+            basis_matrix((1, 2), ELEMENT_DIM, ASSEMBLY_DIM),
+            DMatrix::<f64>::from_column_slice(ELEMENT_DIM, 3, &[
+                0.0, 0.0, 0.0, 0.0, 0.0,
+                0.0, 0.0, -1.0, -0.25, -1.0,
+                0.0, 0.0, -1.0, 0.25, 1.0
+            ])
+        ];
+        let tol_sq = ((ELEMENT_DIM * ASSEMBLY_DIM) as f64) * SCALED_TOL * SCALED_TOL;
+        for motion in tangent_motions {
+            let motion_proj: DMatrix<_> = motion.column_iter().enumerate().map(
+                |(k, v)| tangent.proj(&v, k)
+            ).sum();
+            assert!((motion - motion_proj).norm_squared() < tol_sq);
+        }
+    }
+
     // at the frozen indices, the optimization steps should have exact zeros,
     // and the realized configuration should match the initial guess
     #[test]
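One helper the test leans on, `basis_matrix`, is not part of this diff; from its call sites it is presumably the standard test utility that builds a matrix with a single unit entry. A sketch of that assumed behavior:

    // assumed behavior of the `basis_matrix` helper (defined elsewhere in the
    // test module, not shown in this diff): 1.0 at `index`, 0.0 everywhere else
    fn basis_matrix(index: (usize, usize), nrows: usize, ncols: usize) -> DMatrix<f64> {
        let mut result = DMatrix::<f64>::zeros(nrows, ncols);
        result[index] = 1.0;
        result
    }

Note also what the three initial assertions pin down: the guess is returned unchanged after a single history entry, so the realization accepted it immediately, yet a five-dimensional tangent space still comes back. That exercises exactly the behavior the commit message promises for guesses that are already solutions.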
@@ -428,7 +531,7 @@ mod tests {
         ]);
         let frozen = [(3, 0), (3, 1)];
         println!();
-        let (config, success, history) = realize_gram(
+        let (config, _, success, history) = realize_gram(
             &gram, guess.clone(), &frozen,
             1.0e-12, 0.5, 0.9, 1.1, 200, 110
         );