Compare commits

101 commits (SHA1):

- 5e117d5877
- 517fd327fa
- f1690b62e1
- cca5a781c4
- abe231126d
- ee1c691787
- 19907838ce
- e3120f7109
- 18ebf3be2c
- edace8e4ea
- 70bd39b9e5
- 25fa108e9b
- 7977b11caf
- 1c9fec36e5
- 721a8716d4
- 4f8f36053f
- 28b1ecb8e9
- b08dbd6f93
- bd0982f821
- 2444649dd1
- b3afd6f555
- 9b39fe56b8
- f5486fb0dd
- 4e3c86fb71
- 7ff1b9cb65
- e6281cdcc6
- fc85d15f83
- 7709c61f71
- edee153e37
- 4a24a01928
- 050e2373a6
- 147e275823
- d121385c18
- 78f8ef8215
- 96f8b6b5f3
- 96afad0c97
- a60624884a
- 93190e99da
- e2d3af2867
- 7cb01bab82
- f47be08d98
- cd18d594e0
- 49655a8d62
- 959e4cc8b5
- 49170671b4
- 0c2869d3f3
- e6d1e0b865
- d481181ef8
- 20b96a9764
- 634e97b659
- 336b940471
- d3c9a08d22
- aceac5e5c4
- 20d072d615
- c67f37c934
- 2efc08d6c0
- 69ab888d5b
- 0173b63e19
- b289d2d4c3
- 163361184b
- ab830b194e
- 3493a798d1
- 121934c4c3
- a4236a34df
- f148552964
- 6db9f5be6c
- 3a721a4cc8
- 8fde202911
- 4afc82034b
- e80adf831d
- 3d7ee98dd6
- c04e29f586
- 01c2af6615
- a40a110788
- f62f44b5a7
- ec48592ef1
- 5e9c5db231
- bf140efaf7
- cbec31f5df
- b9370ceb41
- 85db7b9be0
- c5fe725b1b
- 5bf23fa789
- 206a2df480
- c18cac642b
- 8798683d25
- b9587872d3
- 766d56027c
- 25da6ca062
- e3df765f16
- f1029b3102
- 87763fc458
- 2ef0fdd3e2
- d2cecf69db
- c78a041dc7
- f274119da6
- 1fbeb23194
- 80b210e667
- 81f9b8e040
- 5885189b04
- fd3cbae1b4

@@ -1,7 +1,7 @@
[package]
name = "dyna3"
name = "sketch-outline"
version = "0.1.0"
authors = ["Aaron Fenyes", "Glen Whitney"]
authors = ["Aaron"]
edition = "2021"

[features]
@@ -10,7 +10,6 @@ default = ["console_error_panic_hook"]
[dependencies]
itertools = "0.13.0"
js-sys = "0.3.70"
lazy_static = "1.5.0"
nalgebra = "0.33.0"
rustc-hash = "2.0.0"
slab = "0.4.9"
@@ -26,7 +25,6 @@ console_error_panic_hook = { version = "0.1.7", optional = true }
version = "0.3.69"
features = [
'HtmlCanvasElement',
'HtmlInputElement',
'Performance',
'WebGl2RenderingContext',
'WebGlBuffer',
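
An aside on the `console_error_panic_hook` feature kept in the manifest above: the hook itself is installed at startup rather than in the manifest, and none of that wiring appears in this diff. A minimal sketch of the usual setup, using the crate's documented `set_once` call:

```rust
// Illustrative sketch only; this setup code is not part of the diff above.
#[cfg(feature = "console_error_panic_hook")]
fn set_panic_hook() {
    // report Rust panics in the browser console instead of losing them
    console_error_panic_hook::set_once();
}

#[cfg(not(feature = "console_error_panic_hook"))]
fn set_panic_hook() {}

fn main() {
    set_panic_hook();
    // ... start the app here
}
```
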
@@ -2,10 +2,8 @@
<html>
<head>
<meta charset="utf-8"/>
<title>dyna3</title>
<title>Sketch outline</title>
<link data-trunk rel="css" href="main.css"/>
<link href="https://fonts.bunny.net/css?family=fira-sans:ital,wght@0,400;1,400&display=swap" rel="stylesheet">
<link href="https://fonts.bunny.net/css?family=noto-emoji:wght@400&text=%f0%9f%94%97%e2%9a%a0&display=swap" rel="stylesheet">
</head>
<body></body>
</html>
@@ -1,20 +1,7 @@
:root {
--text: #fcfcfc; /* almost white */
--text-bright: white;
--text-invalid: #f58fc2; /* bright pink */
--border: #555; /* light gray */
--border-focus: #aaa; /* bright gray */
--border-invalid: #70495c; /* dusky pink */
--selection-highlight: #444; /* medium gray */
--page-background: #222; /* dark gray */
--display-background: #020202; /* almost black */
}

body {
margin: 0px;
color: var(--text);
background-color: var(--page-background);
font-family: 'Fira Sans', sans-serif;
color: #fcfcfc;
background-color: #222;
}

/* sidebar */
@@ -29,7 +16,7 @@ body {
padding: 0px;
border-width: 0px 1px 0px 0px;
border-style: solid;
border-color: var(--border);
border-color: #555;
}

/* add-remove */
@@ -46,15 +33,6 @@ body {
font-size: large;
}

/* KLUDGE */
/*
for convenience, we're using emoji as temporary icons for some buttons. these
buttons need to be displayed in an emoji font
*/
#add-remove > button.emoji {
font-family: 'Noto Emoji', sans-serif;
}

/* outline */

#outline {
@@ -73,103 +51,74 @@ summary {
}

summary.selected {
color: var(--text-bright);
background-color: var(--selection-highlight);
color: #fff;
background-color: #444;
}

summary > div, .constraint {
summary > div, .cst {
padding-top: 4px;
padding-bottom: 4px;
}

.element, .constraint {
.elt, .cst {
display: flex;
flex-grow: 1;
padding-left: 8px;
padding-right: 8px;
}

.element-switch {
.elt-switch {
width: 18px;
padding-left: 2px;
text-align: center;
}

details:has(li) .element-switch::after {
details:has(li) .elt-switch::after {
content: '▸';
}

details[open]:has(li) .element-switch::after {
details[open]:has(li) .elt-switch::after {
content: '▾';
}

.element-label {
.elt-label {
flex-grow: 1;
}

.constraint-label {
.cst-label {
flex-grow: 1;
}

.element-representation {
.elt-rep {
display: flex;
}

.element-representation > div {
.elt-rep > div, .cst-rep {
padding: 2px 0px 0px 0px;
font-size: 10pt;
font-variant-numeric: tabular-nums;
text-align: right;
text-align: center;
width: 56px;
}

.constraint {
.cst {
font-style: italic;
}

.constraint.invalid {
color: var(--text-invalid);
}

.constraint > input[type=checkbox] {
.cst > input {
margin: 0px 8px 0px 0px;
}

.constraint > input[type=text] {
color: inherit;
background-color: inherit;
border: 1px solid var(--border);
border-radius: 2px;
}

.constraint.invalid > input[type=text] {
border-color: var(--border-invalid);
}

.status {
width: 20px;
padding-left: 4px;
text-align: center;
font-family: 'Noto Emoji';
font-style: normal;
}

.invalid > .status::after, details:has(.invalid):not([open]) .status::after {
content: '⚠';
color: var(--text-invalid);
}

/* display */

canvas {
float: left;
margin-left: 20px;
margin-top: 20px;
background-color: var(--display-background);
border: 1px solid var(--border);
background-color: #020202;
border: 1px solid #555;
border-radius: 16px;
}

canvas:focus {
border-color: var(--border-focus);
border-color: #aaa;
}
@@ -1,8 +0,0 @@
# based on "Enabling print statements in Cargo tests", by Jon Almeida
#
# https://jonalmeida.com/posts/2015/01/23/print-cargo/
#

cargo test -- --nocapture engine::tests::irisawa_hexlet_test
cargo test -- --nocapture engine::tests::three_spheres_example
cargo test -- --nocapture engine::tests::point_on_sphere_example
@@ -1,130 +1,151 @@
use std::collections::BTreeSet; /* DEBUG */
use sycamore::prelude::*;
use web_sys::{console, wasm_bindgen::JsValue};

use crate::{engine, AppState, assembly::{Assembly, Constraint, Element}};

/* DEBUG */
// load an example assembly for testing. this code will be removed once we've
// built a more formal test assembly system
fn load_gen_assemb(assembly: &Assembly) {
let _ = assembly.try_insert_element(
Element::new(
String::from("gemini_a"),
String::from("Castor"),
[1.00_f32, 0.25_f32, 0.00_f32],
engine::sphere(0.5, 0.5, 0.0, 1.0)
)
Element {
id: String::from("gemini_a"),
label: String::from("Castor"),
color: [1.00_f32, 0.25_f32, 0.00_f32],
rep: engine::sphere(0.5, 0.5, 0.0, 1.0),
constraints: BTreeSet::default()
}
);
let _ = assembly.try_insert_element(
Element::new(
String::from("gemini_b"),
String::from("Pollux"),
[0.00_f32, 0.25_f32, 1.00_f32],
engine::sphere(-0.5, -0.5, 0.0, 1.0)
)
Element {
id: String::from("gemini_b"),
label: String::from("Pollux"),
color: [0.00_f32, 0.25_f32, 1.00_f32],
rep: engine::sphere(-0.5, -0.5, 0.0, 1.0),
constraints: BTreeSet::default()
}
);
let _ = assembly.try_insert_element(
Element::new(
String::from("ursa_major"),
String::from("Ursa major"),
[0.25_f32, 0.00_f32, 1.00_f32],
engine::sphere(-0.5, 0.5, 0.0, 0.75)
)
Element {
id: String::from("ursa_major"),
label: String::from("Ursa major"),
color: [0.25_f32, 0.00_f32, 1.00_f32],
rep: engine::sphere(-0.5, 0.5, 0.0, 0.75),
constraints: BTreeSet::default()
}
);
let _ = assembly.try_insert_element(
Element::new(
String::from("ursa_minor"),
String::from("Ursa minor"),
[0.25_f32, 1.00_f32, 0.00_f32],
engine::sphere(0.5, -0.5, 0.0, 0.5)
)
Element {
id: String::from("ursa_minor"),
label: String::from("Ursa minor"),
color: [0.25_f32, 1.00_f32, 0.00_f32],
rep: engine::sphere(0.5, -0.5, 0.0, 0.5),
constraints: BTreeSet::default()
}
);
let _ = assembly.try_insert_element(
Element::new(
String::from("moon_deimos"),
String::from("Deimos"),
[0.75_f32, 0.75_f32, 0.00_f32],
engine::sphere(0.0, 0.15, 1.0, 0.25)
)
Element {
id: String::from("moon_deimos"),
label: String::from("Deimos"),
color: [0.75_f32, 0.75_f32, 0.00_f32],
rep: engine::sphere(0.0, 0.15, 1.0, 0.25),
constraints: BTreeSet::default()
}
);
let _ = assembly.try_insert_element(
Element::new(
String::from("moon_phobos"),
String::from("Phobos"),
[0.00_f32, 0.75_f32, 0.50_f32],
engine::sphere(0.0, -0.15, -1.0, 0.25)
)
Element {
id: String::from("moon_phobos"),
label: String::from("Phobos"),
color: [0.00_f32, 0.75_f32, 0.50_f32],
rep: engine::sphere(0.0, -0.15, -1.0, 0.25),
constraints: BTreeSet::default()
}
);
assembly.insert_constraint(
Constraint {
args: (
assembly.elements_by_id.with_untracked(|elts_by_id| elts_by_id["gemini_a"]),
assembly.elements_by_id.with_untracked(|elts_by_id| elts_by_id["gemini_b"])
),
rep: 0.5,
active: create_signal(true)
}
);
}

/* DEBUG */
// load an example assembly for testing. this code will be removed once we've
// built a more formal test assembly system
fn load_low_curv_assemb(assembly: &Assembly) {
let a = 0.75_f64.sqrt();
let _ = assembly.try_insert_element(
Element::new(
"central".to_string(),
"Central".to_string(),
[0.75_f32, 0.75_f32, 0.75_f32],
engine::sphere(0.0, 0.0, 0.0, 1.0)
)
Element {
id: "central".to_string(),
label: "Central".to_string(),
color: [0.75_f32, 0.75_f32, 0.75_f32],
rep: engine::sphere(0.0, 0.0, 0.0, 1.0),
constraints: BTreeSet::default()
}
);
let _ = assembly.try_insert_element(
Element::new(
"assemb_plane".to_string(),
"Assembly plane".to_string(),
[0.75_f32, 0.75_f32, 0.75_f32],
engine::sphere_with_offset(0.0, 0.0, 1.0, 0.0, 0.0)
)
Element {
id: "assemb_plane".to_string(),
label: "Assembly plane".to_string(),
color: [0.75_f32, 0.75_f32, 0.75_f32],
rep: engine::sphere_with_offset(0.0, 0.0, 1.0, 0.0, 0.0),
constraints: BTreeSet::default()
}
);
let _ = assembly.try_insert_element(
Element::new(
"side1".to_string(),
"Side 1".to_string(),
[1.00_f32, 0.00_f32, 0.25_f32],
engine::sphere_with_offset(1.0, 0.0, 0.0, 1.0, 0.0)
)
Element {
id: "side1".to_string(),
label: "Side 1".to_string(),
color: [1.00_f32, 0.00_f32, 0.25_f32],
rep: engine::sphere_with_offset(1.0, 0.0, 0.0, 1.0, 0.0),
constraints: BTreeSet::default()
}
);
let _ = assembly.try_insert_element(
Element::new(
"side2".to_string(),
"Side 2".to_string(),
[0.25_f32, 1.00_f32, 0.00_f32],
engine::sphere_with_offset(-0.5, a, 0.0, 1.0, 0.0)
)
Element {
id: "side2".to_string(),
label: "Side 2".to_string(),
color: [0.25_f32, 1.00_f32, 0.00_f32],
rep: engine::sphere_with_offset(-0.5, a, 0.0, 1.0, 0.0),
constraints: BTreeSet::default()
}
);
let _ = assembly.try_insert_element(
Element::new(
"side3".to_string(),
"Side 3".to_string(),
[0.00_f32, 0.25_f32, 1.00_f32],
engine::sphere_with_offset(-0.5, -a, 0.0, 1.0, 0.0)
)
Element {
id: "side3".to_string(),
label: "Side 3".to_string(),
color: [0.00_f32, 0.25_f32, 1.00_f32],
rep: engine::sphere_with_offset(-0.5, -a, 0.0, 1.0, 0.0),
constraints: BTreeSet::default()
}
);
let _ = assembly.try_insert_element(
Element::new(
"corner1".to_string(),
"Corner 1".to_string(),
[0.75_f32, 0.75_f32, 0.75_f32],
engine::sphere(-4.0/3.0, 0.0, 0.0, 1.0/3.0)
)
Element {
id: "corner1".to_string(),
label: "Corner 1".to_string(),
color: [0.75_f32, 0.75_f32, 0.75_f32],
rep: engine::sphere(-4.0/3.0, 0.0, 0.0, 1.0/3.0),
constraints: BTreeSet::default()
}
);
let _ = assembly.try_insert_element(
Element::new(
"corner2".to_string(),
"Corner 2".to_string(),
[0.75_f32, 0.75_f32, 0.75_f32],
engine::sphere(2.0/3.0, -4.0/3.0 * a, 0.0, 1.0/3.0)
)
Element {
id: "corner2".to_string(),
label: "Corner 2".to_string(),
color: [0.75_f32, 0.75_f32, 0.75_f32],
rep: engine::sphere(2.0/3.0, -4.0/3.0 * a, 0.0, 1.0/3.0),
constraints: BTreeSet::default()
}
);
let _ = assembly.try_insert_element(
Element::new(
String::from("corner3"),
String::from("Corner 3"),
[0.75_f32, 0.75_f32, 0.75_f32],
engine::sphere(2.0/3.0, 4.0/3.0 * a, 0.0, 1.0/3.0)
)
Element {
id: String::from("corner3"),
label: String::from("Corner 3"),
color: [0.75_f32, 0.75_f32, 0.75_f32],
rep: engine::sphere(2.0/3.0, 4.0/3.0 * a, 0.0, 1.0/3.0),
constraints: BTreeSet::default()
}
);
}

@@ -177,63 +198,44 @@ pub fn AddRemove() -> View {
}
) { "+" }
button(
class="emoji", /* KLUDGE */ // for convenience, we're using an emoji as a temporary icon for this button
disabled={
let state = use_context::<AppState>();
state.selection.with(|sel| sel.len() != 2)
},
on:click=|_| {
let state = use_context::<AppState>();
let subjects = state.selection.with(
let args = state.selection.with(
|sel| {
let subject_vec: Vec<_> = sel.into_iter().collect();
(subject_vec[0].clone(), subject_vec[1].clone())
let arg_vec: Vec<_> = sel.into_iter().collect();
(arg_vec[0].clone(), arg_vec[1].clone())
}
);
let lorentz_prod = create_signal(0.0);
let lorentz_prod_valid = create_signal(false);
let active = create_signal(true);
state.assembly.insert_constraint(Constraint {
subjects: subjects,
lorentz_prod: lorentz_prod,
lorentz_prod_text: create_signal(String::new()),
lorentz_prod_valid: lorentz_prod_valid,
active: active,
args: args,
rep: 0.0,
active: create_signal(true)
});
state.selection.update(|sel| sel.clear());

/* DEBUG */
// print updated constraint list
console::log_1(&JsValue::from("Constraints:"));
console::log_1(&JsValue::from("constraints:"));
state.assembly.constraints.with(|csts| {
for (_, cst) in csts.into_iter() {
console::log_5(
&JsValue::from("  "),
&JsValue::from(cst.subjects.0),
&JsValue::from(cst.subjects.1),
&JsValue::from(cst.args.0),
&JsValue::from(cst.args.1),
&JsValue::from(":"),
&JsValue::from(cst.lorentz_prod.get_untracked())
&JsValue::from(cst.rep)
);
}
});

// update the realization when the constraint becomes active
// and valid, or is edited while active and valid
create_effect(move || {
console::log_1(&JsValue::from(
format!("Constraint ({}, {}) updated", subjects.0, subjects.1)
));
lorentz_prod.track();
if active.get() && lorentz_prod_valid.get() {
state.assembly.realize();
}
});
}
) { "🔗" }
select(bind:value=assembly_name) { /* DEBUG */ // example assembly chooser
select(bind:value=assembly_name) { /* DEBUG */
option(value="general") { "General" }
option(value="low-curv") { "Low-curvature" }
option(value="empty") { "Empty" }
}
}
}
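
The 🔗 button handler above leans on Sycamore's fine-grained reactivity: the `create_effect` closure re-runs whenever a signal it reads or explicitly tracks changes, which is what re-realizes the assembly when a constraint is activated or edited while valid. Below is a minimal standalone sketch of that pattern; it assumes Sycamore 0.9's `create_root`, `create_signal`, and `create_effect`, and is an illustration of the mechanism rather than code from the diff.

```rust
use sycamore::reactive::*;

fn main() {
    // `create_root` sets up a reactive scope; inside a component body the
    // framework provides this scope for you
    let _root = create_root(|| {
        let active = create_signal(false);
        let lorentz_prod = create_signal(0.0_f64);

        // re-runs whenever `active` changes or `lorentz_prod` is edited
        create_effect(move || {
            lorentz_prod.track();
            if active.get() {
                // the real handler calls `state.assembly.realize()` here
            }
        });

        lorentz_prod.set(-1.0); // triggers the effect
        active.set(true);       // triggers it again
    });
}
```
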
@@ -1,56 +1,22 @@
use nalgebra::{DMatrix, DVector};
use nalgebra::DVector;
use rustc_hash::FxHashMap;
use slab::Slab;
use std::collections::BTreeSet;
use sycamore::prelude::*;
use web_sys::{console, wasm_bindgen::JsValue}; /* DEBUG */

use crate::engine::{realize_gram, PartialMatrix};

// the types of the keys we use to access an assembly's elements and constraints
pub type ElementKey = usize;
pub type ConstraintKey = usize;

pub type ElementColor = [f32; 3];

#[derive(Clone, PartialEq)]
pub struct Element {
pub id: String,
pub label: String,
pub color: ElementColor,
pub representation: Signal<DVector<f64>>,
pub constraints: Signal<BTreeSet<ConstraintKey>>,

// the configuration matrix column index that was assigned to this element
// last time the assembly was realized
column_index: usize
pub color: [f32; 3],
pub rep: DVector<f64>,
pub constraints: BTreeSet<usize>
}

impl Element {
pub fn new(
id: String,
label: String,
color: ElementColor,
representation: DVector<f64>
) -> Element {
Element {
id: id,
label: label,
color: color,
representation: create_signal(representation),
constraints: create_signal(BTreeSet::default()),
column_index: 0
}
}
}


#[derive(Clone)]
pub struct Constraint {
pub subjects: (ElementKey, ElementKey),
pub lorentz_prod: Signal<f64>,
pub lorentz_prod_text: Signal<String>,
pub lorentz_prod_valid: Signal<bool>,
pub args: (usize, usize),
pub rep: f64,
pub active: Signal<bool>
}

@@ -62,7 +28,7 @@ pub struct Assembly {
pub constraints: Signal<Slab<Constraint>>,

// indexing
pub elements_by_id: Signal<FxHashMap<String, ElementKey>>
pub elements_by_id: Signal<FxHashMap<String, usize>>
}

impl Assembly {
@@ -74,8 +40,6 @@ impl Assembly {
}
}

// --- inserting elements and constraints ---

// insert an element into the assembly without checking whether we already
// have an element with the same identifier. any element that does have the
// same identifier will get kicked out of the `elements_by_id` index
@@ -108,103 +72,22 @@ impl Assembly {

// create and insert a new element
self.insert_element_unchecked(
Element::new(
id,
format!("Sphere {}", id_num),
[0.75_f32, 0.75_f32, 0.75_f32],
DVector::<f64>::from_column_slice(&[0.0, 0.0, 0.0, 0.5, -0.5])
)
Element {
id: id,
label: format!("Sphere {}", id_num),
color: [0.75_f32, 0.75_f32, 0.75_f32],
rep: DVector::<f64>::from_column_slice(&[0.0, 0.0, 0.0, 0.5, -0.5]),
constraints: BTreeSet::default()
}
);
}

pub fn insert_constraint(&self, constraint: Constraint) {
let subjects = constraint.subjects;
let args = constraint.args;
let key = self.constraints.update(|csts| csts.insert(constraint));
let subject_constraints = self.elements.with(
|elts| (elts[subjects.0].constraints, elts[subjects.1].constraints)
);
subject_constraints.0.update(|csts| csts.insert(key));
subject_constraints.1.update(|csts| csts.insert(key));
}

// --- realization ---

pub fn realize(&self) {
// index the elements
self.elements.update_silent(|elts| {
for (index, (_, elt)) in elts.into_iter().enumerate() {
elt.column_index = index;
}
});

// set up the Gram matrix and the initial configuration matrix
let (gram, guess) = self.elements.with_untracked(|elts| {
// set up the off-diagonal part of the Gram matrix
let mut gram_to_be = PartialMatrix::new();
self.constraints.with_untracked(|csts| {
for (_, cst) in csts {
if cst.active.get_untracked() && cst.lorentz_prod_valid.get_untracked() {
let subjects = cst.subjects;
let row = elts[subjects.0].column_index;
let col = elts[subjects.1].column_index;
gram_to_be.push_sym(row, col, cst.lorentz_prod.get_untracked());
}
}
});

// set up the initial configuration matrix and the diagonal of the
// Gram matrix
let mut guess_to_be = DMatrix::<f64>::zeros(5, elts.len());
for (_, elt) in elts {
let index = elt.column_index;
gram_to_be.push_sym(index, index, 1.0);
guess_to_be.set_column(index, &elt.representation.get_clone_untracked());
}

(gram_to_be, guess_to_be)
});

/* DEBUG */
// log the Gram matrix
console::log_1(&JsValue::from("Gram matrix:"));
gram.log_to_console();

/* DEBUG */
// log the initial configuration matrix
console::log_1(&JsValue::from("Old configuration:"));
for j in 0..guess.nrows() {
let mut row_str = String::new();
for k in 0..guess.ncols() {
row_str.push_str(format!(" {:>8.3}", guess[(j, k)]).as_str());
}
console::log_1(&JsValue::from(row_str));
}

// look for a configuration with the given Gram matrix
let (config, success, history) = realize_gram(
&gram, guess, &[],
1.0e-12, 0.5, 0.9, 1.1, 200, 110
);

/* DEBUG */
// report the outcome of the search
console::log_1(&JsValue::from(
if success {
"Target accuracy achieved!"
} else {
"Failed to reach target accuracy"
}
));
console::log_2(&JsValue::from("Steps:"), &JsValue::from(history.scaled_loss.len() - 1));
console::log_2(&JsValue::from("Loss:"), &JsValue::from(*history.scaled_loss.last().unwrap()));

if success {
// read out the solution
for (_, elt) in self.elements.get_clone_untracked() {
elt.representation.update(
|rep| rep.set_column(0, &config.column(elt.column_index))
);
}
}
self.elements.update(|elts| {
elts[args.0].constraints.insert(key);
elts[args.1].constraints.insert(key);
})
}
}
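
Note the double bookkeeping in `insert_constraint` above: the constraint is stored in a slab, and the key handed back by the slab is then recorded in the constraint set of each of its two arguments, so the outline can list the constraints attached to an element. A standalone sketch of just that bookkeeping, with hypothetical plain structs in place of the signal-wrapped ones:

```rust
use std::collections::BTreeSet;
use slab::Slab;

struct Elt { constraints: BTreeSet<usize> }
struct Cst { args: (usize, usize) }

fn main() {
    let mut elements: Slab<Elt> = Slab::new();
    let a = elements.insert(Elt { constraints: BTreeSet::new() });
    let b = elements.insert(Elt { constraints: BTreeSet::new() });

    let mut constraints: Slab<Cst> = Slab::new();
    let key = constraints.insert(Cst { args: (a, b) });

    // record the new constraint under both of its arguments, so each element
    // can list the constraints that involve it
    elements[a].constraints.insert(key);
    elements[b].constraints.insert(key);

    let args = constraints[key].args;
    assert!(elements[args.0].constraints.contains(&key));
    assert!(elements[args.1].constraints.contains(&key));
}
```
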
@@ -103,11 +103,7 @@ pub fn Display() -> View {
// change listener
let scene_changed = create_signal(true);
create_effect(move || {
state.assembly.elements.with(|elts| {
for (_, elt) in elts {
elt.representation.track();
}
});
state.assembly.elements.track();
state.selection.track();
scene_changed.set(true);
});
@@ -299,40 +295,23 @@ pub fn Display() -> View {
let assembly_to_world = &location * &orientation;

// get the assembly
let (
elt_cnt,
reps_world,
colors,
highlights
) = state.assembly.elements.with(|elts| {
(
// number of elements
elts.len() as i32,

// representation vectors in world coordinates
elts.iter().map(
|(_, elt)| elt.representation.with(|rep| &assembly_to_world * rep)
).collect::<Vec<_>>(),

// colors
elts.iter().map(|(key, elt)| {
if state.selection.with(|sel| sel.contains(&key)) {
elt.color.map(|ch| 0.2 + 0.8*ch)
} else {
elt.color
}
}).collect::<Vec<_>>(),

// highlight levels
elts.iter().map(|(key, _)| {
if state.selection.with(|sel| sel.contains(&key)) {
1.0_f32
} else {
HIGHLIGHT
}
}).collect::<Vec<_>>()
)
});
let elements = state.assembly.elements.get_clone();
let element_iter = (&elements).into_iter();
let reps_world: Vec<_> = element_iter.clone().map(|(_, elt)| &assembly_to_world * &elt.rep).collect();
let colors: Vec<_> = element_iter.clone().map(|(key, elt)|
if state.selection.with(|sel| sel.contains(&key)) {
elt.color.map(|ch| 0.2 + 0.8*ch)
} else {
elt.color
}
).collect();
let highlights: Vec<_> = element_iter.map(|(key, _)|
if state.selection.with(|sel| sel.contains(&key)) {
1.0_f32
} else {
HIGHLIGHT
}
).collect();

// set the resolution
let width = canvas.width() as f32;
@@ -341,7 +320,7 @@ pub fn Display() -> View {
ctx.uniform1f(shortdim_loc.as_ref(), width.min(height));

// pass the assembly
ctx.uniform1i(sphere_cnt_loc.as_ref(), elt_cnt);
ctx.uniform1i(sphere_cnt_loc.as_ref(), elements.len() as i32);
for n in 0..reps_world.len() {
let v = &reps_world[n];
ctx.uniform3f(
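
The render pass above derives three parallel per-element arrays on every scene change: representation vectors mapped to world coordinates, draw colors brightened for selected elements, and highlight levels. Here is a standalone sketch of that derivation with plain collections in place of the reactive signals; the `HIGHLIGHT` value is a stand-in, not the constant from the diff.

```rust
use nalgebra::{DMatrix, DVector};
use std::collections::HashSet;

const HIGHLIGHT: f32 = 0.2; // stand-in for the shader constant used above

fn main() {
    let assembly_to_world = DMatrix::<f64>::identity(5, 5);
    let reps = [
        DVector::from_column_slice(&[0.5, 0.5, 0.0, 0.5, 0.0]),
        DVector::from_column_slice(&[-0.5, -0.5, 0.0, 0.5, 0.0]),
    ];
    let base_colors: [[f32; 3]; 2] = [[1.00, 0.25, 0.00], [0.00, 0.25, 1.00]];
    let selection: HashSet<usize> = HashSet::from([1]);

    // representation vectors in world coordinates
    let reps_world: Vec<DVector<f64>> =
        reps.iter().map(|rep| &assembly_to_world * rep).collect();

    // colors, brightened for selected elements
    let colors: Vec<[f32; 3]> = base_colors.iter().enumerate().map(|(key, color)|
        if selection.contains(&key) {
            color.map(|ch| 0.2 + 0.8 * ch)
        } else {
            *color
        }
    ).collect();

    // highlight levels
    let highlights: Vec<f32> = (0..reps.len()).map(|key|
        if selection.contains(&key) { 1.0 } else { HIGHLIGHT }
    ).collect();

    assert_eq!((reps_world.len(), colors.len(), highlights.len()), (2, 2, 2));
}
```
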
@@ -1,13 +1,4 @@
use lazy_static::lazy_static;
use nalgebra::{Const, DMatrix, DVector, Dyn};
use web_sys::{console, wasm_bindgen::JsValue}; /* DEBUG */

// --- elements ---

#[cfg(test)]
pub fn point(x: f64, y: f64, z: f64) -> DVector<f64> {
DVector::from_column_slice(&[x, y, z, 0.5, 0.5*(x*x + y*y + z*z)])
}
use nalgebra::DVector;

// the sphere with the given center and radius, with inward-pointing normals
pub fn sphere(center_x: f64, center_y: f64, center_z: f64, radius: f64) -> DVector<f64> {
@@ -33,508 +24,4 @@ pub fn sphere_with_offset(dir_x: f64, dir_y: f64, dir_z: f64, off: f64, curv: f6
0.5 * curv,
off * (1.0 + 0.5 * off * curv)
])
}

// --- partial matrices ---

struct MatrixEntry {
index: (usize, usize),
value: f64
}

pub struct PartialMatrix(Vec<MatrixEntry>);

impl PartialMatrix {
pub fn new() -> PartialMatrix {
PartialMatrix(Vec::<MatrixEntry>::new())
}

pub fn push_sym(&mut self, row: usize, col: usize, value: f64) {
let PartialMatrix(entries) = self;
entries.push(MatrixEntry { index: (row, col), value: value });
if row != col {
entries.push(MatrixEntry { index: (col, row), value: value });
}
}

/* DEBUG */
pub fn log_to_console(&self) {
let PartialMatrix(entries) = self;
for ent in entries {
let ent_str = format!("  {} {} {}", ent.index.0, ent.index.1, ent.value);
console::log_1(&JsValue::from(ent_str.as_str()));
}
}

fn proj(&self, a: &DMatrix<f64>) -> DMatrix<f64> {
let mut result = DMatrix::<f64>::zeros(a.nrows(), a.ncols());
let PartialMatrix(entries) = self;
for ent in entries {
result[ent.index] = a[ent.index];
}
result
}

fn sub_proj(&self, rhs: &DMatrix<f64>) -> DMatrix<f64> {
let mut result = DMatrix::<f64>::zeros(rhs.nrows(), rhs.ncols());
let PartialMatrix(entries) = self;
for ent in entries {
result[ent.index] = ent.value - rhs[ent.index];
}
result
}
}

// --- descent history ---

pub struct DescentHistory {
pub config: Vec<DMatrix<f64>>,
pub scaled_loss: Vec<f64>,
pub neg_grad: Vec<DMatrix<f64>>,
pub min_eigval: Vec<f64>,
pub base_step: Vec<DMatrix<f64>>,
pub backoff_steps: Vec<i32>
}

impl DescentHistory {
fn new() -> DescentHistory {
DescentHistory {
config: Vec::<DMatrix<f64>>::new(),
scaled_loss: Vec::<f64>::new(),
neg_grad: Vec::<DMatrix<f64>>::new(),
min_eigval: Vec::<f64>::new(),
base_step: Vec::<DMatrix<f64>>::new(),
backoff_steps: Vec::<i32>::new(),
}
}
}

// --- gram matrix realization ---

// the Lorentz form
lazy_static! {
static ref Q: DMatrix<f64> = DMatrix::from_row_slice(5, 5, &[
1.0, 0.0, 0.0, 0.0, 0.0,
0.0, 1.0, 0.0, 0.0, 0.0,
0.0, 0.0, 1.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, -2.0,
0.0, 0.0, 0.0, -2.0, 0.0
]);
}

struct SearchState {
config: DMatrix<f64>,
err_proj: DMatrix<f64>,
loss: f64
}

impl SearchState {
fn from_config(gram: &PartialMatrix, config: DMatrix<f64>) -> SearchState {
let err_proj = gram.sub_proj(&(config.tr_mul(&*Q) * &config));
let loss = err_proj.norm_squared();
SearchState {
config: config,
err_proj: err_proj,
loss: loss
}
}
}

fn basis_matrix(index: (usize, usize), nrows: usize, ncols: usize) -> DMatrix<f64> {
let mut result = DMatrix::<f64>::zeros(nrows, ncols);
result[index] = 1.0;
result
}

// use backtracking line search to find a better configuration
fn seek_better_config(
gram: &PartialMatrix,
state: &SearchState,
base_step: &DMatrix<f64>,
base_target_improvement: f64,
min_efficiency: f64,
backoff: f64,
max_backoff_steps: i32
) -> Option<(SearchState, i32)> {
let mut rate = 1.0;
for backoff_steps in 0..max_backoff_steps {
let trial_config = &state.config + rate * base_step;
let trial_state = SearchState::from_config(gram, trial_config);
let improvement = state.loss - trial_state.loss;
if improvement >= min_efficiency * rate * base_target_improvement {
return Some((trial_state, backoff_steps));
}
rate *= backoff;
}
None
}

// seek a matrix `config` for which `config' * Q * config` matches the partial
// matrix `gram`. use gradient descent starting from `guess`
pub fn realize_gram(
gram: &PartialMatrix,
guess: DMatrix<f64>,
frozen: &[(usize, usize)],
scaled_tol: f64,
min_efficiency: f64,
backoff: f64,
reg_scale: f64,
max_descent_steps: i32,
max_backoff_steps: i32
) -> (DMatrix<f64>, bool, DescentHistory) {
// start the descent history
let mut history = DescentHistory::new();

// find the dimension of the search space
let element_dim = guess.nrows();
let assembly_dim = guess.ncols();
let total_dim = element_dim * assembly_dim;

// scale the tolerance
let scale_adjustment = (gram.0.len() as f64).sqrt();
let tol = scale_adjustment * scaled_tol;

// convert the frozen indices to stacked format
let frozen_stacked: Vec<usize> = frozen.into_iter().map(
|index| index.1*element_dim + index.0
).collect();

// use Newton's method with backtracking and gradient descent backup
let mut state = SearchState::from_config(gram, guess);
for _ in 0..max_descent_steps {
// stop if the loss is tolerably low
history.config.push(state.config.clone());
history.scaled_loss.push(state.loss / scale_adjustment);
if state.loss < tol { break; }

// find the negative gradient of the loss function
let neg_grad = 4.0 * &*Q * &state.config * &state.err_proj;
let mut neg_grad_stacked = neg_grad.clone().reshape_generic(Dyn(total_dim), Const::<1>);
history.neg_grad.push(neg_grad.clone());

// find the negative Hessian of the loss function
let mut hess_cols = Vec::<DVector<f64>>::with_capacity(total_dim);
for col in 0..assembly_dim {
for row in 0..element_dim {
let index = (row, col);
let basis_mat = basis_matrix(index, element_dim, assembly_dim);
let neg_d_err =
basis_mat.tr_mul(&*Q) * &state.config
+ state.config.tr_mul(&*Q) * &basis_mat;
let neg_d_err_proj = gram.proj(&neg_d_err);
let deriv_grad = 4.0 * &*Q * (
-&basis_mat * &state.err_proj
+ &state.config * &neg_d_err_proj
);
hess_cols.push(deriv_grad.reshape_generic(Dyn(total_dim), Const::<1>));
}
}
let mut hess = DMatrix::from_columns(hess_cols.as_slice());

// regularize the Hessian
let min_eigval = hess.symmetric_eigenvalues().min();
if min_eigval <= 0.0 {
hess -= reg_scale * min_eigval * DMatrix::identity(total_dim, total_dim);
}
history.min_eigval.push(min_eigval);

// project the negative gradient and negative Hessian onto the
// orthogonal complement of the frozen subspace
let zero_col = DVector::zeros(total_dim);
let zero_row = zero_col.transpose();
for &k in &frozen_stacked {
neg_grad_stacked[k] = 0.0;
hess.set_row(k, &zero_row);
hess.set_column(k, &zero_col);
hess[(k, k)] = 1.0;
}

// compute the Newton step
/*
we need to either handle or eliminate the case where the minimum
eigenvalue of the Hessian is zero, so the regularized Hessian is
singular. right now, this causes the Cholesky decomposition to return
`None`, leading to a panic when we unwrap
*/
let base_step_stacked = hess.cholesky().unwrap().solve(&neg_grad_stacked);
let base_step = base_step_stacked.reshape_generic(Dyn(element_dim), Dyn(assembly_dim));
history.base_step.push(base_step.clone());

// use backtracking line search to find a better configuration
match seek_better_config(
gram, &state, &base_step, neg_grad.dot(&base_step),
min_efficiency, backoff, max_backoff_steps
) {
Some((better_state, backoff_steps)) => {
state = better_state;
history.backoff_steps.push(backoff_steps);
},
None => return (state.config, false, history)
};
}
(state.config, state.loss < tol, history)
}

// --- tests ---

#[cfg(test)]
mod tests {
use std::{array, f64::consts::PI};

use super::*;

#[test]
fn sub_proj_test() {
let target = PartialMatrix(vec![
MatrixEntry { index: (0, 0), value: 19.0 },
MatrixEntry { index: (0, 2), value: 39.0 },
MatrixEntry { index: (1, 1), value: 59.0 },
MatrixEntry { index: (1, 2), value: 69.0 }
]);
let attempt = DMatrix::<f64>::from_row_slice(2, 3, &[
1.0, 2.0, 3.0,
4.0, 5.0, 6.0
]);
let expected_result = DMatrix::<f64>::from_row_slice(2, 3, &[
18.0, 0.0, 36.0,
0.0, 54.0, 63.0
]);
assert_eq!(target.sub_proj(&attempt), expected_result);
}

#[test]
fn zero_loss_test() {
let gram = PartialMatrix({
let mut entries = Vec::<MatrixEntry>::new();
for j in 0..3 {
for k in 0..3 {
entries.push(MatrixEntry {
index: (j, k),
value: if j == k { 1.0 } else { -1.0 }
});
}
}
entries
});
let config = {
let a: f64 = 0.75_f64.sqrt();
DMatrix::from_columns(&[
sphere(1.0, 0.0, 0.0, a),
sphere(-0.5, a, 0.0, a),
sphere(-0.5, -a, 0.0, a)
])
};
let state = SearchState::from_config(&gram, config);
assert!(state.loss.abs() < f64::EPSILON);
}

// this problem is from a sangaku by Irisawa Shintarō Hiroatsu. the article
// below includes a nice translation of the problem statement, which was
// recorded in Uchida Itsumi's book _Kokon sankan_ (_Mathematics, Past and
// Present_)
//
// "Japan's 'Wasan' Mathematical Tradition", by Abe Haruki
// https://www.nippon.com/en/japan-topics/c12801/
//
#[test]
fn irisawa_hexlet_test() {
let gram = PartialMatrix({
let mut entries = Vec::<MatrixEntry>::new();
for s in 0..9 {
// each sphere is represented by a spacelike vector
entries.push(MatrixEntry { index: (s, s), value: 1.0 });

// the circumscribing sphere is tangent to all of the other
// spheres, with matching orientation
if s > 0 {
entries.push(MatrixEntry { index: (0, s), value: 1.0 });
entries.push(MatrixEntry { index: (s, 0), value: 1.0 });
}

if s > 2 {
// each chain sphere is tangent to the "sun" and "moon"
// spheres, with opposing orientation
for n in 1..3 {
entries.push(MatrixEntry { index: (s, n), value: -1.0 });
entries.push(MatrixEntry { index: (n, s), value: -1.0 });
}

// each chain sphere is tangent to the next chain sphere,
// with opposing orientation
let s_next = 3 + (s-2) % 6;
entries.push(MatrixEntry { index: (s, s_next), value: -1.0 });
entries.push(MatrixEntry { index: (s_next, s), value: -1.0 });
}
}
entries
});
let guess = DMatrix::from_columns(
[
sphere(0.0, 0.0, 0.0, 15.0),
sphere(0.0, 0.0, -9.0, 5.0),
sphere(0.0, 0.0, 11.0, 3.0)
].into_iter().chain(
(1..=6).map(
|k| {
let ang = (k as f64) * PI/3.0;
sphere(9.0 * ang.cos(), 9.0 * ang.sin(), 0.0, 2.5)
}
)
).collect::<Vec<_>>().as_slice()
);
let frozen: [(usize, usize); 4] = array::from_fn(|k| (3, k));
const SCALED_TOL: f64 = 1.0e-12;
let (config, success, history) = realize_gram(
&gram, guess, &frozen,
SCALED_TOL, 0.5, 0.9, 1.1, 200, 110
);
let entry_tol = SCALED_TOL.sqrt();
let solution_diams = [30.0, 10.0, 6.0, 5.0, 15.0, 10.0, 3.75, 2.5, 2.0 + 8.0/11.0];
for (k, diam) in solution_diams.into_iter().enumerate() {
assert!((config[(3, k)] - 1.0 / diam).abs() < entry_tol);
}
print!("\nCompleted Gram matrix:{}", config.tr_mul(&*Q) * &config);
if success {
println!("Target accuracy achieved!");
} else {
println!("Failed to reach target accuracy");
}
println!("Steps: {}", history.scaled_loss.len() - 1);
println!("Loss: {}", history.scaled_loss.last().unwrap());
if success {
println!("\nChain diameters:");
println!("  {} sun (given)", 1.0 / config[(3, 3)]);
for k in 4..9 {
println!("  {} sun", 1.0 / config[(3, k)]);
}
}
println!("\nStep │ Loss\n─────┼────────────────────────────────");
for (step, scaled_loss) in history.scaled_loss.into_iter().enumerate() {
println!("{:<4} │ {}", step, scaled_loss);
}
}

// --- process inspection examples ---

// these tests are meant for human inspection, not automated use. run them
// one at a time in `--nocapture` mode and read through the results and
// optimization histories that they print out. the `run-examples` script
// will run all of them

#[test]
fn three_spheres_example() {
let gram = PartialMatrix({
let mut entries = Vec::<MatrixEntry>::new();
for j in 0..3 {
for k in 0..3 {
entries.push(MatrixEntry {
index: (j, k),
value: if j == k { 1.0 } else { -1.0 }
});
}
}
entries
});
let guess = {
let a: f64 = 0.75_f64.sqrt();
DMatrix::from_columns(&[
sphere(1.0, 0.0, 0.0, 1.0),
sphere(-0.5, a, 0.0, 1.0),
sphere(-0.5, -a, 0.0, 1.0)
])
};
println!();
let (config, success, history) = realize_gram(
&gram, guess, &[],
1.0e-12, 0.5, 0.9, 1.1, 200, 110
);
print!("\nCompleted Gram matrix:{}", config.tr_mul(&*Q) * &config);
if success {
println!("Target accuracy achieved!");
} else {
println!("Failed to reach target accuracy");
}
println!("Steps: {}", history.scaled_loss.len() - 1);
println!("Loss: {}", history.scaled_loss.last().unwrap());
println!("\nStep │ Loss\n─────┼────────────────────────────────");
for (step, scaled_loss) in history.scaled_loss.into_iter().enumerate() {
println!("{:<4} │ {}", step, scaled_loss);
}
}

#[test]
fn point_on_sphere_example() {
let gram = PartialMatrix({
let mut entries = Vec::<MatrixEntry>::new();
for j in 0..2 {
for k in 0..2 {
entries.push(MatrixEntry {
index: (j, k),
value: if (j, k) == (1, 1) { 1.0 } else { 0.0 }
});
}
}
entries
});
let guess = DMatrix::from_columns(&[
point(0.0, 0.0, 2.0),
sphere(0.0, 0.0, 0.0, 1.0)
]);
let frozen = [(3, 0)];
println!();
let (config, success, history) = realize_gram(
&gram, guess, &frozen,
1.0e-12, 0.5, 0.9, 1.1, 200, 110
);
print!("\nCompleted Gram matrix:{}", config.tr_mul(&*Q) * &config);
print!("Configuration:{}", config);
if success {
println!("Target accuracy achieved!");
} else {
println!("Failed to reach target accuracy");
}
println!("Steps: {}", history.scaled_loss.len() - 1);
println!("Loss: {}", history.scaled_loss.last().unwrap());
println!("\nStep │ Loss\n─────┼────────────────────────────────");
for (step, scaled_loss) in history.scaled_loss.into_iter().enumerate() {
println!("{:<4} │ {}", step, scaled_loss);
}
}

/* TO DO */
// --- new test placed here to avoid merge conflict ---

// at the frozen indices, the optimization steps should have exact zeros,
// and the realized configuration should match the initial guess
#[test]
fn frozen_entry_test() {
let gram = {
let mut gram_to_be = PartialMatrix::new();
for j in 0..2 {
for k in j..2 {
gram_to_be.push_sym(j, k, if (j, k) == (1, 1) { 1.0 } else { 0.0 });
}
}
gram_to_be
};
let guess = DMatrix::from_columns(&[
point(0.0, 0.0, 2.0),
sphere(0.0, 0.0, 0.0, 1.0)
]);
let frozen = [(3, 0), (3, 1)];
println!();
let (config, success, history) = realize_gram(
&gram, guess.clone(), &frozen,
1.0e-12, 0.5, 0.9, 1.1, 200, 110
);
assert_eq!(success, true);
for base_step in history.base_step.into_iter() {
for index in frozen {
assert_eq!(base_step[index], 0.0);
}
}
for index in frozen {
assert_eq!(config[index], guess[index]);
}
}
}
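
The Gram-matrix entries used in these tests encode incidence as Lorentz products under the form `Q` defined above: a sphere has self-product 1, and two spheres tangent with opposing orientation have product -1, which is why `three_spheres_example` fills the off-diagonal with -1.0. Below is a standalone numerical check of that convention; it assumes the representation [c_x/r, c_y/r, c_z/r, 1/(2r), (|c|^2 - r^2)/(2r)] for a sphere with center c and radius r, since the body of `engine::sphere` is only partially visible in this diff.

```rust
use nalgebra::{DMatrix, DVector};

// assumed representation of a sphere with center (cx, cy, cz) and radius r
fn sphere(cx: f64, cy: f64, cz: f64, r: f64) -> DVector<f64> {
    DVector::from_column_slice(&[
        cx / r, cy / r, cz / r,
        0.5 / r,
        0.5 * (cx * cx + cy * cy + cz * cz - r * r) / r,
    ])
}

fn main() {
    // the Lorentz form from the diff above
    let q = DMatrix::from_row_slice(5, 5, &[
        1.0, 0.0, 0.0, 0.0, 0.0,
        0.0, 1.0, 0.0, 0.0, 0.0,
        0.0, 0.0, 1.0, 0.0, 0.0,
        0.0, 0.0, 0.0, 0.0, -2.0,
        0.0, 0.0, 0.0, -2.0, 0.0,
    ]);

    // two unit spheres whose centers are 2 apart, so they are externally tangent
    let s1 = sphere(1.0, 0.0, 0.0, 1.0);
    let s2 = sphere(-1.0, 0.0, 0.0, 1.0);

    let prod = s1.dot(&(&q * &s2));
    let self_prod = s1.dot(&(&q * &s1));
    assert!((prod - (-1.0)).abs() < 1e-12);   // tangency with opposing orientation
    assert!((self_prod - 1.0).abs() < 1e-12); // spheres are unit spacelike vectors
}
```
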
@@ -8,14 +8,14 @@ use rustc_hash::FxHashSet;
use sycamore::prelude::*;

use add_remove::AddRemove;
use assembly::{Assembly, ElementKey};
use assembly::Assembly;
use display::Display;
use outline::Outline;

#[derive(Clone)]
struct AppState {
assembly: Assembly,
selection: Signal<FxHashSet<ElementKey>>
selection: Signal<FxHashSet<usize>>
}

impl AppState {
@@ -1,191 +1,26 @@
use itertools::Itertools;
use sycamore::prelude::*;
use web_sys::{
Event,
HtmlInputElement,
KeyboardEvent,
MouseEvent,
wasm_bindgen::JsCast
};
use sycamore::{prelude::*, web::tags::div};
use web_sys::{Element, KeyboardEvent, MouseEvent, wasm_bindgen::JsCast};

use crate::{AppState, assembly, assembly::{Constraint, ConstraintKey, ElementKey}};
use crate::AppState;

// an editable view of the Lorentz product representing a constraint
#[component(inline_props)]
fn LorentzProductInput(constraint: Constraint) -> View {
view! {
input(
r#type="text",
bind:value=constraint.lorentz_prod_text,
on:change=move |event: Event| {
let target: HtmlInputElement = event.target().unwrap().unchecked_into();
match target.value().parse::<f64>() {
Ok(lorentz_prod) => batch(|| {
constraint.lorentz_prod.set(lorentz_prod);
constraint.lorentz_prod_valid.set(true);
}),
Err(_) => constraint.lorentz_prod_valid.set(false)
};
}
)
}
}

// a list item that shows a constraint in an outline view of an element
#[component(inline_props)]
fn ConstraintOutlineItem(constraint_key: ConstraintKey, element_key: ElementKey) -> View {
let state = use_context::<AppState>();
let assembly = &state.assembly;
let constraint = assembly.constraints.with(|csts| csts[constraint_key].clone());
let other_subject = if constraint.subjects.0 == element_key {
constraint.subjects.1
} else {
constraint.subjects.0
};
let other_subject_label = assembly.elements.with(|elts| elts[other_subject].label.clone());
let class = constraint.lorentz_prod_valid.map(
|&lorentz_prod_valid| if lorentz_prod_valid { "constraint" } else { "constraint invalid" }
);
view! {
li(class=class.get()) {
input(r#type="checkbox", bind:checked=constraint.active)
div(class="constraint-label") { (other_subject_label) }
LorentzProductInput(constraint=constraint)
div(class="status")
}
}
}

// a list item that shows an element in an outline view of an assembly
#[component(inline_props)]
fn ElementOutlineItem(key: ElementKey, element: assembly::Element) -> View {
let state = use_context::<AppState>();
let class = state.selection.map(
move |sel| if sel.contains(&key) { "selected" } else { "" }
);
let label = element.label.clone();
let rep_components = element.representation.map(
|rep| rep.iter().map(
|u| format!("{:.3}", u).replace("-", "\u{2212}")
).collect()
);
let constrained = element.constraints.map(|csts| csts.len() > 0);
let constraint_list = element.constraints.map(
|csts| csts.clone().into_iter().collect()
);
let details_node = create_node_ref();
view! {
li {
details(ref=details_node) {
summary(
class=class.get(),
on:keydown={
move |event: KeyboardEvent| {
match event.key().as_str() {
"Enter" => {
if event.shift_key() {
state.selection.update(|sel| {
if !sel.remove(&key) {
sel.insert(key);
}
});
} else {
state.selection.update(|sel| {
sel.clear();
sel.insert(key);
});
}
event.prevent_default();
},
"ArrowRight" if constrained.get() => {
let _ = details_node
.get()
.unchecked_into::<web_sys::Element>()
.set_attribute("open", "");
},
"ArrowLeft" => {
let _ = details_node
.get()
.unchecked_into::<web_sys::Element>()
.remove_attribute("open");
},
_ => ()
}
}
}
) {
div(
class="element-switch",
on:click=|event: MouseEvent| event.stop_propagation()
)
div(
class="element",
on:click={
move |event: MouseEvent| {
if event.shift_key() {
state.selection.update(|sel| {
if !sel.remove(&key) {
sel.insert(key);
}
});
} else {
state.selection.update(|sel| {
sel.clear();
sel.insert(key);
});
}
event.stop_propagation();
event.prevent_default();
}
}
) {
div(class="element-label") { (label) }
div(class="element-representation") {
Indexed(
list=rep_components,
view=|coord_str| view! {
div { (coord_str) }
}
)
}
div(class="status")
}
}
ul(class="constraints") {
Keyed(
list=constraint_list,
view=move |cst_key| view! {
ConstraintOutlineItem(
constraint_key=cst_key,
element_key=key
)
},
key=|cst_key| cst_key.clone()
)
}
}
}
}
}

// a component that lists the elements of the current assembly, showing the
// constraints on each element as a collapsible sub-list. its implementation
// is based on Kate Morley's HTML + CSS tree views:
// this component lists the elements of the assembly, showing the constraints
// on each element as a collapsible sub-list. its implementation is based on
// Kate Morley's HTML + CSS tree views:
//
// https://iamkate.com/code/tree-views/
//
#[component]
pub fn Outline() -> View {
let state = use_context::<AppState>();

// list the elements alphabetically by ID
let element_list = state.assembly.elements.map(
|elts| elts
.clone()
// sort the elements alphabetically by ID
let elements_sorted = create_memo(|| {
let state = use_context::<AppState>();
state.assembly.elements
.get_clone()
.into_iter()
.sorted_by_key(|(_, elt)| elt.id.clone())
.collect()
);
});

view! {
ul(
@@ -196,11 +31,130 @@ pub fn Outline() -> View {
}
) {
Keyed(
list=element_list,
view=|(key, elt)| view! {
ElementOutlineItem(key=key, element=elt)
list=elements_sorted,
view=|(key, elt)| {
let state = use_context::<AppState>();
let class = create_memo({
move || {
if state.selection.with(|sel| sel.contains(&key)) {
"selected"
} else {
""
}
}
});
let label = elt.label.clone();
let rep_components = elt.rep.iter().map(|u| {
let u_coord = u.to_string().replace("-", "\u{2212}");
View::from(div().children(u_coord))
}).collect::<Vec<_>>();
let constrained = elt.constraints.len() > 0;
let details_node = create_node_ref();
view! {
/* [TO DO] switch to integer-valued parameters whenever
that becomes possible again */
li {
details(ref=details_node) {
summary(
class=class.get(),
on:keydown={
move |event: KeyboardEvent| {
match event.key().as_str() {
"Enter" => {
if event.shift_key() {
state.selection.update(|sel| {
if !sel.remove(&key) {
sel.insert(key);
}
});
} else {
state.selection.update(|sel| {
sel.clear();
sel.insert(key);
});
}
event.prevent_default();
},
"ArrowRight" if constrained => {
let _ = details_node
.get()
.unchecked_into::<Element>()
.set_attribute("open", "");
},
"ArrowLeft" => {
let _ = details_node
.get()
.unchecked_into::<Element>()
.remove_attribute("open");
},
_ => ()
}
}
}
) {
div(
class="elt-switch",
on:click=|event: MouseEvent| event.stop_propagation()
)
div(
class="elt",
on:click={
move |event: MouseEvent| {
if event.shift_key() {
state.selection.update(|sel| {
if !sel.remove(&key) {
sel.insert(key);
}
});
} else {
state.selection.update(|sel| {
sel.clear();
sel.insert(key);
});
}
event.stop_propagation();
event.prevent_default();
}
}
) {
div(class="elt-label") { (label) }
div(class="elt-rep") { (rep_components) }
}
}
ul(class="constraints") {
Keyed(
list=elt.constraints.into_iter().collect::<Vec<_>>(),
view=move |c_key: usize| {
let c_state = use_context::<AppState>();
let assembly = &c_state.assembly;
let cst = assembly.constraints.with(|csts| csts[c_key].clone());
let other_arg = if cst.args.0 == key {
cst.args.1
} else {
cst.args.0
};
let other_arg_label = assembly.elements.with(|elts| elts[other_arg].label.clone());
view! {
li(class="cst") {
input(r#type="checkbox", bind:checked=cst.active)
div(class="cst-label") { (other_arg_label) }
div(class="cst-rep") { (cst.rep) }
}
}
},
key=|c_key| c_key.clone()
)
}
}
}
},
key=|(key, _)| key.clone()
key=|(key, elt)| (
key.clone(),
elt.id.clone(),
elt.label.clone(),
elt.constraints.clone()
)
)
}
}
@ -8,8 +8,7 @@ using Optim
|
||||
|
||||
export
|
||||
rand_on_shell, Q, DescentHistory,
|
||||
realize_gram_gradient, realize_gram_newton, realize_gram_optim,
|
||||
realize_gram_alt_proj, realize_gram
|
||||
realize_gram_gradient, realize_gram_newton, realize_gram_optim, realize_gram
|
||||
|
||||
# === guessing ===
|
||||
|
||||
@ -144,7 +143,7 @@ function realize_gram_gradient(
|
||||
break
|
||||
end
|
||||
|
||||
# find the negative gradient of the loss function
|
||||
# find negative gradient of loss function
|
||||
neg_grad = 4*Q*L*Δ_proj
|
||||
slope = norm(neg_grad)
|
||||
dir = neg_grad / slope
|
||||
@ -233,7 +232,7 @@ function realize_gram_newton(
|
||||
break
|
||||
end
|
||||
|
||||
# find the negative gradient of the loss function
|
||||
# find the negative gradient of loss function
|
||||
neg_grad = 4*Q*L*Δ_proj
|
||||
|
||||
# find the negative Hessian of the loss function
|
||||
@ -314,129 +313,6 @@ function realize_gram_optim(
|
||||
)
|
||||
end
|
||||
|
||||
# seek a matrix `L` for which `L'QL` matches the sparse matrix `gram` at every
# explicit entry of `gram`. use gradient descent starting from `guess`, with an
# alternate technique for finding the projected base step from the unprojected
# Hessian
function realize_gram_alt_proj(
    gram::SparseMatrixCSC{T, <:Any},
    guess::Matrix{T},
    frozen = CartesianIndex[];
    scaled_tol = 1e-30,
    min_efficiency = 0.5,
    backoff = 0.9,
    reg_scale = 1.1,
    max_descent_steps = 200,
    max_backoff_steps = 110
) where T <: Number
    # start history
    history = DescentHistory{T}()

    # find the dimension of the search space
    dims = size(guess)
    element_dim, construction_dim = dims
    total_dim = element_dim * construction_dim

    # list the constrained entries of the gram matrix
    J, K, _ = findnz(gram)
    constrained = zip(J, K)

    # scale the tolerance
    scale_adjustment = sqrt(T(length(constrained)))
    tol = scale_adjustment * scaled_tol

    # convert the frozen indices to stacked format
    frozen_stacked = [(index[2]-1)*element_dim + index[1] for index in frozen]

    # initialize search state
    L = copy(guess)
    Δ_proj = proj_diff(gram, L'*Q*L)
    loss = dot(Δ_proj, Δ_proj)

    # use Newton's method with backtracking and gradient descent backup
    for step in 1:max_descent_steps
        # stop if the loss is tolerably low
        if loss < tol
            break
        end

        # find the negative gradient of the loss function
        neg_grad = 4*Q*L*Δ_proj

        # find the negative Hessian of the loss function
        hess = Matrix{T}(undef, total_dim, total_dim)
        indices = [(j, k) for k in 1:construction_dim for j in 1:element_dim]
        for (j, k) in indices
            basis_mat = basis_matrix(T, j, k, dims)
            neg_dΔ = basis_mat'*Q*L + L'*Q*basis_mat
            neg_dΔ_proj = proj_to_entries(neg_dΔ, constrained)
            deriv_grad = 4*Q*(-basis_mat*Δ_proj + L*neg_dΔ_proj)
            hess[:, (k-1)*element_dim + j] = reshape(deriv_grad, total_dim)
        end
        hess_sym = Hermitian(hess)
        push!(history.hess, hess_sym)

        # regularize the Hessian
        min_eigval = minimum(eigvals(hess_sym))
        push!(history.positive, min_eigval > 0)
        if min_eigval <= 0
            hess -= reg_scale * min_eigval * I
        end

        # compute the Newton step
        neg_grad_stacked = reshape(neg_grad, total_dim)
        for k in frozen_stacked
            neg_grad_stacked[k] = 0
            hess[k, :] .= 0
            hess[:, k] .= 0
            hess[k, k] = 1
        end
        base_step_stacked = Hermitian(hess) \ neg_grad_stacked
        base_step = reshape(base_step_stacked, dims)
        push!(history.base_step, base_step)

        # store the current position, loss, and slope
        L_last = L
        loss_last = loss
        push!(history.scaled_loss, loss / scale_adjustment)
        push!(history.neg_grad, neg_grad)
        push!(history.slope, norm(neg_grad))

        # find a good step size using backtracking line search
        push!(history.stepsize, 0)
        push!(history.backoff_steps, max_backoff_steps)
        empty!(history.last_line_L)
        empty!(history.last_line_loss)
        rate = one(T)
        step_success = false
        base_target_improvement = dot(neg_grad, base_step)
        for backoff_steps in 0:max_backoff_steps
            history.stepsize[end] = rate
            L = L_last + rate * base_step
            Δ_proj = proj_diff(gram, L'*Q*L)
            loss = dot(Δ_proj, Δ_proj)
            improvement = loss_last - loss
            push!(history.last_line_L, L)
            push!(history.last_line_loss, loss / scale_adjustment)
            if improvement >= min_efficiency * rate * base_target_improvement
                history.backoff_steps[end] = backoff_steps
                step_success = true
                break
            end
            rate *= backoff
        end

        # if we've hit a wall, quit
        if !step_success
            return L_last, false, history
        end
    end

    # return the factorization and its history
    push!(history.scaled_loss, loss / scale_adjustment)
    L, loss < tol, history
end

# seek a matrix `L` for which `L'QL` matches the sparse matrix `gram` at every
# explicit entry of `gram`. use gradient descent starting from `guess`
function realize_gram(
@ -445,6 +321,7 @@ function realize_gram(
    frozen = nothing;
    scaled_tol = 1e-30,
    min_efficiency = 0.5,
    init_rate = 1.0,
    backoff = 0.9,
    reg_scale = 1.1,
    max_descent_steps = 200,
@ -475,19 +352,20 @@ function realize_gram(
        unfrozen_stacked = reshape(is_unfrozen, total_dim)
    end

    # initialize search state
    # initialize variables
    grad_rate = init_rate
    L = copy(guess)
    Δ_proj = proj_diff(gram, L'*Q*L)
    loss = dot(Δ_proj, Δ_proj)

    # use Newton's method with backtracking and gradient descent backup
    Δ_proj = proj_diff(gram, L'*Q*L)
    loss = dot(Δ_proj, Δ_proj)
    for step in 1:max_descent_steps
        # stop if the loss is tolerably low
        if loss < tol
            break
        end

        # find the negative gradient of the loss function
        # find the negative gradient of loss function
        neg_grad = 4*Q*L*Δ_proj

        # find the negative Hessian of the loss function
@ -542,7 +420,6 @@ function realize_gram(
        empty!(history.last_line_loss)
        rate = one(T)
        step_success = false
        base_target_improvement = dot(neg_grad, base_step)
        for backoff_steps in 0:max_backoff_steps
            history.stepsize[end] = rate
            L = L_last + rate * base_step
@ -551,7 +428,7 @@ function realize_gram(
            improvement = loss_last - loss
            push!(history.last_line_L, L)
            push!(history.last_line_loss, loss / scale_adjustment)
            if improvement >= min_efficiency * rate * base_target_improvement
            if improvement >= min_efficiency * rate * dot(neg_grad, base_step)
                history.backoff_steps[end] = backoff_steps
                step_success = true
                break

@ -74,13 +74,4 @@ if success
    for k in 5:9
        println(" ", 1 / L[4,k], " sun")
    end
end

# test an alternate technique for finding the projected base step from the
# unprojected Hessian
L_alt, success_alt, history_alt = Engine.realize_gram_alt_proj(gram, guess, frozen)
completed_gram_alt = L_alt'*Engine.Q*L_alt
println("\nDifference in result using alternate projection:\n")
display(completed_gram_alt - completed_gram)
println("\nDifference in steps: ", size(history_alt.scaled_loss, 1) - size(history.scaled_loss, 1))
println("Difference in loss: ", history_alt.scaled_loss[end] - history.scaled_loss[end], "\n")
end
@ -64,13 +64,4 @@ else
    println("\nFailed to reach target accuracy")
end
println("Steps: ", size(history.scaled_loss, 1))
println("Loss: ", history.scaled_loss[end], "\n")

# test an alternate technique for finding the projected base step from the
# unprojected Hessian
L_alt, success_alt, history_alt = Engine.realize_gram_alt_proj(gram, guess, frozen)
completed_gram_alt = L_alt'*Engine.Q*L_alt
println("\nDifference in result using alternate projection:\n")
display(completed_gram_alt - completed_gram)
println("\nDifference in steps: ", size(history_alt.scaled_loss, 1) - size(history.scaled_loss, 1))
println("Difference in loss: ", history_alt.scaled_loss[end] - history.scaled_loss[end], "\n")
println("Loss: ", history.scaled_loss[end], "\n")
@ -93,13 +93,4 @@ if success
    infty = BigFloat[0, 0, 0, 0, 1]
    radius_ratio = dot(infty, Engine.Q * L[:,5]) / dot(infty, Engine.Q * L[:,6])
    println("\nCircumradius / inradius: ", radius_ratio)
end

# test an alternate technique for finding the projected base step from the
# unprojected Hessian
L_alt, success_alt, history_alt = Engine.realize_gram_alt_proj(gram, guess, frozen)
completed_gram_alt = L_alt'*Engine.Q*L_alt
println("\nDifference in result using alternate projection:\n")
display(completed_gram_alt - completed_gram)
println("\nDifference in steps: ", size(history_alt.scaled_loss, 1) - size(history.scaled_loss, 1))
println("Difference in loss: ", history_alt.scaled_loss[end] - history.scaled_loss[end], "\n")
end
@ -41,25 +41,3 @@ I will have to work out formulas for the Euclidean distance between two entities
In this vein, it seems as though if J1 and J2 are the reps of two points, then Q(J1,J2) = d^2/2, where d is the Euclidean distance between the points. So the sphere centered at J1 through J2 is (J1 - (2Q(J1,J2),0,0,0,0))/sqrt(2Q(J1,J2)). Ugh, that has a sqrt in it. Similarly, the sphere centered at J3 through J2 is (J3 - (2Q(J3,J2),0,0,0,0))/sqrt(2Q(J3,J2)). J1, J2, J3 are collinear if these spheres are tangent, i.e. if those vectors have Q-inner-product 1, which is to say Q(J1,J3) - Q(J1,J2) - Q(J3,J2) = 2 sqrt(Q(J1,J2) Q(J2,J3)). But maybe that's not the simplest way of putting it. After all, we can just say that the cross product of the two difference vectors is 0; that has no square roots in it.
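
As a concrete check of that last remark, here is a minimal sketch of the square-root-free collinearity test, written in Rust with the nalgebra crate already among the project's dependencies. The function name and the tolerance parameter are illustrative assumptions, not part of the engine:

```rust
use nalgebra::Vector3;

/// Sketch: three Euclidean points are collinear exactly when the cross product
/// of the two difference vectors vanishes; `tol` absorbs floating-point error.
fn collinear(j1: Vector3<f64>, j2: Vector3<f64>, j3: Vector3<f64>, tol: f64) -> bool {
    (j2 - j1).cross(&(j3 - j1)).norm() < tol
}
```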

One conceivable way to canonicalize lines is to use the *perpendicular* plane that goes through the origin; that's uniquely defined, and anyway it just amounts to I = (0,0,d), where d is the ordinary direction vector of the line, together with a point J in that plane that the line goes through, which just amounts to J = (r^2, 1, E) with Q(I,J) = 0, i.e. E·d = 0. It's also the point on the line closest to the origin. The reason we don't usually use that point as the companion to the direction vector is that the resulting set of six coordinates is not homogeneous. But here that's not an issue, since we have our standard point coordinates and plane coordinates; and for a plane through the origin, only two of the direction coordinates are really free, and then we have the one dot-product relation, so only two of the point coordinates are really free, giving us the correct dimensionality of 4 for the set of lines. So in some sense this says that we could naively take as coordinates for a line the projection of the unit direction vector to the xy plane and the projection of the line's closest point to the origin to the xy plane. That doesn't seem to have any weird gimbal locks or discontinuities or anything. With these coordinates, you can test whether the point E = (x,y,z) is on the line (dx,dy,cx,cy) by extending (dx,dy) to d via dz = sqrt(1 - dx^2 - dy^2), extending (cx,cy) to c by determining cz via d·c = 0, and then checking whether d×(E - c) = 0. And you can see whether two lines are parallel just by checking whether they have the same direction vector; if not, you can see whether they are coplanar by projecting both of their closest points perpendicularly onto the line in the direction of the cross product of their directions: if the projections match, the lines are coplanar.
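
The point-on-line test just described is mechanical enough that a small sketch may help. This is only an illustration of the recipe above, not engine code; the names, the choice of the positive root for dz, and the tolerance are assumptions, and the test degenerates when dz = 0 (a direction lying in the xy plane):

```rust
use nalgebra::Vector3;

/// Sketch: test whether the point `e` lies on the line given by the xy-projections
/// (dx, dy) of its unit direction and (cx, cy) of its closest point to the origin.
fn point_on_line(dx: f64, dy: f64, cx: f64, cy: f64, e: Vector3<f64>, tol: f64) -> bool {
    // extend (dx, dy) to a unit direction vector d, taking the positive root for dz
    let dz = (1.0 - dx * dx - dy * dy).sqrt();
    let d = Vector3::new(dx, dy, dz);
    // extend (cx, cy) to the closest point c using the relation d · c = 0
    let cz = -(cx * dx + cy * dy) / dz;
    let c = Vector3::new(cx, cy, cz);
    // e is on the line when d × (e - c) vanishes
    d.cross(&(e - c)).norm() < tol
}
```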

#### Engine Conventions

The coordinate conventions used in the engine are different from the ones used in these notes. Marking the engine vectors and coordinates with $'$, we have
$$I' = (x', y', z', b', c'),$$
where
$$
\begin{align*}
x' & = x & b' & = b/2 \\
y' & = y & c' & = c/2. \\
z' & = z
\end{align*}
$$
The engine uses the quadratic form $Q' = -Q$, which is expressed in engine coordinates as
$$Q'(I'_1, I'_2) = x'_1 x'_2 + y'_1 y'_2 + z'_1 z'_2 - 2(b'_1 c'_2 + c'_1 b'_2).$$
In the `engine` module, the matrix of $Q'$ is encoded in the lazy static variable `Q`.
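
To make the convention concrete, here is a minimal sketch of that bilinear form as a plain function over 5-component vectors. It follows the formula above directly; the function name is an illustrative assumption, and it is not the engine's actual representation, which stores $Q'$ as a matrix:

```rust
use nalgebra::Vector5;

/// Sketch: the engine's quadratic form Q', evaluated on two vectors
/// (x', y', z', b', c') written in engine coordinates.
fn q_engine(v: &Vector5<f64>, w: &Vector5<f64>) -> f64 {
    v[0] * w[0] + v[1] * w[1] + v[2] * w[2] - 2.0 * (v[3] * w[4] + v[4] * w[3])
}
```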

In the engine's coordinate conventions, a sphere with radius $r > 0$ centered on $P = (P_x, P_y, P_z)$ is represented by the vector
$$I'_s = \left(\frac{P_x}{r}, \frac{P_y}{r}, \frac{P_z}{r}, \frac{1}{2r}, \frac{\|P\|^2 - r^2}{2r}\right),$$
which has the normalization $Q'(I'_s, I'_s) = 1$. The point $P$ is represented by the vector
$$I'_P = \left(P_x, P_y, P_z, \frac{1}{2}, \frac{\|P\|^2}{2}\right).$$
In the `engine` module, these formulas are encoded in the `sphere` and `point` functions.
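
For reference, here is a hedged sketch of how such constructors could look, written directly from the formulas above. The names and signatures are assumptions for illustration and are not copied from the engine's actual `sphere` and `point` functions:

```rust
use nalgebra::{Vector3, Vector5};

/// Sketch: engine-style representation of the sphere with the given center and radius,
/// normalized so that Q'(I'_s, I'_s) = 1.
fn sphere_rep(center: Vector3<f64>, radius: f64) -> Vector5<f64> {
    Vector5::new(
        center[0] / radius,
        center[1] / radius,
        center[2] / radius,
        0.5 / radius,
        (center.norm_squared() - radius * radius) / (2.0 * radius),
    )
}

/// Sketch: engine-style representation of the point `p`.
fn point_rep(p: Vector3<f64>) -> Vector5<f64> {
    Vector5::new(p[0], p[1], p[2], 0.5, p.norm_squared() / 2.0)
}
```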