dyna3/app-proto/inversive-display/src/inversive.frag

#version 300 es
precision highp float;
out vec4 outColor;
// --- inversive geometry ---
struct vecInv {
    vec3 sp;
    vec2 lt;
};
vecInv sphere(vec3 center, float radius) {
    return vecInv(
        center / radius,
        vec2(
            0.5 / radius,
            0.5 * (dot(center, center) / radius - radius)
        )
    );
}
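// from the construction above, a sphere with center c and radius r appears to
// be encoded as the vector (c/r, 1/(2r), (|c|^2 - r^2)/(2r)). as a quick
// sanity check, the unit sphere at the origin maps to sp = vec3(0.) and
// lt = vec2(0.5, -0.5)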
// --- uniforms ---
// construction. the SPHERE_MAX_UNIFORM array size seems to affect frame rate
// a lot, even though we should only be using the first few elements of each
// array
const int SPHERE_MAX_UNIFORM = 200;
uniform int sphere_cnt;
uniform vecInv sphere_list[SPHERE_MAX_UNIFORM];
uniform vec3 color_list[SPHERE_MAX_UNIFORM];
// view
uniform vec2 resolution;
uniform float shortdim;
// controls
uniform vec2 ctrl;
uniform vec2 radius;
uniform float opacity;
uniform float highlight;
uniform int layer_threshold;
uniform bool debug_mode;
// light, camera, and highlighting
const float focal_slope = 0.3;
const vec3 light_dir = normalize(vec3(2., 2., 1.));
const float ixn_threshold = 0.005;
// --- sRGB ---
// map colors from RGB space to sRGB space, as specified in the sRGB standard
// (IEC 61966-2-1:1999)
//
// https://www.color.org/sRGB.pdf
// https://www.color.org/chardata/rgb/srgb.xalter
//
// in RGB space, color value is proportional to light intensity, so linear
// color-vector interpolation corresponds to physical light mixing. in sRGB
// space, the color encoding used by many monitors, we use more of the value
// interval to represent low intensities, and less of the interval to represent
// high intensities. this improves color quantization
float sRGB(float t) {
    if (t <= 0.0031308) {
        return 12.92*t;
    } else {
        return 1.055*pow(t, 5./12.) - 0.055;
    }
}
vec3 sRGB(vec3 color) {
    return vec3(sRGB(color.r), sRGB(color.g), sRGB(color.b));
}
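// for reference: the exponent 5./12. above is exactly 1/2.4, the encoding
// gamma from the standard, so for example sRGB(0.5) = 1.055*pow(0.5, 1./2.4)
// - 0.055, roughly 0.735, while sRGB(0.002) falls on the linear segment and
// gives 12.92*0.002 = 0.02584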
// --- shading ---
struct taggedFrag {
    int id;
    vec4 color;
    vec3 pt;
    vec3 normal;
};
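// order two fragments front to back. the camera looks down the negative
// z-axis, so the fragment with the larger pt.z is the one nearer the screen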
taggedFrag[2] sort(taggedFrag a, taggedFrag b) {
    taggedFrag[2] result;
    if (a.pt.z > b.pt.z) {
        result[0] = a;
        result[1] = b;
    } else {
        result[0] = b;
        result[1] = a;
    }
    return result;
}
taggedFrag sphere_shading(vecInv v, vec3 pt, vec3 base_color, int id) {
    // the expression for normal needs to be checked. it's supposed to give the
    // negative gradient of the lorentz product between the impact point vector
    // and the sphere vector with respect to the coordinates of the impact
    // point. i calculated it in my head and decided that the result looked good
    // enough for now
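    // a sketch of that calculation, assuming one plausible convention for the
    // lorentz product (an assumption, not confirmed elsewhere in this file):
    // if the product is dot(v.sp, pt) - v.lt.s*dot(pt, pt) - v.lt.t, its
    // gradient with respect to pt is v.sp - 2.*v.lt.s*pt, and negating that
    // gives the expression below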
    vec3 normal = normalize(-v.sp + 2.*v.lt.s*pt);
    float incidence = dot(normal, light_dir);
    float illum = mix(0.4, 1.0, max(incidence, 0.0));
    return taggedFrag(id, vec4(illum * base_color, opacity), pt, normal);
}
// --- ray-casting ---
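// find the depths at which the ray pt = depth * dir hits the sphere v. under
// the lorentz-product convention assumed in sphere_shading, setting the
// product of v with the ray point to zero gives the quadratic
// a*depth^2 + b*depth + c = 0 with the coefficients below; its roots are the
// two returned values scale*(1. - offset) and scale*(1. + offset), with
// scale = -b/(2a) and offset = sqrt(1. - 4ac/b^2)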
vec2 sphere_cast(vecInv v, vec3 dir) {
    float a = -v.lt.s * dot(dir, dir);
    float b = dot(v.sp, dir);
    float c = -v.lt.t;
    float scale = -b/(2.*a);
    float adjust = 4.*a*c/(b*b);
    if (adjust < 1.) {
        float offset = sqrt(1. - adjust);
        return vec2(
            scale * (1. - offset),
            scale * (1. + offset)
        );
    } else {
        // these parameters describe points behind the camera, so the
        // corresponding fragments won't be drawn
        return vec2(-1., -1.);
    }
}
void main() {
    vec2 scr = (2.*gl_FragCoord.xy - resolution) / shortdim;
    vec3 dir = vec3(focal_slope * scr, -1.);
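    // scr places this fragment in screen coordinates centered on the middle of
    // the viewport, with the short side spanning [-1, 1]; dir is the ray
    // through that point for a pinhole camera at the origin looking down the
    // negative z-axis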
    // cast rays through the spheres
    const int SPHERE_MAX_INTERNAL = 6;
    taggedFrag frags [2*SPHERE_MAX_INTERNAL];
    int layer_cnt = 0;
    for (int id = 0; id < sphere_cnt; ++id) {
        // find out where the ray hits the sphere
        vec2 hit_depths = sphere_cast(sphere_list[id], dir);
        // insertion-sort the fragments we hit into the fragment list
        for (int side = 0; side < 2; ++side) {
            if (hit_depths[side] > 0.) {
                for (int layer = layer_cnt; layer >= 0; --layer) {
                    if (layer < 1 || frags[layer-1].pt.z >= -hit_depths[side]) {
                        // we're not as close to the screen as the fragment
                        // before the empty slot, so insert here
                        frags[layer] = sphere_shading(
                            sphere_list[id],
                            hit_depths[side] * dir,
                            color_list[id],
                            id
                        );
                        break;
                    } else {
                        // we're closer to the screen than the fragment before
                        // the empty slot, so move that fragment into the empty
                        // slot
                        frags[layer] = frags[layer-1];
                    }
                }
                ++layer_cnt;
            }
        }
    }
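    // at this point frags[0] through frags[layer_cnt - 1] hold the hits on
    // this ray, sorted front to back (largest pt.z first)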
    /* DEBUG */
    // in debug mode, show the layer count instead of the shaded image
    if (debug_mode) {
        // at the bottom of the screen, show the color scale instead of the
        // layer count
        if (gl_FragCoord.y < 10.) layer_cnt = 2 * int(8. * gl_FragCoord.x / resolution.x);
        // convert number to color
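        // even counts are encoded in the red, green, and blue channels as bits
        // 1, 2, and 3 of the count, so counts 0 through 14 each get a distinct
        // color; odd counts, which can occur when the camera is inside a
        // sphere, show up as medium gray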
        vec3 color;
        if (layer_cnt % 2 == 0) {
            ivec3 bits = layer_cnt / ivec3(2, 4, 8);
            color = mod(vec3(bits), 2.);
        } else {
            color = vec3(0.5);
        }
        outColor = vec4(color, 1.);
        return;
    }
    // highlight intersections and cusps
    for (int i = layer_cnt-1; i >= 1; --i) {
        // intersections
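        // ixn_dist roughly estimates the distance from this pair of fragments
        // to the circle where their spheres intersect: dot(normal, disp)
        // measures how far one fragment sits from the other's tangent plane,
        // and dividing by the sine of the angle between the normals
        // extrapolates that to the planes' intersection line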
        taggedFrag frag0 = frags[i];
        taggedFrag frag1 = frags[i-1];
        float ixn_sin = length(cross(frag0.normal, frag1.normal));
        vec3 disp = frag0.pt - frag1.pt;
        float ixn_dist = max(
            abs(dot(frag1.normal, disp)),
            abs(dot(frag0.normal, disp))
        ) / ixn_sin;
        float ixn_highlight = 0.5 * highlight * (1. - smoothstep(2./3.*ixn_threshold, 1.5*ixn_threshold, ixn_dist));
        frags[i].color = mix(frags[i].color, vec4(1.), ixn_highlight);
        frags[i-1].color = mix(frags[i-1].color, vec4(1.), ixn_highlight);
        // cusps
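        // cusp_cos is small where the ray grazes its sphere. by a rough,
        // unverified estimate, the square-root scaling of the threshold flags
        // rays whose closest approach to the sphere surface is within about
        // ixn_threshold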
        float cusp_cos = abs(dot(dir, frag0.normal));
        float cusp_threshold = 2.*sqrt(ixn_threshold * sphere_list[frag0.id].lt.s);
        float cusp_highlight = highlight * (1. - smoothstep(2./3.*cusp_threshold, 1.5*cusp_threshold, cusp_cos));
        frags[i].color = mix(frags[i].color, vec4(1.), cusp_highlight);
    }
    // composite the sphere fragments
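    // since frags is sorted front to back, walking the loop index downward
    // composites the fragments back to front with the usual "over" blend;
    // layer_threshold presumably lets the controls strip away the frontmost
    // layers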
    vec3 color = vec3(0.);
    for (int i = layer_cnt-1; i >= layer_threshold; --i) {
        if (frags[i].pt.z < 0.) {
            vec4 frag_color = frags[i].color;
            color = mix(color, frag_color.rgb, frag_color.a);
        }
    }
    outColor = vec4(sRGB(color), 1.);
}