How to implement textureCube using 6 sampler2D - glsl

Previously I used a samplerCube to render the cube. This is my previous fragment shader code:
"uniform samplerCube tCubeTgt;",
"varying vec3 posTgt;",
"void main() {",
"vec4 reflectedColorTgt = textureCube( tCubeTgt, vec3( -posTgt.x, posTgt.yz ) );",
"gl_FragColor = reflectedColorTgt ;",
"}"
Now I want to use 6 sampler2Ds to render the cube. What should I do?

Why?
In any case, looking up how cube mapping works, there's a function and a table in the OpenGL ES 2.0 spec. The face coordinates are computed as s = (sc / |ma| + 1) / 2 and t = (tc / |ma| + 1) / 2, where sc, tc, and ma come from this table:
Major Axis Direction| Target |sc |tc |ma |
--------------------+---------------------------+---+---+---+
+rx |TEXTURE_CUBE_MAP_POSITIVE_X|−rz|−ry| rx|
−rx |TEXTURE_CUBE_MAP_NEGATIVE_X| rz|−ry| rx|
+ry |TEXTURE_CUBE_MAP_POSITIVE_Y| rx| rz| ry|
−ry |TEXTURE_CUBE_MAP_NEGATIVE_Y| rx|−rz| ry|
+rz |TEXTURE_CUBE_MAP_POSITIVE_Z| rx|−ry| rz|
−rz |TEXTURE_CUBE_MAP_NEGATIVE_Z|−rx|−ry| rz|
--------------------+---------------------------+---+---+---+
Table 3.21: Selection of cube map images based on major axis direction of texture coordinates
Using that, you can write functions that apply the same logic using six 2D textures instead of one cubemap:
"use strict";
/* global document, twgl, requestAnimationFrame */
const vs = `
uniform mat4 u_model;
uniform mat4 u_view;
uniform mat4 u_projection;
uniform vec3 u_camera;
attribute vec4 position;
attribute vec3 normal;
varying vec3 v_normal;
varying vec3 v_eyeToSurface;
void main() {
vec4 world = u_model * position;
gl_Position = u_projection * u_view * world;
v_eyeToSurface = world.xyz - u_camera;
v_normal = (u_model * vec4(normal, 0)).xyz;
}
`;
const fs = `
precision mediump float;
varying vec3 v_eyeToSurface;
varying vec3 v_normal;
uniform sampler2D u_textures[6];
void cubemap(vec3 r, out float texId, out vec2 st) {
vec3 uvw;
vec3 absr = abs(r);
if (absr.x > absr.y && absr.x > absr.z) {
// x major
float negx = step(r.x, 0.0);
uvw = vec3(r.zy, absr.x) * vec3(mix(-1.0, 1.0, negx), -1, 1);
texId = negx;
} else if (absr.y > absr.z) {
// y major
float negy = step(r.y, 0.0);
uvw = vec3(r.xz, absr.y) * vec3(1.0, mix(1.0, -1.0, negy), 1.0);
texId = 2.0 + negy;
} else {
// z major
float negz = step(r.z, 0.0);
uvw = vec3(r.xy, absr.z) * vec3(mix(1.0, -1.0, negz), -1, 1);
texId = 4.0 + negz;
}
st = vec2(uvw.xy / uvw.z + 1.) * .5;
}
vec4 texCubemap(vec3 uvw) {
float texId;
vec2 st;
cubemap(uvw, texId, st);
vec4 color = vec4(0);
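// GLSL ES 1.0 can't index a sampler array with a variable, so sample
// all 6 textures and keep only the one whose index matches texId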
for (int i = 0; i < 6; ++i) {
vec4 side = texture2D(u_textures[i], st);
float select = step(float(i) - 0.5, texId) *
step(texId, float(i) + .5);
color = mix(color, side, select);
}
return color;
}
void main() {
vec3 normal = normalize(v_normal);
vec3 eyeToSurface = normalize(v_eyeToSurface);
gl_FragColor = texCubemap(reflect(eyeToSurface, normal));
}
`;
const m4 = twgl.m4;
const gl = document.getElementById("c").getContext("webgl");
// compile shaders, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
// create buffers
const models = [
twgl.primitives.createSphereBufferInfo(gl, 1, 12, 8),
twgl.primitives.createCubeBufferInfo(gl, 1.5),
twgl.primitives.createTorusBufferInfo(gl, .7, .5, 12, 8),
];
const textures = twgl.createTextures(gl, {
posx: { minMag: gl.LINEAR, wrap: gl.CLAMP_TO_EDGE, crossOrigin: "anonymous", src:'https://twgljs.org/examples/images/yokohama/posx.jpg', },
negx: { minMag: gl.LINEAR, wrap: gl.CLAMP_TO_EDGE, crossOrigin: "anonymous", src:'https://twgljs.org/examples/images/yokohama/negx.jpg', },
posy: { minMag: gl.LINEAR, wrap: gl.CLAMP_TO_EDGE, crossOrigin: "anonymous", src:'https://twgljs.org/examples/images/yokohama/posy.jpg', },
negy: { minMag: gl.LINEAR, wrap: gl.CLAMP_TO_EDGE, crossOrigin: "anonymous", src:'https://twgljs.org/examples/images/yokohama/negy.jpg', },
posz: { minMag: gl.LINEAR, wrap: gl.CLAMP_TO_EDGE, crossOrigin: "anonymous", src:'https://twgljs.org/examples/images/yokohama/posz.jpg', },
negz: { minMag: gl.LINEAR, wrap: gl.CLAMP_TO_EDGE, crossOrigin: "anonymous", src:'https://twgljs.org/examples/images/yokohama/negz.jpg', },
});
const uniforms = {
u_textures: [
textures.posx,
textures.negx,
textures.posy,
textures.negy,
textures.posz,
textures.negz,
],
};
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.enable(gl.DEPTH_TEST);
gl.enable(gl.CULL_FACE);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
const fov = 30 * Math.PI / 180;
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const zNear = 0.5;
const zFar = 20;
const projection = m4.perspective(fov, aspect, zNear, zFar);
const eye = [
Math.sin(time) * 7,
Math.sin(time * .5) * 3,
Math.cos(time) * 7,
];
const target = [0, 0, 0];
const up = [0, 1, 0];
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
uniforms.u_camera = eye;
uniforms.u_projection = projection;
uniforms.u_view = view;
gl.useProgram(programInfo.program);
models.forEach((bufferInfo, ndx) => {
let u = ndx / (models.length - 1) * 2 - 1;
let model = m4.translation([u * (models.length - 1), 0, 0]);
model = m4.rotateY(model, time * (ndx + 1) * 0.7);
uniforms.u_model = m4.rotateX(model, time * (ndx + 1) * 0.2);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.setUniforms(programInfo, uniforms);
gl.drawElements(gl.TRIANGLES, bufferInfo.numElements, gl.UNSIGNED_SHORT, 0);
});
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body {
margin: 0;
}
canvas {
display: block;
width: 100vw;
height: 100vh;
}
<canvas id="c"></canvas>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
But you'll see there are lots of issues. One issue is what to do at the edges. The GPU can filter across the edges of a real cubemap; with six separate 2D textures we'd have to add something to do that ourselves. Also, we can't use random access for samplers, nor can we conditionally access samplers, which means doing it with six 2D textures instead of one cubemap is going to be much, much slower.
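As a band-aid for the seam issue you could pull st in by half a texel so LINEAR filtering never reads past a face's edge. A minimal sketch, untested, where u_textureSize is a hypothetical new uniform holding the per-face texture size in pixels; note a real cubemap filters across adjacent faces, which this does not reproduce:
// hypothetical: clamp st so LINEAR filtering stays inside the face
vec2 halfTexel = 0.5 / u_textureSize;
st = clamp(st, halfTexel, 1.0 - halfTexel);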

Related

Rectangular/square gradient in shader

My first question got closed. Since then I've added some code and made some progress. However, the gradient generated is circular, and I currently don't know how to transform it into a square.
Here is my current result:
Target result (along these lines):
Here is my fragment shader:
precision highp float;
varying vec2 vTextureCoord;
uniform float centerX;
uniform float centerY;
uniform vec4 colors[4];
uniform float steps[4];
void main() {
vec3 map = vec3(vTextureCoord, 1);
vec2 uv = map.xy;
float dist = distance(vTextureCoord, vec2(centerX, centerY));
highp vec4 col = colors[0];
for (int i = 1; i < 4; ++i) {
col = mix(col, colors[i], smoothstep(steps[i - 1], steps[i], dist));
}
float factor = max(abs(uv.x - centerX), abs(uv.y - centerY));
float c = 1. - max(abs(uv.x - centerX), abs(uv.y - centerY));
vec4 finalColor = vec4((col.r - factor), (col.g - factor), (col.b - factor), 1.);
gl_FragColor = finalColor;
}
The parameters passed are:
Colors: [[1, 0, 0], [1, 1, 1], [1, 0, 0], [0, 1, 0]]
Steps: [0, 0.29, 0.35, 1]
Using texture coordinates
Below is an example that uses texture coordinates in the range 0 to 1, which puts the center of the gradient at (0.5, 0.5). To compute the gradient we must normalize the distance from the center from the range 0 to 0.5 to the range 0 to 1. This is done by dividing by 0.5, or, as in the example, multiplying by the reciprocal, 2 (multiplication is generally cheaper than division).
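In shader terms that normalization is just this (a minimal sketch of the lines the example below uses):
// texture coords are 0-1, so the distance from the center (0.5, 0.5) spans 0-0.5;
// multiplying by 2.0 re-maps it to the 0-1 range the gradient steps expect
vec2 gradXY = abs(uv - 0.5);
float dist = max(gradXY.x, gradXY.y) * 2.0;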
Simplifying the shader
Also, your method of calculating the gradient color at each fragment is computationally expensive. For each gradient (3 in this case) you call smoothstep and then mix, yet for each fragment two of those calculations do nothing of consequence to the computed color.
The example below reduces the computation by checking whether the distance is within a particular gradient, and only then computing the color, assigning it to gl_FragColor, and breaking out of the loop.
I cannot work out whether you want the gradient to darken toward the edges, as your first image and code (and the accepted answer) suggest, or whether the second image in your question is the result you want. The example assumes you want the second image.
const shaders = {
vs: `attribute vec2 vert;
varying vec2 uv;
void main() {
uv = (vert + 1.0) / 2.0; // normalize texture coords
gl_Position = vec4(vert, 0.0, 1.0);
}`,
fs: `precision mediump float;
varying vec2 uv;
uniform vec3 colors[4];
uniform float steps[4];
void main(){
vec2 gradXY = abs(uv - 0.5); // 0.5 is centerX, centerY
float dist = pow(max(gradXY.x, gradXY.y) * 2.0, 2.0);
float start = steps[0];
for (int i = 1; i < 4; i++) {
float end = steps[i];
if (dist >= start && dist <= end) {
gl_FragColor = vec4(mix(colors[i - 1], colors[i], (dist-start) / (end-start)), 1);
break;
}
start = end;
}
}`,};
const F32A = a => new Float32Array(a), UI16A = a => new Uint16Array(a);
const GLBuffer = (data, type = gl.ARRAY_BUFFER, use = gl.STATIC_DRAW, buf) => (gl.bindBuffer(type, buf = gl.createBuffer()), gl.bufferData(type, data, use), buf);
const GLLocs = (shr, type, ...names) => names.reduce((o,name) => (o[name] = (gl[`get${type}Location`])(shr, name), o), {});
const GLShader = (prg, source, type = gl.FRAGMENT_SHADER, shr) => {
gl.shaderSource(shr = gl.createShader(type), source);
gl.compileShader(shr);
gl.attachShader(prg, shr);
}
var W;
const gl = canvas.getContext("webgl");
requestAnimationFrame(render);
addEventListener("resize", render);
const prog = gl.createProgram();
GLShader(prog, shaders.vs, gl.VERTEX_SHADER);
GLShader(prog, shaders.fs);
gl.linkProgram(prog);
gl.useProgram(prog);
const locs = GLLocs(prog, "Uniform", "colors", "steps");
const vert = GLLocs(prog, "Attrib", "vert").vert;
GLBuffer(F32A([-1,-1, 1,-1, 1,1, -1,1]));
GLBuffer(UI16A([1,2,3, 0,1,3]), gl.ELEMENT_ARRAY_BUFFER);
gl.enableVertexAttribArray(vert);
gl.vertexAttribPointer(vert, 2, gl.FLOAT, false, 0, 0);
function render() {
gl.viewport(0, 0, W = canvas.width = Math.min(innerWidth,innerHeight), canvas.height = W);
gl.uniform3fv(locs.colors, F32A([1,1,1, 1,0,0, 1,1,1, 0,0,0]));
gl.uniform1fv(locs.steps, F32A([0, 1/3, 2/3, 1]));
gl.drawElements(gl.TRIANGLES, 6, gl.UNSIGNED_SHORT, 0);
}
body {
margin: 0px;
}
canvas {
position: absolute;
top: 0px;
left: 0px;
background: black;
}
<canvas id="canvas"></canvas>
Question ambiguities
There are a number of ambiguities in your question, which are addressed in the following.
The pow function in the example line ...
float dist = pow(max(gradXY.x, gradXY.y) * 2.0, 2.0);
... is an approximation of your use of smoothstep(steps[i - 1], steps[i], dist) when you calculate col (assuming a dist range of 0 to 0.5). If you want the full Hermite curve you can replace that line with ...
float distL = max(gradXY.x, gradXY.y) * 2.0;
float dist = distL * distL * (3.0 - 2.0 * distL);
... and if you want the darkening toward the edge as in the question's first image, use the following line when calculating the frag color. NOTE: this assumes colors are vec4, not vec3; make the appropriate modifications if you use the example code.
FragColor = mix(colors[i - 1], colors[i], (dist-start) / (end-start)) - vec4(vec3(distL * 0.5),0);
or, if not using the Hermite curve:
FragColor = mix(colors[i - 1], colors[i], (dist-start) / (end-start)) - vec4(vec3(dist * 0.5),0);
A square gradient can be achieved by computing the maximum distance along both axes. Instead of
float dist = distance(vTextureCoord, vec2(centerX, centerY));
use:
vec2 distV = vTextureCoord - vec2(centerX, centerY);
float dist = max(abs(distV.x), abs(distV.y));
Complete example:
(function loadscene() {
var canvas, gl, vp_size, progDraw, bufObj = {};
function initScene() {
canvas = document.getElementById( "ogl-canvas");
gl = canvas.getContext( "experimental-webgl" );
if ( !gl )
return;
progDraw = gl.createProgram();
for (let i = 0; i < 2; ++i) {
let source = document.getElementById(i==0 ? "draw-shader-vs" : "draw-shader-fs").text;
let shaderObj = gl.createShader(i==0 ? gl.VERTEX_SHADER : gl.FRAGMENT_SHADER);
gl.shaderSource(shaderObj, source);
gl.compileShader(shaderObj);
let status = gl.getShaderParameter(shaderObj, gl.COMPILE_STATUS);
if (!status) alert(gl.getShaderInfoLog(shaderObj));
gl.attachShader(progDraw, shaderObj);
}
gl.linkProgram(progDraw);
let status = gl.getProgramParameter(progDraw, gl.LINK_STATUS);
if ( !status ) alert(gl.getProgramInfoLog(progDraw));
progDraw.inPos = gl.getAttribLocation(progDraw, "inPos");
progDraw.u_time = gl.getUniformLocation(progDraw, "u_time");
progDraw.u_resolution = gl.getUniformLocation(progDraw, "u_resolution");
gl.useProgram(progDraw);
var pos = [ -1, -1, 1, -1, 1, 1, -1, 1 ];
var inx = [ 0, 1, 2, 0, 2, 3 ];
bufObj.pos = gl.createBuffer();
gl.bindBuffer( gl.ARRAY_BUFFER, bufObj.pos );
gl.bufferData( gl.ARRAY_BUFFER, new Float32Array( pos ), gl.STATIC_DRAW );
bufObj.inx = gl.createBuffer();
bufObj.inx.len = inx.length;
gl.bindBuffer( gl.ELEMENT_ARRAY_BUFFER, bufObj.inx );
gl.bufferData( gl.ELEMENT_ARRAY_BUFFER, new Uint16Array( inx ), gl.STATIC_DRAW );
gl.enableVertexAttribArray( progDraw.inPos );
gl.vertexAttribPointer( progDraw.inPos, 2, gl.FLOAT, false, 0, 0 );
gl.enable( gl.DEPTH_TEST );
gl.clearColor( 0.0, 0.0, 0.0, 1.0 );
window.onresize = resize;
resize();
requestAnimationFrame(render);
}
function resize() {
//vp_size = [gl.drawingBufferWidth, gl.drawingBufferHeight];
vp_size = [window.innerWidth, window.innerHeight];
//vp_size = [256, 256]
canvas.width = vp_size[0];
canvas.height = vp_size[1];
}
function render(deltaMS) {
gl.viewport( 0, 0, canvas.width, canvas.height );
gl.clear( gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT );
gl.uniform1f(progDraw.u_time, deltaMS/1000.0);
gl.uniform2f(progDraw.u_resolution, canvas.width, canvas.height);
gl.drawElements( gl.TRIANGLES, bufObj.inx.len, gl.UNSIGNED_SHORT, 0 );
requestAnimationFrame(render);
}
initScene();
})();
<script id="draw-shader-vs" type="x-shader/x-vertex">
#version 100
attribute vec2 inPos;
varying vec2 ndcPos;
void main()
{
ndcPos = inPos;
gl_Position = vec4( inPos.xy, 0.0, 1.0 );
}
</script>
<script id="draw-shader-fs" type="x-shader/x-fragment">
precision mediump float;
varying vec2 ndcPos; // normalized device coordinates in range [-1.0, 1.0]
uniform float u_time;
uniform vec2 u_resolution;
#define FILL
void main()
{
vec4 colors[4];
colors[0] = vec4(1.0, 0.0, 0.0, 1.0);
colors[1] = vec4(0.0, 1.0, 0.0, 1.0);
colors[2] = vec4(0.0, 0.0, 1.0, 1.0);
colors[3] = vec4(1.0, 1.0, 1.0, 1.0);
float steps[4];
steps[0] = 0.2;
steps[1] = 0.4;
steps[2] = 0.6;
steps[3] = 0.8;
vec2 uv = ndcPos.xy;
uv.x *= u_resolution.x / u_resolution.y;
vec2 vTextureCoord = uv;
float centerX = 0.0;
float centerY = 0.0;
//float dist = distance(vTextureCoord, vec2(centerX, centerY));
vec2 distV = vTextureCoord - vec2(centerX, centerY);
float dist = max(abs(distV.x), abs(distV.y));
highp vec4 col = colors[0];
for (int i = 1; i < 4; ++i) {
col = mix(col, colors[i], smoothstep(steps[i - 1], steps[i], dist));
}
float factor = max(abs(uv.x - centerX), abs(uv.y - centerY));
float c = 1. - max(abs(uv.x - centerX), abs(uv.y - centerY));
vec4 finalColor = vec4((col.r - factor), (col.g - factor), (col.b - factor), 1.);
gl_FragColor = finalColor;
}
</script>
<canvas id="ogl-canvas" style="border: none"></canvas>

Fisheye Skybox Shader

I'm trying to write a panorama viewer. Mostly this involves mapping an image to a quad to simulate a skybox.
For a cubemap image it's pretty trivial: either copy the 6 parts of the image to the 6 faces of a cubemap, or else make a shader that does the cubemap math as specified in the OpenGL ES spec.
For an equirectangular image, like one from a Ricoh Theta, you can use this math:
// convert from direction (n) to texcoord (uv)
float latitude = acos(n.y);
float longitude = atan(n.z, n.x);
vec2 sphereCoords = vec2(longitude, latitude) * vec2(0.5 / PI, 1.0 / PI);
vec2 uv = fract(vec2(0.5,1.0) - sphereCoords);
const m4 = twgl.m4;
const gl = document.querySelector('canvas').getContext('webgl');
const vs = `
attribute vec4 position;
varying vec4 v_position;
void main() {
v_position = position;
gl_Position = position;
gl_Position.z = 1.0;
}
`;
const fs = `
precision highp float;
uniform sampler2D u_skybox;
uniform mat4 u_viewDirectionProjectionInverse;
varying vec4 v_position;
#define PI radians(180.0)
void main() {
vec4 t = u_viewDirectionProjectionInverse * v_position;
vec3 n = normalize(t.xyz / t.w);
// convert from direction (n) to texcoord (uv)
float latitude = acos(n.y);
float longitude = atan(n.z, n.x);
vec2 sphereCoords = vec2(longitude, latitude) * vec2(0.5 / PI, 1.0 / PI);
vec2 uv = fract(vec2(0.5,1.0) - sphereCoords);
// multiply u by 2 because we only have a 180degree view
gl_FragColor = texture2D(u_skybox, uv * vec2(-2, 1));
}
`;
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const tex = twgl.createTexture(gl, {
src: 'https://i.imgur.com/ChIfXM0.jpg',
flipY: true,
});
const bufferInfo = twgl.primitives.createXYQuadBufferInfo(gl, 2);
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const projectionMatrix = m4.perspective(45 * Math.PI / 180, aspect, 1, 20);
const cameraMatrix = m4.rotationY(time * 0.1);
m4.rotateX(cameraMatrix, Math.sin(time * 0.3) * 0.5, cameraMatrix);
const viewMatrix = m4.inverse(cameraMatrix);
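// zero out the translation so only the view direction affects the skybox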
viewMatrix[12] = 0;
viewMatrix[13] = 0;
viewMatrix[14] = 0;
const viewDirectionProjectionMatrix = m4.multiply(projectionMatrix, viewMatrix);
const viewDirectionProjectionInverseMatrix = m4.inverse(viewDirectionProjectionMatrix);
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.setUniforms(programInfo, {
u_viewDirectionProjectionInverse: viewDirectionProjectionInverseMatrix,
u_skybox: tex,
});
twgl.drawBufferInfo(gl, bufferInfo);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>
But some images are not equirectangular; they're fisheye.
I've been trying to figure out the math needed to do the same thing (map this to a quad-based skybox) but I've been having no luck.
As a reference I found this page with conversions from 3D to fisheye coords. It says:
// convert from direction (n) to texcoord (uv)
float r = 2.0 * atan(length(n.xy), n.z) / PI;
float theta = atan(n.y, n.x);
vec2 uv = vec2(cos(theta), sin(theta)) * r * 0.5 + 0.5;
const m4 = twgl.m4;
const gl = document.querySelector('canvas').getContext('webgl');
const vs = `
attribute vec4 position;
varying vec4 v_position;
void main() {
v_position = position;
gl_Position = position;
gl_Position.z = 1.0;
}
`;
const fs = `
precision highp float;
uniform sampler2D u_skybox;
uniform mat4 u_viewDirectionProjectionInverse;
varying vec4 v_position;
#define PI radians(180.0)
void main() {
vec4 t = u_viewDirectionProjectionInverse * v_position;
vec3 n = normalize(t.xyz / t.w);
// convert from direction (n) to texcoord (uv)
float r = 2.0 * atan(length(n.xy), n.z) / PI;
float theta = atan(n.y, n.x);
vec2 uv = vec2(cos(theta), sin(theta)) * r * 0.5 + 0.5;
#if 0
// Calculate fisheye angle and radius
float theta = atan(n.z, n.x);
float phi = atan(length(n.xz), n.y);
float r = phi / PI;
// Pixel in fisheye space
vec2 uv = vec2(0.5) + r * vec2(cos(theta), sin(theta));
#endif
// multiply u by 2 because we only have a 180degree view
gl_FragColor = texture2D(u_skybox, uv * vec2(-2, 1));
}
`;
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const tex = twgl.createTexture(gl, {
src: 'https://i.imgur.com/dzXCQwM.jpg',
flipY: true,
});
const bufferInfo = twgl.primitives.createXYQuadBufferInfo(gl, 2);
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const projectionMatrix = m4.perspective(45 * Math.PI / 180, aspect, 1, 20);
const cameraMatrix = m4.rotationY(time * 0.1);
m4.rotateX(cameraMatrix, 0.7 + Math.sin(time * 0.3) * .7, cameraMatrix);
const viewMatrix = m4.inverse(cameraMatrix);
viewMatrix[12] = 0;
viewMatrix[13] = 0;
viewMatrix[14] = 0;
const viewDirectionProjectionMatrix = m4.multiply(projectionMatrix, viewMatrix);
const viewDirectionProjectionInverseMatrix = m4.inverse(viewDirectionProjectionMatrix);
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.setUniforms(programInfo, {
u_viewDirectionProjectionInverse: viewDirectionProjectionInverseMatrix,
u_skybox: tex,
});
twgl.drawBufferInfo(gl, bufferInfo);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>
Clearly I'm missing something.
I believe the error lies in this logic:
// multiply u by 2 because we only have a 180degree view
gl_FragColor = texture2D(u_skybox, uv * vec2(-2, 1));
Although this works in the equirectangular case because the math works out such that the z-component only affects longitude, it is no longer valid in the fisheye case because n.z affects both axes.
You can account for the negative z-component in the formula by taking the absolute value of n.z and flipping n.x when the z is negative:
// convert from direction (n) to texcoord (uv)
float r = 2.0 * atan(length(n.xy), abs(n.z)) / PI;
float theta = atan(n.y, n.x * sign(n.z));
vec2 uv = vec2(cos(theta), sin(theta)) * r * 0.5 + vec2(0.5);
Here it is in action:
const m4 = twgl.m4;
const gl = document.querySelector('canvas').getContext('webgl');
const vs = `
attribute vec4 position;
varying vec4 v_position;
void main() {
v_position = position;
gl_Position = position;
gl_Position.z = 1.0;
}
`;
const fs = `
precision highp float;
uniform sampler2D u_skybox;
uniform mat4 u_viewDirectionProjectionInverse;
varying vec4 v_position;
#define PI radians(180.0)
void main() {
vec4 t = u_viewDirectionProjectionInverse * v_position;
vec3 n = normalize(t.xyz / t.w);
// convert from direction (n) to texcoord (uv)
float r = 2.0 * atan(length(n.xy), abs(n.z)) / PI;
float theta = atan(n.y, n.x * sign(n.z));
vec2 uv = vec2(cos(theta), sin(theta)) * r * 0.5 + vec2(0.5);
gl_FragColor = texture2D(u_skybox, uv * vec2(-1.0, 1.0));
}
`;
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const tex = twgl.createTexture(gl, {
src: 'https://i.imgur.com/dzXCQwM.jpg',
flipY: true,
});
const bufferInfo = twgl.primitives.createXYQuadBufferInfo(gl, 2);
function render(time) {
time *= 0.001;
twgl.resizeCanvasToDisplaySize(gl.canvas);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const projectionMatrix = m4.perspective(45 * Math.PI / 180, aspect, 1, 20);
const cameraMatrix = m4.rotationY(time * 0.1);
m4.rotateX(cameraMatrix, 0.7 + Math.sin(time * 0.3) * .7, cameraMatrix);
const viewMatrix = m4.inverse(cameraMatrix);
viewMatrix[12] = 0;
viewMatrix[13] = 0;
viewMatrix[14] = 0;
const viewDirectionProjectionMatrix = m4.multiply(projectionMatrix, viewMatrix);
const viewDirectionProjectionInverseMatrix = m4.inverse(viewDirectionProjectionMatrix);
gl.useProgram(programInfo.program);
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
twgl.setUniforms(programInfo, {
u_viewDirectionProjectionInverse: viewDirectionProjectionInverseMatrix,
u_skybox: tex,
});
twgl.drawBufferInfo(gl, bufferInfo);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
<canvas></canvas>

webgl: fastest approach to drawing many circles

I'm currently drawing thousands of circles, instancing a circle geometry (many triangles).
Alternatively, I could simply instance a quad (2 triangles) and cut out a circle in the fragment shader, using a distance function and discard.
Which approach would be faster? Is drawing many triangles more expensive than the calculations done in the fragment shader?
The fastest way might depend on the GPU and lots of other factors: how you're drawing the circles (2D or 3D), whether you're blending them, whether you're using the z-buffer, etc. But in general, fewer triangles is faster than more, and fewer pixels is faster than more. So all we can really do is try.
First let's just draw textured quads with no blending. First off, I always seem to get inconsistent perf from WebGL, but in my tests on my GPU I get 20k-30k quads at 60fps in this 300x150 canvas using instancing.
function main() {
const gl = document.querySelector('canvas').getContext('webgl');
const ext = gl.getExtension('ANGLE_instanced_arrays');
if (!ext) {
return alert('need ANGLE_instanced_arrays');
}
twgl.addExtensionsToContext(gl);
const vs = `
attribute float id;
attribute vec4 position;
attribute vec2 texcoord;
uniform float time;
varying vec2 v_texcoord;
varying vec4 v_color;
void main() {
float o = id + time;
gl_Position = position + vec4(
vec2(
fract(o * 0.1373),
fract(o * 0.5127)) * 2.0 - 1.0,
0, 0);
v_texcoord = texcoord;
v_color = vec4(fract(vec3(id) * vec3(0.127, 0.373, 0.513)), 1);
}`;
const fs = `
precision mediump float;
varying vec2 v_texcoord;
varying vec4 v_color;
uniform sampler2D tex;
void main() {
gl_FragColor = texture2D(tex, v_texcoord) * v_color;
}
`;
// compile shaders, link program, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const maxCount = 250000;
const ids = new Float32Array(maxCount);
for (let i = 0; i < ids.length; ++i) {
ids[i] = i;
}
const x = 16 / 300 * 2;
const y = 16 / 150 * 2;
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
position: {
numComponents: 2,
data: [
-x, -y,
x, -y,
-x, y,
-x, y,
x, -y,
x, y,
],
},
texcoord: [
0, 1,
1, 1,
0, 0,
0, 0,
1, 1,
1, 0,
],
id: {
numComponents: 1,
data: ids,
divisor: 1,
}
});
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
{
const ctx = document.createElement('canvas').getContext('2d');
ctx.canvas.width = 32;
ctx.canvas.height = 32;
ctx.fillStyle = 'white';
ctx.beginPath();
ctx.arc(16, 16, 15, 0, Math.PI * 2);
ctx.fill();
const tex = twgl.createTexture(gl, { src: ctx.canvas });
}
const fpsElem = document.querySelector('#fps');
const countElem = document.querySelector('#count');
let count;
function getCount() {
count = Math.min(maxCount, parseInt(countElem.value));
}
countElem.addEventListener('input', getCount);
getCount();
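// keep a rolling average of the last 60 frame times for the fps display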
const maxHistory = 60;
const fpsHistory = new Array(maxHistory).fill(0);
let historyNdx = 0;
let historyTotal = 0;
let then = 0;
function render(now) {
const deltaTime = now - then;
then = now;
historyTotal += deltaTime - fpsHistory[historyNdx];
fpsHistory[historyNdx] = deltaTime;
historyNdx = (historyNdx + 1) % maxHistory;
fpsElem.textContent = (1000 / (historyTotal / maxHistory)).toFixed(1);
gl.useProgram(programInfo.program);
twgl.setUniforms(programInfo, {time: now * 0.001});
ext.drawArraysInstancedANGLE(gl.TRIANGLES, 0, 6, count);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
canvas { display: block; border: 1px solid black; }
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
<canvas></canvas>
<div>fps: <span id="fps"></span></div>
<div>count: <input type="number" id="count" min="0" max="1000000" value="25000"></div>
And I get the same perf at 60fps using repeated geometry instead of instancing. That's surprising to me because 7-8 years ago when I tested, repeated geometry was 20-30% faster. Whether that's because of having a better GPU now, or a better driver, or what, I have no idea.
function main() {
const gl = document.querySelector('canvas').getContext('webgl');
const vs = `
attribute float id;
attribute vec4 position;
attribute vec2 texcoord;
uniform float time;
varying vec2 v_texcoord;
varying vec4 v_color;
void main() {
float o = id + time;
gl_Position = position + vec4(
vec2(
fract(o * 0.1373),
fract(o * 0.5127)) * 2.0 - 1.0,
0, 0);
v_texcoord = texcoord;
v_color = vec4(fract(vec3(id) * vec3(0.127, 0.373, 0.513)), 1);
}`;
const fs = `
precision mediump float;
varying vec2 v_texcoord;
varying vec4 v_color;
uniform sampler2D tex;
void main() {
gl_FragColor = texture2D(tex, v_texcoord) * v_color;
}
`;
// compile shaders, link program, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const maxCount = 250000;
const x = 16 / 300 * 2;
const y = 16 / 150 * 2;
const quadPositions = [
-x, -y,
x, -y,
-x, y,
-x, y,
x, -y,
x, y,
];
const quadTexcoords = [
0, 1,
1, 1,
0, 0,
0, 0,
1, 1,
1, 0,
];
const positions = new Float32Array(maxCount * 2 * 6);
const texcoords = new Float32Array(maxCount * 2 * 6);
for (let i = 0; i < maxCount; ++i) {
const off = i * 2 * 6;
positions.set(quadPositions, off);
texcoords.set(quadTexcoords, off);
}
const ids = new Float32Array(maxCount * 6);
for (let i = 0; i < ids.length; ++i) {
ids[i] = i / 6 | 0;
}
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
position: {
numComponents: 2,
data: positions,
},
texcoord: texcoords,
id: {
numComponents: 1,
data: ids,
}
});
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
{
const ctx = document.createElement('canvas').getContext('2d');
ctx.canvas.width = 32;
ctx.canvas.height = 32;
ctx.fillStyle = 'white';
ctx.beginPath();
ctx.arc(16, 16, 15, 0, Math.PI * 2);
ctx.fill();
const tex = twgl.createTexture(gl, { src: ctx.canvas });
}
const fpsElem = document.querySelector('#fps');
const countElem = document.querySelector('#count');
let count;
function getCount() {
count = Math.min(maxCount, parseInt(countElem.value));
}
countElem.addEventListener('input', getCount);
getCount();
const maxHistory = 60;
const fpsHistory = new Array(maxHistory).fill(0);
let historyNdx = 0;
let historyTotal = 0;
let then = 0;
function render(now) {
const deltaTime = now - then;
then = now;
historyTotal += deltaTime - fpsHistory[historyNdx];
fpsHistory[historyNdx] = deltaTime;
historyNdx = (historyNdx + 1) % maxHistory;
fpsElem.textContent = (1000 / (historyTotal / maxHistory)).toFixed(1);
gl.useProgram(programInfo.program);
twgl.setUniforms(programInfo, {time: now * 0.001});
gl.drawArrays(gl.TRIANGLES, 0, 6 * count);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
canvas { display: block; border: 1px solid black; }
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
<canvas></canvas>
<div>fps: <span id="fps"></span></div>
<div>count: <input type="number" id="count" min="0" max="1000000" value="25000"></div>
Next thing would be textures or computing a circle in the fragment shader.
function main() {
const gl = document.querySelector('canvas').getContext('webgl');
const ext = gl.getExtension('ANGLE_instanced_arrays');
if (!ext) {
return alert('need ANGLE_instanced_arrays');
}
twgl.addExtensionsToContext(gl);
const vs = `
attribute float id;
attribute vec4 position;
attribute vec2 texcoord;
uniform float time;
varying vec2 v_texcoord;
varying vec4 v_color;
void main() {
float o = id + time;
gl_Position = position + vec4(
vec2(
fract(o * 0.1373),
fract(o * 0.5127)) * 2.0 - 1.0,
0, 0);
v_texcoord = texcoord;
v_color = vec4(fract(vec3(id) * vec3(0.127, 0.373, 0.513)), 1);
}`;
const fs = `
precision mediump float;
varying vec2 v_texcoord;
varying vec4 v_color;
void main() {
gl_FragColor = mix(
v_color,
vec4(0),
step(1.0, length(v_texcoord.xy * 2. - 1.)));
}
`;
// compile shaders, link program, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const maxCount = 250000;
const ids = new Float32Array(maxCount);
for (let i = 0; i < ids.length; ++i) {
ids[i] = i;
}
const x = 16 / 300 * 2;
const y = 16 / 150 * 2;
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
position: {
numComponents: 2,
data: [
-x, -y,
x, -y,
-x, y,
-x, y,
x, -y,
x, y,
],
},
texcoord: [
0, 1,
1, 1,
0, 0,
0, 0,
1, 1,
1, 0,
],
id: {
numComponents: 1,
data: ids,
divisor: 1,
}
});
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
const fpsElem = document.querySelector('#fps');
const countElem = document.querySelector('#count');
let count;
function getCount() {
count = Math.min(maxCount, parseInt(countElem.value));
}
countElem.addEventListener('input', getCount);
getCount();
const maxHistory = 60;
const fpsHistory = new Array(maxHistory).fill(0);
let historyNdx = 0;
let historyTotal = 0;
let then = 0;
function render(now) {
const deltaTime = now - then;
then = now;
historyTotal += deltaTime - fpsHistory[historyNdx];
fpsHistory[historyNdx] = deltaTime;
historyNdx = (historyNdx + 1) % maxHistory;
fpsElem.textContent = (1000 / (historyTotal / maxHistory)).toFixed(1);
gl.useProgram(programInfo.program);
twgl.setUniforms(programInfo, {time: now * 0.001});
ext.drawArraysInstancedANGLE(gl.TRIANGLES, 0, 6, count);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
canvas { display: block; border: 1px solid black; }
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
<canvas></canvas>
<div>fps: <span id="fps"></span></div>
<div>count: <input type="number" id="count" min="0" max="1000000" value="25000"></div>
I get no measurable difference. Trying your circle function:
function main() {
const gl = document.querySelector('canvas').getContext('webgl');
const ext = gl.getExtension('ANGLE_instanced_arrays');
if (!ext) {
return alert('need ANGLE_instanced_arrays');
}
twgl.addExtensionsToContext(gl);
const vs = `
attribute float id;
attribute vec4 position;
attribute vec2 texcoord;
uniform float time;
varying vec2 v_texcoord;
varying vec4 v_color;
void main() {
float o = id + time;
gl_Position = position + vec4(
vec2(
fract(o * 0.1373),
fract(o * 0.5127)) * 2.0 - 1.0,
0, 0);
v_texcoord = texcoord;
v_color = vec4(fract(vec3(id) * vec3(0.127, 0.373, 0.513)), 1);
}`;
const fs = `
precision mediump float;
varying vec2 v_texcoord;
varying vec4 v_color;
float circle(in vec2 st, in float radius) {
vec2 dist = st - vec2(0.5);
return 1.0 - smoothstep(
radius - (radius * 0.01),
radius +(radius * 0.01),
dot(dist, dist) * 4.0);
}
void main() {
gl_FragColor = mix(
vec4(0),
v_color,
circle(v_texcoord, 1.0));
}
`;
// compile shaders, link program, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const maxCount = 250000;
const ids = new Float32Array(maxCount);
for (let i = 0; i < ids.length; ++i) {
ids[i] = i;
}
const x = 16 / 300 * 2;
const y = 16 / 150 * 2;
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
position: {
numComponents: 2,
data: [
-x, -y,
x, -y,
-x, y,
-x, y,
x, -y,
x, y,
],
},
texcoord: [
0, 1,
1, 1,
0, 0,
0, 0,
1, 1,
1, 0,
],
id: {
numComponents: 1,
data: ids,
divisor: 1,
}
});
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
const fpsElem = document.querySelector('#fps');
const countElem = document.querySelector('#count');
let count;
function getCount() {
count = Math.min(maxCount, parseInt(countElem.value));
}
countElem.addEventListener('input', getCount);
getCount();
const maxHistory = 60;
const fpsHistory = new Array(maxHistory).fill(0);
let historyNdx = 0;
let historyTotal = 0;
let then = 0;
function render(now) {
const deltaTime = now - then;
then = now;
historyTotal += deltaTime - fpsHistory[historyNdx];
fpsHistory[historyNdx] = deltaTime;
historyNdx = (historyNdx + 1) % maxHistory;
fpsElem.textContent = (1000 / (historyTotal / maxHistory)).toFixed(1);
gl.useProgram(programInfo.program);
twgl.setUniforms(programInfo, {time: now * 0.001});
ext.drawArraysInstancedANGLE(gl.TRIANGLES, 0, 6, count);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
canvas { display: block; border: 1px solid black; }
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
<canvas></canvas>
<div>fps: <span id="fps"></span></div>
<div>count: <input type="number" id="count" min="0" max="1000000" value="25000"></div>
I again get no measurable difference. Note: like I said above, I get wildly inconsistent results in WebGL. When I ran the first test I got 28k at 60fps. When I ran the second I got 23k. I was surprised since I expected the 2nd to be faster, so I ran the first again and only got 23k. The last one I got 29k and was again surprised, but then I went back and did the previous one and got 29k. Basically that means testing timing in WebGL is nearly impossible. There are so many moving parts, given everything is multi-process, that getting consistent results seems impossible.
We could try discard:
function main() {
const gl = document.querySelector('canvas').getContext('webgl');
const ext = gl.getExtension('ANGLE_instanced_arrays');
if (!ext) {
return alert('need ANGLE_instanced_arrays');
}
twgl.addExtensionsToContext(gl);
const vs = `
attribute float id;
attribute vec4 position;
attribute vec2 texcoord;
uniform float time;
varying vec2 v_texcoord;
varying vec4 v_color;
void main() {
float o = id + time;
gl_Position = position + vec4(
vec2(
fract(o * 0.1373),
fract(o * 0.5127)) * 2.0 - 1.0,
0, 0);
v_texcoord = texcoord;
v_color = vec4(fract(vec3(id) * vec3(0.127, 0.373, 0.513)), 1);
}`;
const fs = `
precision mediump float;
varying vec2 v_texcoord;
varying vec4 v_color;
float circle(in vec2 st, in float radius) {
vec2 dist = st - vec2(0.5);
return 1.0 - smoothstep(
radius - (radius * 0.01),
radius +(radius * 0.01),
dot(dist, dist) * 4.0);
}
void main() {
if (circle(v_texcoord, 1.0) < 0.5) {
discard;
}
gl_FragColor = v_color;
}
`;
// compile shaders, link program, look up locations
const programInfo = twgl.createProgramInfo(gl, [vs, fs]);
const maxCount = 250000;
const ids = new Float32Array(maxCount);
for (let i = 0; i < ids.length; ++i) {
ids[i] = i;
}
const x = 16 / 300 * 2;
const y = 16 / 150 * 2;
const bufferInfo = twgl.createBufferInfoFromArrays(gl, {
position: {
numComponents: 2,
data: [
-x, -y,
x, -y,
-x, y,
-x, y,
x, -y,
x, y,
],
},
texcoord: [
0, 1,
1, 1,
0, 0,
0, 0,
1, 1,
1, 0,
],
id: {
numComponents: 1,
data: ids,
divisor: 1,
}
});
twgl.setBuffersAndAttributes(gl, programInfo, bufferInfo);
const fpsElem = document.querySelector('#fps');
const countElem = document.querySelector('#count');
let count;
function getCount() {
count = Math.min(maxCount, parseInt(countElem.value));
}
countElem.addEventListener('input', getCount);
getCount();
const maxHistory = 60;
const fpsHistory = new Array(maxHistory).fill(0);
let historyNdx = 0;
let historyTotal = 0;
let then = 0;
function render(now) {
const deltaTime = now - then;
then = now;
historyTotal += deltaTime - fpsHistory[historyNdx];
fpsHistory[historyNdx] = deltaTime;
historyNdx = (historyNdx + 1) % maxHistory;
fpsElem.textContent = (1000 / (historyTotal / maxHistory)).toFixed(1);
gl.useProgram(programInfo.program);
twgl.setUniforms(programInfo, {time: now * 0.001});
ext.drawArraysInstancedANGLE(gl.TRIANGLES, 0, 6, count);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
canvas { display: block; border: 1px solid black; }
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
<canvas></canvas>
<div>fps: <span id="fps"></span></div>
<div>count: <input type="number" id="count" min="0" max="1000000" value="25000"></div>
Given the inconsistent timing I can't be sure, but my impression is discard is slower. IIRC discard is slow because without discard the GPU knows even before it executes the fragment shader that it's going to update the z-buffer, whereas with discard it doesn't know until after the shader executes, and that difference means certain things can't be optimized as well.
I'm going to stop there because there's just too many combinations of things to try.
We could try turning blending on. Blending is also generally slower, since the GPU has to blend (read the background), but is it slower than discard? I don't know.
Do you have the depth test on? If so then draw order will be important.
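For reference, a minimal sketch of what turning blending on in these tests would look like (an assumption; none of the tests above actually measure this):
// hypothetical variation on the tests above: standard alpha blending
gl.enable(gl.BLEND);
gl.blendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA);
// if the depth test is also on, you'd typically sort and draw back-to-front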
Yet another thing to test is using non-quads like hexagons or octagons, as that would run fewer pixels through the fragment shader. I suspect you might need to make the circles bigger to see it, but if we have a 100x100 pixel quad, that's 10k pixels. Perfect circle geometry covers about pi*r^2, or ~7853 pixels, 21% fewer. A hexagon would be ~8740 pixels, 11% fewer, and an octagon somewhere in between. Drawing 11% to 21% fewer pixels is usually a win, but of course for a hexagon you'd be drawing 3x more triangles, and for an octagon 4x more. You'd basically have to test all these cases. A rough check of those pixel counts is sketched below.
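As a rough check of those pixel counts (a sketch; the exact polygon numbers vary a little with orientation and how tightly the polygon hugs the circle):
// pixel-area estimates for a circle of radius r = 50 inside different hulls;
// a regular n-gon circumscribing a circle of radius r has area n*r*r*tan(PI/n)
const r = 50;
const quad    = (2 * r) ** 2;                       // 10000
const circle  = Math.PI * r * r;                    // ~7854, ~21% fewer than the quad
const hexagon = 6 * r * r * Math.tan(Math.PI / 6);  // ~8660, ~13% fewer
const octagon = 8 * r * r * Math.tan(Math.PI / 8);  // ~8284, ~17% fewer
console.log({quad, circle, hexagon, octagon});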
That points out another issue: I believe you'd get different relative results with larger circles on a larger canvas, since there'd be more pixels per circle. For any given number of circles drawn, a larger share of the time would be spent drawing pixels and less calculating vertices and/or restarting the GPU to draw the next circle.
Update
Testing Chrome vs Firefox, I got 60k-66k in all cases in Chrome on the same machine. No idea why the difference is so vast, given that WebGL itself is doing almost nothing; all 4 tests have only a single draw call per frame. But whatever, at least as of 2019-10, Chrome is more than twice as fast for this particular case as Firefox.
One idea: I have a dual-GPU laptop. When you create the context you can tell WebGL what you're targeting by passing in the powerPreference context creation attribute, as in:
const gl = document.querySelector('canvas').getContext('webgl', {
powerPreference: 'high-performance',
});
The options are 'default', 'low-power', and 'high-performance'. 'default' means "let the browser decide", but ultimately all of them mean "let the browser decide". In any case, setting that above didn't change anything in Firefox for me.

Alpha channel fade animation

I'm pretty new to shaders and I've been attempting to create a shader that does an alpha reveal of a texture. I've gotten close, but I'm pretty sure there is a much better way.
This is what I have so far: https://codepen.io/tkmoney/pen/REYrpV
varying vec2 vUv;
precision highp float;
precision highp int;
uniform sampler2D texture;
uniform float mask_position;
uniform float fade_size;
void main(void) {
float mask_starting_point = (0.0 - fade_size);
float mask_ending_point = (1.0 - fade_size);
vec4 orig_color = texture2D(texture, vUv);
vec4 color = texture2D(texture, vUv);
float mask_p = smoothstep(mask_starting_point, mask_ending_point, mask_position);
//color.a *= (distance(vUv.x, split_center_point));
vec2 p = vUv;
if (p.x > (mask_p)){
color.a = 0.0;
}else{
color.a *= (smoothstep(mask_position, (mask_position - fade_size), p.x ));
}
gl_FragColor = color;
}
The fade-in does not completely reveal the entire image, as you can see. Any insight on a better way to tackle this would be great. Thanks!
What you want to do is make the area to the left of mask_position visible, but hide the area to the right. This can be achieved with step:
color.a *= 1.0 - step(mask_position, vUv.x);
If you want a smooth transition from the visible to the invisible area, then you have to use smoothstep. The start of the fading is a certain amount before mask_position and the end a certain amount after mask_position:
float start_p = mask_position-fade_size;
float end_p = mask_position+fade_size;
color.a *= 1.0 - smoothstep(start_p, end_p, vUv.x);
This will cause the start and the end of the image never to fade in completely. To compensate for this, vUv.x has to be mapped from the range [0.0, 1.0] to the range [fade_size, 1.0-fade_size]. This can be calculated with mix with ease:
float p = vUv.x * (1.0 - 2.0*fade_size) + fade_size; // same as mix(fade_size, 1.0-fade_size, vUv.x)
color.a *= 1.0 - smoothstep(start_p, end_p, p);
If the alpha channel of the final color is below a tiny threshold, the fragment can be discarded:
if ( color.a < 0.01 )
discard;
Final shader:
varying vec2 vUv;
precision highp float;
precision highp int;
uniform sampler2D texture;
uniform float mask_position;
uniform float fade_size;
void main(void) {
vec4 color = texture2D(texture, vUv);
float start_p = mask_position-fade_size;
float end_p = mask_position+fade_size;
float p = mix(fade_size, 1.0-fade_size, vUv.x);
color.a *= 1.0 - smoothstep(start_p, end_p, p);
if ( color.a < 0.01 )
discard;
gl_FragColor = color;
}
See the example:
var container;
var camera, scene, renderer;
var uniforms;
init();
animate();
function init() {
container = document.getElementById( 'container' );
camera = new THREE.Camera();
camera.position.z = 1;
scene = new THREE.Scene();
var geometry = new THREE.PlaneBufferGeometry( 2, 2 );
var texture = new THREE.TextureLoader().load( 'https://raw.githubusercontent.com/Rabbid76/graphics-snippets/master/resource/texture/background.jpg' );
uniforms = {
u_time: { type: "f", value: 1.0 },
u_resolution: { type: "v2", value: new THREE.Vector2() },
u_mouse: { type: "v2", value: new THREE.Vector2() },
texture: {type: 't', value: texture},
fade_size: { type: 'f', value: 0.2 },
mask_position: { type: 'f', value: 0 }
};
var material = new THREE.ShaderMaterial( {
uniforms: uniforms,
vertexShader: document.getElementById( 'vertexShader' ).textContent,
fragmentShader: document.getElementById( 'fragmentShader' ).textContent
} );
var mesh = new THREE.Mesh( geometry, material );
scene.add( mesh );
renderer = new THREE.WebGLRenderer( {alpha : true} );
renderer.setClearColor(0xffffff, 0.0);
renderer.setPixelRatio( window.devicePixelRatio );
container.appendChild( renderer.domElement );
onWindowResize();
window.addEventListener( 'resize', onWindowResize, false );
document.onmousemove = function(e){
uniforms.u_mouse.value.x = e.pageX
uniforms.u_mouse.value.y = e.pageY
}
}
function onWindowResize( event ) {
renderer.setSize( window.innerWidth, window.innerHeight );
uniforms.u_resolution.value.x = renderer.domElement.width;
uniforms.u_resolution.value.y = renderer.domElement.height;
}
function animate() {
requestAnimationFrame( animate );
render();
}
var mask_step = 0.01;
var mask_val = 0.0;
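// bounce mask_position back and forth between 0.0 and 1.0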
function render() {
if ( mask_val >= 1.0) { mask_val = 1.0; mask_step = -0.01; }
else if ( mask_val <= -0.0) { mask_val = 0.0; mask_step = 0.01; }
mask_val += mask_step;
uniforms.mask_position.value = mask_val;
uniforms.u_time.value += 0.05;
renderer.render( scene, camera );
}
<script id="vertexShader" type="x-shader/x-vertex">
void main() {
gl_Position = vec4( position, 1.0 );
}
</script>
<script id="fragmentShader" type="x-shader/x-fragment">
uniform vec2 u_resolution;
uniform float u_time;
uniform sampler2D texture;
uniform float fade_size;
uniform float mask_position;
void main() {
vec2 vUv = gl_FragCoord.xy/u_resolution.xy;
vec4 color = texture2D(texture, vUv);
float start_p = mask_position-fade_size;
float end_p = mask_position+fade_size;
float p = mix(fade_size, 1.0-fade_size, vUv.x);
color.rgba *= 1.0 - smoothstep(start_p, end_p, p);
if ( color.a < 0.01 )
discard;
gl_FragColor = color;
}
</script>
<div id="container"></div>
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/100/three.min.js"></script>

Implementing depth testing for semi-transparent objects

I've been carefully trawling the internet for the past two days to understand depth testing for semi-transparent objects. I've read multiple papers/tutorials on the subject, and in theory I believe I understand how it works. However, none of them gives me actual example code.
I have three requirements for my depth testing of semi-transparent objects:
It should be order independent.
It should work when two quads of the same object intersect each other, both semi-transparent. Imagine a grass object that looks like an X when viewed from above:
It should correctly render a semi-transparent player rgba(0, 1, 0, 0.5), behind a building's window rgba(0, 0, 1, 0.5), but in front of a background object rgba(1, 0, 0, 1):
The line on the far left shows how I imagine the light/color changes as it travels through the semi-transparent objects towards the camera.
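For that third requirement, here is a quick back-to-front "over" compositing check (a sketch assuming standard non-premultiplied alpha blending) of the color a correct renderer should converge to for that stack:
// composite src over dst with alpha a: result = src*a + dst*(1-a)
const over = (src, dst, a) => src.map((c, i) => c * a + dst[i] * (1 - a));
let c = [1, 0, 0];            // opaque red background
c = over([0, 1, 0], c, 0.5);  // green player at 0.5 -> [0.5, 0.5, 0]
c = over([0, 0, 1], c, 0.5);  // blue window at 0.5  -> [0.25, 0.25, 0.5]
console.log(c);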
Final Thoughts
I suspect the best approach is depth peeling, but I'm still lacking an implementation/example. I'm leaning towards this approach because the game is 2.5D, and since depth peeling could get dangerous for performance (lots of layers to peel), there won't ever need to be more than two semi-transparent objects to "peel".
I'm already familiar with framebuffers and how to code them (doing some post processing effects with them). I will be using them, right?
Most of my knowledge of OpenGL comes from this tutorial, but it covers depth testing and semi-transparency separately. He also sadly doesn't cover order-independent transparency at all (see the bottom of the Blending page).
Finally, please don't answer only in theory. e.g.
Draw opaque, draw transparent, draw opaque again, etc.
My ideal answer will contain code of how the buffers are configured, the shaders, and screenshots of each pass with an explanation of what its doing.
The programming language used is also not too important, as long as it uses OpenGL 4 or newer. The non-OpenGL code can be pseudocode (I don't care how you sort an array or create a GLFW window).
EDIT:
I'm updating my question to include an example of the current state of my code. This example draws the semi-transparent player (green) first, the opaque background (red) second, and then the semi-transparent window (blue). However, the depth should be determined by the Z position of each square, not the order in which it is drawn.
(function() {
// your page initialization code here
// the DOM will be available here
var script = document.createElement('script');
script.onload = function () {
main();
};
script.src = 'https://mdn.github.io/webgl-examples/tutorial/gl-matrix.js';
document.head.appendChild(script); //or something of the likes
})();
//
// Start here
//
function main() {
const canvas = document.querySelector('#glcanvas');
const gl = canvas.getContext('webgl', {alpha:false});
// If we don't have a GL context, give up now
if (!gl) {
alert('Unable to initialize WebGL. Your browser or machine may not support it.');
return;
}
// Vertex shader program
const vsSource = `
attribute vec4 aVertexPosition;
attribute vec4 aVertexColor;
uniform mat4 uModelViewMatrix;
uniform mat4 uProjectionMatrix;
varying lowp vec4 vColor;
void main(void) {
gl_Position = uProjectionMatrix * uModelViewMatrix * aVertexPosition;
vColor = aVertexColor;
}
`;
// Fragment shader program
const fsSource = `
varying lowp vec4 vColor;
void main(void) {
gl_FragColor = vColor;
}
`;
// Initialize a shader program; this is where all the lighting
// for the vertices and so forth is established.
const shaderProgram = initShaderProgram(gl, vsSource, fsSource);
// Collect all the info needed to use the shader program.
// Look up which attributes our shader program is using
// for aVertexPosition, aVevrtexColor and also
// look up uniform locations.
const programInfo = {
program: shaderProgram,
attribLocations: {
vertexPosition: gl.getAttribLocation(shaderProgram, 'aVertexPosition'),
vertexColor: gl.getAttribLocation(shaderProgram, 'aVertexColor'),
},
uniformLocations: {
projectionMatrix: gl.getUniformLocation(shaderProgram, 'uProjectionMatrix'),
modelViewMatrix: gl.getUniformLocation(shaderProgram, 'uModelViewMatrix'),
},
};
// Here's where we call the routine that builds all the
// objects we'll be drawing.
const buffers = initBuffers(gl);
// Draw the scene
drawScene(gl, programInfo, buffers);
}
//
// initBuffers
//
// Initialize the buffers we'll need. For this demo, we just
// have one object -- a simple two-dimensional square.
//
function initBuffers(gl) {
// Create a buffer for the square's positions.
const positionBuffer0 = gl.createBuffer();
// Select the positionBuffer as the one to apply buffer
// operations to from here out.
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer0);
// Now create an array of positions for the square.
var positions = [
0.5, 0.5,
-0.5, 0.5,
0.5, -0.5,
-0.5, -0.5,
];
// Now pass the list of positions into WebGL to build the
// shape. We do this by creating a Float32Array from the
// JavaScript array, then use it to fill the current buffer.
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(positions), gl.STATIC_DRAW);
// Now set up the colors for the vertices
var colors = [
0.0, 1.0, 0.0, 0.5, // green, 50% alpha
0.0, 1.0, 0.0, 0.5,
0.0, 1.0, 0.0, 0.5,
0.0, 1.0, 0.0, 0.5,
];
const colorBuffer0 = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, colorBuffer0);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(colors), gl.STATIC_DRAW);
// Create a buffer for the square's positions.
const positionBuffer1 = gl.createBuffer();
// Select the positionBuffer as the one to apply buffer
// operations to from here out.
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer1);
// Now create an array of positions for the square.
positions = [
2.0, 0.4,
-2.0, 0.4,
2.0, -2.0,
-2.0, -2.0,
];
// Now pass the list of positions into WebGL to build the
// shape. We do this by creating a Float32Array from the
// JavaScript array, then use it to fill the current buffer.
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(positions), gl.STATIC_DRAW);
// Now set up the colors for the vertices
colors = [
1.0, 0.0, 0.0, 1.0, // red, opaque
1.0, 0.0, 0.0, 1.0,
1.0, 0.0, 0.0, 1.0,
1.0, 0.0, 0.0, 1.0,
];
const colorBuffer1 = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, colorBuffer1);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(colors), gl.STATIC_DRAW);
// Create a buffer for the square's positions.
const positionBuffer2 = gl.createBuffer();
// Select the positionBuffer as the one to apply buffer
// operations to from here out.
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer2);
// Now create an array of positions for the square.
positions = [
1.0, 1.0,
-0.0, 1.0,
1.0, -1.0,
-0.0, -1.0,
];
// Now pass the list of positions into WebGL to build the
// shape. We do this by creating a Float32Array from the
// JavaScript array, then use it to fill the current buffer.
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(positions), gl.STATIC_DRAW);
// Now set up the colors for the vertices
colors = [
0.0, 0.0, 1.0, 0.5, // blue, 50% alpha
0.0, 0.0, 1.0, 0.5,
0.0, 0.0, 1.0, 0.5,
0.0, 0.0, 1.0, 0.5,
];
const colorBuffer2 = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, colorBuffer2);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(colors), gl.STATIC_DRAW);
return {
position0: positionBuffer0,
color0: colorBuffer0,
position1: positionBuffer1,
color1: colorBuffer1,
position2: positionBuffer2,
color2: colorBuffer2,
};
}
//
// Draw the scene.
//
function drawScene(gl, programInfo, buffers) {
gl.clearColor(0.0, 0.0, 0.0, 1.0); // Clear to black, fully opaque
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
//gl.clearDepth(1.0); // Clear everything
gl.disable(gl.DEPTH_TEST)
gl.enable(gl.BLEND)
gl.blendEquation(gl.FUNC_ADD)
gl.blendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA)
// Clear the canvas before we start drawing on it.
// Create a perspective matrix, a special matrix that is
// used to simulate the distortion of perspective in a camera.
// Our field of view is 45 degrees, with a width/height
// ratio that matches the display size of the canvas
// and we only want to see objects between 0.1 units
// and 100 units away from the camera.
const fieldOfView = 45 * Math.PI / 180; // in radians
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const zNear = 0.1;
const zFar = 100.0;
const projectionMatrix = mat4.create();
// note: glmatrix.js always has the first argument
// as the destination to receive the result.
mat4.perspective(projectionMatrix,
fieldOfView,
aspect,
zNear,
zFar);
// Set the drawing position to the "identity" point, which is
// the center of the scene.
const modelViewMatrix = mat4.create();
// Now move the drawing position a bit to where we want to
// start drawing the square.
mat4.translate(modelViewMatrix, // destination matrix
modelViewMatrix, // matrix to translate
[-0.0, 0.0, -6.0]); // amount to translate
function drawSquare(positionbuffer, colorbuffer) {
// Tell WebGL how to pull out the positions from the position
// buffer into the vertexPosition attribute
{
const numComponents = 2;
const type = gl.FLOAT;
const normalize = false;
const stride = 0;
const offset = 0;
gl.bindBuffer(gl.ARRAY_BUFFER, positionbuffer);
gl.vertexAttribPointer(
programInfo.attribLocations.vertexPosition,
numComponents,
type,
normalize,
stride,
offset);
gl.enableVertexAttribArray(
programInfo.attribLocations.vertexPosition);
}
// Tell WebGL how to pull out the colors from the color buffer
// into the vertexColor attribute.
{
const numComponents = 4;
const type = gl.FLOAT;
const normalize = false;
const stride = 0;
const offset = 0;
gl.bindBuffer(gl.ARRAY_BUFFER, colorbuffer);
gl.vertexAttribPointer(
programInfo.attribLocations.vertexColor,
numComponents,
type,
normalize,
stride,
offset);
gl.enableVertexAttribArray(
programInfo.attribLocations.vertexColor);
}
// Tell WebGL to use our program when drawing
gl.useProgram(programInfo.program);
// Set the shader uniforms
gl.uniformMatrix4fv(
programInfo.uniformLocations.projectionMatrix,
false,
projectionMatrix);
gl.uniformMatrix4fv(
programInfo.uniformLocations.modelViewMatrix,
false,
modelViewMatrix);
{
const offset = 0;
const vertexCount = 4;
gl.drawArrays(gl.TRIANGLE_STRIP, offset, vertexCount);
}
}
drawSquare(buffers.position0, buffers.color0); // Player
drawSquare(buffers.position1, buffers.color1); // Background
drawSquare(buffers.position2, buffers.color2); // Window
}
//
// Initialize a shader program, so WebGL knows how to draw our data
//
function initShaderProgram(gl, vsSource, fsSource) {
const vertexShader = loadShader(gl, gl.VERTEX_SHADER, vsSource);
const fragmentShader = loadShader(gl, gl.FRAGMENT_SHADER, fsSource);
// Create the shader program
const shaderProgram = gl.createProgram();
gl.attachShader(shaderProgram, vertexShader);
gl.attachShader(shaderProgram, fragmentShader);
gl.linkProgram(shaderProgram);
// If creating the shader program failed, alert
if (!gl.getProgramParameter(shaderProgram, gl.LINK_STATUS)) {
alert('Unable to initialize the shader program: ' + gl.getProgramInfoLog(shaderProgram));
return null;
}
return shaderProgram;
}
//
// creates a shader of the given type, uploads the source and
// compiles it.
//
function loadShader(gl, type, source) {
const shader = gl.createShader(type);
// Send the source to the shader object
gl.shaderSource(shader, source);
// Compile the shader program
gl.compileShader(shader);
// See if it compiled successfully
if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) {
alert('An error occurred compiling the shaders: ' + gl.getShaderInfoLog(shader));
gl.deleteShader(shader);
return null;
}
return shader;
}
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width">
<title></title>
</head>
<body>
<canvas id="glcanvas" width="640" height="480"></canvas>
</body>
</html>
This seems to be what the paper linked by ripi2 is doing
function main() {
const m4 = twgl.m4;
const gl = document.querySelector('canvas').getContext('webgl2', {alpha: false});
if (!gl) {
alert('need WebGL2');
return;
}
const ext = gl.getExtension('EXT_color_buffer_float');
if (!ext) {
alert('EXT_color_buffer_float');
return;
}
const vs = `
#version 300 es
layout(location=0) in vec4 position;
uniform mat4 u_matrix;
void main() {
gl_Position = u_matrix * position;
}
`;
const checkerFS = `
#version 300 es
precision highp float;
uniform vec4 color1;
uniform vec4 color2;
out vec4 fragColor;
void main() {
ivec2 grid = ivec2(gl_FragCoord.xy) / 32;
fragColor = mix(color1, color2, float((grid.x + grid.y) % 2));
}
`;
const transparentFS = `
#version 300 es
precision highp float;
uniform vec4 Ci;
out vec4 fragData[2];
// weight function, equation (10) from the paper
float w(float z, float a) {
return a * max(pow(10.0,-2.0),3.0*pow(10.0,3.0)*pow((1.0 - z), 3.));
}
void main() {
float ai = Ci.a;
float zi = gl_FragCoord.z;
float wresult = w(zi, ai);
fragData[0] = vec4(Ci.rgb * wresult, ai);
fragData[1].r = ai * wresult;
}
`;
const compositeFS = `
#version 300 es
precision highp float;
uniform sampler2D ATexture;
uniform sampler2D BTexture;
out vec4 fragColor;
void main() {
vec4 accum = texelFetch(ATexture, ivec2(gl_FragCoord.xy), 0);
float r = accum.a;
accum.a = texelFetch(BTexture, ivec2(gl_FragCoord.xy), 0).r;
fragColor = vec4(accum.rgb / clamp(accum.a, 1e-4, 5e4), r);
}
`;
const checkerProgramInfo = twgl.createProgramInfo(gl, [vs, checkerFS]);
const transparentProgramInfo = twgl.createProgramInfo(gl, [vs, transparentFS]);
const compositeProgramInfo = twgl.createProgramInfo(gl, [vs, compositeFS]);
const bufferInfo = twgl.primitives.createXYQuadBufferInfo(gl);
const fbi = twgl.createFramebufferInfo(
gl,
[
{ internalFormat: gl.RGBA32F, minMag: gl.NEAREST },
{ internalFormat: gl.R32F, minMag: gl.NEAREST },
]);
function render(time) {
time *= 0.001;
twgl.setBuffersAndAttributes(gl, transparentProgramInfo, bufferInfo);
// drawOpaqueSurfaces();
gl.useProgram(checkerProgramInfo.program);
gl.disable(gl.BLEND);
twgl.setUniforms(checkerProgramInfo, {
color1: [.5, .5, .5, 1],
color2: [.7, .7, .7, 1],
u_matrix: m4.identity(),
});
twgl.drawBufferInfo(gl, bufferInfo);
twgl.bindFramebufferInfo(gl, fbi);
gl.drawBuffers([gl.COLOR_ATTACHMENT0, gl.COLOR_ATTACHMENT1]);
gl.clearBufferfv(gl.COLOR, 0, new Float32Array([0, 0, 0, 1]));
gl.clearBufferfv(gl.COLOR, 1, new Float32Array([1, 1, 1, 1]));
gl.depthMask(false);
gl.enable(gl.BLEND);
gl.blendFuncSeparate(gl.ONE, gl.ONE, gl.ZERO, gl.ONE_MINUS_SRC_ALPHA);
gl.useProgram(transparentProgramInfo.program);
// drawTransparentSurfaces();
const quads = [
[ .4, 0, 0, .4],
[ .4, .4, 0, .4],
[ 0, .4, 0, .4],
[ 0, .4, .4, .4],
[ 0, .0, .4, .4],
[ .4, .0, .4, .4],
];
quads.forEach((color, ndx) => {
const u = ndx / (quads.length - 1);
// change the order every second
const v = ((ndx + time | 0) % quads.length) / (quads.length - 1);
const xy = (u * 2 - 1) * .25;
const z = (v * 2 - 1) * .25;
let mat = m4.identity();
mat = m4.translate(mat, [xy, xy, z]);
mat = m4.scale(mat, [.3, .3, 1]);
twgl.setUniforms(transparentProgramInfo, {
Ci: color,
u_matrix: mat,
});
twgl.drawBufferInfo(gl, bufferInfo);
});
twgl.bindFramebufferInfo(gl, null);
gl.drawBuffers([gl.BACK]);
gl.blendFunc(gl.ONE_MINUS_SRC_ALPHA, gl.SRC_ALPHA);
gl.useProgram(compositeProgramInfo.program);
twgl.setUniforms(compositeProgramInfo, {
ATexture: fbi.attachments[0],
BTexture: fbi.attachments[1],
u_matrix: m4.identity(),
});
twgl.drawBufferInfo(gl, bufferInfo);
/* only needed if {alpha: false} not passed into getContext
gl.colorMask(false, false, false, true);
gl.clearColor(1, 1, 1, 1);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.colorMask(true, true, true, true);
*/
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
Some things to note:
It's using WebGL2, but the same technique should be possible in WebGL1; you'd have to change the shaders to GLSL ES 1.0.
It's using floating point textures. The paper mentions you can use half-float textures as well. Note that rendering to both half-float and float textures is an optional feature even in WebGL2. I believe most mobile hardware can render to half but not to float.
It's using weight equation 10 from the paper. There are 4 weight equations in the paper: 7, 8, 9, and 10. To do 7, 8, or 9 you'd need to pass view-space z from the vertex shader to the fragment shader (see the sketch after this list).
It's switching the order of drawing every second
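For reference, here's a minimal sketch (my own, not part of the snippet above) of the two changes those notes describe: checking which renderable float formats the device actually supports, and passing view-space z to the fragment shader so weight equations 7, 8, or 9 can be used.
// which renderable float formats does this device support?
const canRenderFloat = !!gl.getExtension('EXT_color_buffer_float');     // RGBA32F / R32F
const canRenderHalf = !!gl.getExtension('EXT_color_buffer_half_float'); // RGBA16F / R16F
// vertex shader that also outputs view-space z for equations 7-9
const vs = `
#version 300 es
layout(location=0) in vec4 position;
uniform mat4 u_projection;
uniform mat4 u_modelView;
out float v_viewZ;
void main() {
  vec4 viewPos = u_modelView * position;
  v_viewZ = -viewPos.z;  // positive distance in front of the camera
  gl_Position = u_projection * viewPos;
}
`;
// the fragment shader would then call w(v_viewZ, ai) instead of w(gl_FragCoord.z, ai)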
The code in the snippet above is pretty straightforward.
It creates 3 shaders. One draws a checkerboard, just so we have something opaque to see the transparent stuff drawn over. One is the transparent-object shader. The last is the shader that composites the transparent result into the scene.
Next it makes 2 textures, a floating point RGBA32F texture and a floating point R32F texture (red channel only), and attaches them to a framebuffer. (That is all done in one function, twgl.createFramebufferInfo, which makes the textures the same size as the canvas by default.)
We make a single quad that goes from -1 to +1
We use that quad to draw the checkerboard into the canvas
Then we turn on blending, set up the blend functions as the paper says, switch to rendering onto our framebuffer, and clear that framebuffer. Note it's cleared to 0,0,0,1 and 1 respectively. This is the version where we don't have separate blend functions per draw buffer. If you switch to the version that uses a separate blend function per draw buffer you need to clear to different values and use a different shader (see the paper).
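As an aside: core WebGL2 has no per-draw-buffer blend state, but the OES_draw_buffers_indexed extension adds it where supported. A rough sketch of the paper's separate-blend setup (my code, not something the snippet above uses):
const dbi = gl.getExtension('OES_draw_buffers_indexed');
if (dbi) {
  dbi.blendFunciOES(0, gl.ONE, gl.ONE);                  // accumulation target: additive
  dbi.blendFunciOES(1, gl.ZERO, gl.ONE_MINUS_SRC_COLOR); // revealage target: multiplicative
}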
Using our transparency shader we draw that same quad 6 times, each as a solid-colored rectangle. I just used solid colors to keep it simple. Each is at a different Z, and the Zs change every second just to show the effect of the Z order changing.
In the shader, Ci is the input color; it's expected to be a premultiplied-alpha color according to the paper. fragData[0] is the "accumulate" texture and fragData[1] is the "revealage" texture (only one channel, red). The w function represents equation 10 from the paper.
After all 6 quads are drawn we switch back to rendering to the canvas and use the compositing shader to composite the transparency result with the non-transparent canvas contents.
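To spell out what that last draw computes (my summary of the blend state and shaders above, with Ci premultiplied as the paper requires):
// after the transparent pass the attachments hold (ignoring the clear values):
//   accum.rgb = sum(w(zi, ai) * Ci.rgb)   RGB blended ONE, ONE
//   accum.a   = product(1 - ai)           alpha blended ZERO, ONE_MINUS_SRC_ALPHA -> "revealage"
//   B.r       = sum(w(zi, ai) * ai)       RGB blended ONE, ONE
// the composite draw then outputs
//   src = vec4(accum.rgb / clamp(B.r, 1e-4, 5e4), revealage)
// and gl.blendFunc(gl.ONE_MINUS_SRC_ALPHA, gl.SRC_ALPHA) resolves that to
//   canvas = src.rgb * (1 - revealage) + canvas.rgb * revealage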
Here's an example with some geometry. Differences:
It's using equation (7) from the paper instead of (10).
In order to do correct z-buffering the depth buffer needs to be shared between the opaque and transparent passes. So there are 2 framebuffers: one has RGBA8 + depth, the other RGBA32F + R32F + depth, with the depth attachment shared between them (see the sketch after this list).
The transparent pass computes simple lighting and then uses the result as the Ci value from the paper.
After compositing the transparent result into the opaque framebuffer we still need to copy the opaque framebuffer into the canvas to see the result.
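What twgl's attachment: option does there, expressed in raw WebGL2 calls, is roughly this (a sketch with made-up names, not code from the sample below):
// create one depth renderbuffer...
const depthRB = gl.createRenderbuffer();
gl.bindRenderbuffer(gl.RENDERBUFFER, depthRB);
gl.renderbufferStorage(gl.RENDERBUFFER, gl.DEPTH_COMPONENT16, gl.canvas.width, gl.canvas.height);
// ...and attach it to both framebuffers so the transparent pass
// depth-tests against what the opaque pass wrote
for (const fbo of [opaqueFBO, transparentFBO]) {
  gl.bindFramebuffer(gl.FRAMEBUFFER, fbo);
  gl.framebufferRenderbuffer(gl.FRAMEBUFFER, gl.DEPTH_ATTACHMENT, gl.RENDERBUFFER, depthRB);
}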
function main() {
const m4 = twgl.m4;
const v3 = twgl.v3;
const gl = document.querySelector('canvas').getContext('webgl2', {alpha: false});
if (!gl) {
alert('need WebGL2');
return;
}
const ext = gl.getExtension('EXT_color_buffer_float');
if (!ext) {
alert('EXT_color_buffer_float');
return;
}
const vs = `
#version 300 es
layout(location=0) in vec4 position;
layout(location=1) in vec3 normal;
uniform mat4 u_projection;
uniform mat4 u_modelView;
out vec4 v_viewPosition;
out vec3 v_normal;
void main() {
gl_Position = u_projection * u_modelView * position;
v_viewPosition = u_modelView * position;
v_normal = (u_modelView * vec4(normal, 0)).xyz;
}
`;
const checkerFS = `
#version 300 es
precision highp float;
uniform vec4 color1;
uniform vec4 color2;
out vec4 fragColor;
void main() {
ivec2 grid = ivec2(gl_FragCoord.xy) / 32;
fragColor = mix(color1, color2, float((grid.x + grid.y) % 2));
}
`;
const opaqueFS = `
#version 300 es
precision highp float;
in vec4 v_viewPosition;
in vec3 v_normal;
uniform vec4 u_color;
uniform vec3 u_lightDirection;
out vec4 fragColor;
void main() {
float light = abs(dot(normalize(v_normal), u_lightDirection));
fragColor = vec4(u_color.rgb * light, u_color.a);
}
`;
const transparentFS = `
#version 300 es
precision highp float;
uniform vec4 u_color;
uniform vec3 u_lightDirection;
in vec4 v_viewPosition;
in vec3 v_normal;
out vec4 fragData[2];
// eq (7)
float w(float z, float a) {
return a * max(
pow(10.0, -2.0),
min(
3.0 * pow(10.0, 3.0),
10.0 /
(pow(10.0, -5.0) +
pow(abs(z) / 5.0, 2.0) +
pow(abs(z) / 200.0, 6.0)
)
)
);
}
void main() {
float light = abs(dot(normalize(v_normal), u_lightDirection));
vec4 Ci = vec4(u_color.rgb * light, u_color.a);
float ai = Ci.a;
float zi = gl_FragCoord.z;
float wresult = w(zi, ai);
fragData[0] = vec4(Ci.rgb * wresult, ai);
fragData[1].r = ai * wresult;
}
`;
const compositeFS = `
#version 300 es
precision highp float;
uniform sampler2D ATexture;
uniform sampler2D BTexture;
out vec4 fragColor;
void main() {
vec4 accum = texelFetch(ATexture, ivec2(gl_FragCoord.xy), 0);
float r = accum.a;
accum.a = texelFetch(BTexture, ivec2(gl_FragCoord.xy), 0).r;
fragColor = vec4(accum.rgb / clamp(accum.a, 1e-4, 5e4), r);
}
`;
const blitFS = `
#version 300 es
precision highp float;
uniform sampler2D u_texture;
out vec4 fragColor;
void main() {
fragColor = texelFetch(u_texture, ivec2(gl_FragCoord.xy), 0);
}
`;
const checkerProgramInfo = twgl.createProgramInfo(gl, [vs, checkerFS]);
const opaqueProgramInfo = twgl.createProgramInfo(gl, [vs, opaqueFS]);
const transparentProgramInfo = twgl.createProgramInfo(gl, [vs, transparentFS]);
const compositeProgramInfo = twgl.createProgramInfo(gl, [vs, compositeFS]);
const blitProgramInfo = twgl.createProgramInfo(gl, [vs, blitFS]);
const xyQuadVertexArrayInfo = makeVAO(checkerProgramInfo, twgl.primitives.createXYQuadBufferInfo(gl));
const sphereVertexArrayInfo = makeVAO(transparentProgramInfo, twgl.primitives.createSphereBufferInfo(gl, 1, 16, 12));
const cubeVertexArrayInfo = makeVAO(opaqueProgramInfo, twgl.primitives.createCubeBufferInfo(gl, 1, 1));
function makeVAO(programInfo, bufferInfo) {
return twgl.createVertexArrayInfo(gl, programInfo, bufferInfo);
}
// In order to do proper zbuffering we need to share
// the depth buffer
const opaqueAttachments = [
{ internalFormat: gl.RGBA8, minMag: gl.NEAREST },
{ format: gl.DEPTH_COMPONENT16, minMag: gl.NEAREST },
];
const opaqueFBI = twgl.createFramebufferInfo(gl, opaqueAttachments);
const transparentAttachments = [
{ internalFormat: gl.RGBA32F, minMag: gl.NEAREST },
{ internalFormat: gl.R32F, minMag: gl.NEAREST },
{ format: gl.DEPTH_COMPONENT16, minMag: gl.NEAREST, attachment: opaqueFBI.attachments[1] },
];
const transparentFBI = twgl.createFramebufferInfo(gl, transparentAttachments);
function render(time) {
time *= 0.001;
if (twgl.resizeCanvasToDisplaySize(gl.canvas)) {
// if the canvas is resized also resize the framebuffer
// attachments (the depth buffer will be resized twice
// but I'm too lazy to fix it)
twgl.resizeFramebufferInfo(gl, opaqueFBI, opaqueAttachments);
twgl.resizeFramebufferInfo(gl, transparentFBI, transparentAttachments);
}
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const fov = 45 * Math.PI / 180;
const zNear = 0.1;
const zFar = 500;
const projection = m4.perspective(fov, aspect, zNear, zFar);
const eye = [0, 0, -5];
const target = [0, 0, 0];
const up = [0, 1, 0];
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
const lightDirection = v3.normalize([1, 3, 5]);
twgl.bindFramebufferInfo(gl, opaqueFBI);
gl.drawBuffers([gl.COLOR_ATTACHMENT0]);
gl.depthMask(true);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
gl.bindVertexArray(xyQuadVertexArrayInfo.vertexArrayObject);
// drawOpaqueSurfaces();
// draw checkerboard
gl.useProgram(checkerProgramInfo.program);
gl.disable(gl.DEPTH_TEST);
gl.disable(gl.BLEND);
twgl.setUniforms(checkerProgramInfo, {
color1: [.5, .5, .5, 1],
color2: [.7, .7, .7, 1],
u_projection: m4.identity(),
u_modelView: m4.identity(),
});
twgl.drawBufferInfo(gl, xyQuadVertexArrayInfo);
// draw a cube with depth buffer
gl.enable(gl.DEPTH_TEST);
{
gl.useProgram(opaqueProgramInfo.program);
gl.bindVertexArray(cubeVertexArrayInfo.vertexArrayObject);
let mat = view;
mat = m4.rotateX(mat, time * .1);
mat = m4.rotateY(mat, time * .2);
mat = m4.scale(mat, [1.5, 1.5, 1.5]);
twgl.setUniforms(opaqueProgramInfo, {
u_color: [1, .5, .2, 1],
u_lightDirection: lightDirection,
u_projection: projection,
u_modelView: mat,
});
twgl.drawBufferInfo(gl, cubeVertexArrayInfo);
}
twgl.bindFramebufferInfo(gl, transparentFBI);
gl.drawBuffers([gl.COLOR_ATTACHMENT0, gl.COLOR_ATTACHMENT1]);
// these values change if using separate blend functions
// per attachment (something WebGL2 does not support)
gl.clearBufferfv(gl.COLOR, 0, new Float32Array([0, 0, 0, 1]));
gl.clearBufferfv(gl.COLOR, 1, new Float32Array([1, 1, 1, 1]));
gl.depthMask(false); // don't write to depth buffer (but still testing)
gl.enable(gl.BLEND);
// this changes if using separate blend functions per attachment
gl.blendFuncSeparate(gl.ONE, gl.ONE, gl.ZERO, gl.ONE_MINUS_SRC_ALPHA);
gl.useProgram(transparentProgramInfo.program);
gl.bindVertexArray(sphereVertexArrayInfo.vertexArrayObject);
// drawTransparentSurfaces();
const spheres = [
[ .4, 0, 0, .4],
[ .4, .4, 0, .4],
[ 0, .4, 0, .4],
[ 0, .4, .4, .4],
[ 0, .0, .4, .4],
[ .4, .0, .4, .4],
];
spheres.forEach((color, ndx) => {
const u = ndx + 2;
let mat = view;
mat = m4.rotateX(mat, time * u * .1);
mat = m4.rotateY(mat, time * u * .2);
mat = m4.translate(mat, [0, 0, 1 + ndx * .1]);
twgl.setUniforms(transparentProgramInfo, {
u_color: color,
u_lightDirection: lightDirection,
u_projection: projection,
u_modelView: mat,
});
twgl.drawBufferInfo(gl, sphereVertexArrayInfo);
});
// composite transparent results with opaque
twgl.bindFramebufferInfo(gl, opaqueFBI);
gl.drawBuffers([gl.COLOR_ATTACHMENT0]);
gl.disable(gl.DEPTH_TEST);
gl.blendFunc(gl.ONE_MINUS_SRC_ALPHA, gl.SRC_ALPHA);
gl.useProgram(compositeProgramInfo.program);
gl.bindVertexArray(xyQuadVertexArrayInfo.vertexArrayObject);
twgl.setUniforms(compositeProgramInfo, {
ATexture: transparentFBI.attachments[0],
BTexture: transparentFBI.attachments[1],
u_projection: m4.identity(),
u_modelView: m4.identity(),
});
twgl.drawBufferInfo(gl, xyQuadVertexArrayInfo);
/* only needed if {alpha: false} not passed into getContext
gl.colorMask(false, false, false, true);
gl.clearColor(1, 1, 1, 1);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.colorMask(true, true, true, true);
*/
// draw opaque color buffer into canvas
// could probably use gl.blitFramebuffer
gl.disable(gl.BLEND);
twgl.bindFramebufferInfo(gl, null);
gl.useProgram(blitProgramInfo.program);
gl.bindVertexArray(xyQuadVertexArrayInfo.vertexArrayObject);
twgl.setUniforms(blitProgramInfo, {
u_texture: opaqueFBI.attachments[0],
u_projection: m4.identity(),
u_modelView: m4.identity(),
});
twgl.drawBufferInfo(gl, xyQuadVertexArrayInfo);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
It occurs to me that rather than use standard OpenGL blending for the last 2 steps (composite followed by blit), we could change the composite shader to take 3 textures (ATexture, BTexture, opaqueTexture) and blend in the shader, outputting directly to the canvas. That would be faster.
function main() {
const m4 = twgl.m4;
const v3 = twgl.v3;
const gl = document.querySelector('canvas').getContext('webgl2', {alpha: false});
if (!gl) {
alert('need WebGL2');
return;
}
const ext = gl.getExtension('EXT_color_buffer_float');
if (!ext) {
alert('EXT_color_buffer_float');
return;
}
const vs = `
#version 300 es
layout(location=0) in vec4 position;
layout(location=1) in vec3 normal;
uniform mat4 u_projection;
uniform mat4 u_modelView;
out vec4 v_viewPosition;
out vec3 v_normal;
void main() {
gl_Position = u_projection * u_modelView * position;
v_viewPosition = u_modelView * position;
v_normal = (u_modelView * vec4(normal, 0)).xyz;
}
`;
const checkerFS = `
#version 300 es
precision highp float;
uniform vec4 color1;
uniform vec4 color2;
out vec4 fragColor;
void main() {
ivec2 grid = ivec2(gl_FragCoord.xy) / 32;
fragColor = mix(color1, color2, float((grid.x + grid.y) % 2));
}
`;
const opaqueFS = `
#version 300 es
precision highp float;
in vec4 v_viewPosition;
in vec3 v_normal;
uniform vec4 u_color;
uniform vec3 u_lightDirection;
out vec4 fragColor;
void main() {
float light = abs(dot(normalize(v_normal), u_lightDirection));
fragColor = vec4(u_color.rgb * light, u_color.a);
}
`;
const transparentFS = `
#version 300 es
precision highp float;
uniform vec4 u_color;
uniform vec3 u_lightDirection;
in vec4 v_viewPosition;
in vec3 v_normal;
out vec4 fragData[2];
// eq (7)
float w(float z, float a) {
return a * max(
pow(10.0, -2.0),
min(
3.0 * pow(10.0, 3.0),
10.0 /
(pow(10.0, -5.0) +
pow(abs(z) / 5.0, 2.0) +
pow(abs(z) / 200.0, 6.0)
)
)
);
}
void main() {
float light = abs(dot(normalize(v_normal), u_lightDirection));
vec4 Ci = vec4(u_color.rgb * light, u_color.a);
float ai = Ci.a;
float zi = gl_FragCoord.z;
float wresult = w(zi, ai);
fragData[0] = vec4(Ci.rgb * wresult, ai);
fragData[1].r = ai * wresult;
}
`;
const compositeFS = `
#version 300 es
precision highp float;
uniform sampler2D ATexture;
uniform sampler2D BTexture;
uniform sampler2D opaqueTexture;
out vec4 fragColor;
void main() {
vec4 accum = texelFetch(ATexture, ivec2(gl_FragCoord.xy), 0);
float r = accum.a;
accum.a = texelFetch(BTexture, ivec2(gl_FragCoord.xy), 0).r;
vec4 transparentColor = vec4(accum.rgb / clamp(accum.a, 1e-4, 5e4), r);
vec4 opaqueColor = texelFetch(opaqueTexture, ivec2(gl_FragCoord.xy), 0);
// gl.blendFunc(gl.ONE_MINUS_SRC_ALPHA, gl.SRC_ALPHA);
fragColor = transparentColor * (1. - r) + opaqueColor * r;
}
`;
const checkerProgramInfo = twgl.createProgramInfo(gl, [vs, checkerFS]);
const opaqueProgramInfo = twgl.createProgramInfo(gl, [vs, opaqueFS]);
const transparentProgramInfo = twgl.createProgramInfo(gl, [vs, transparentFS]);
const compositeProgramInfo = twgl.createProgramInfo(gl, [vs, compositeFS]);
const xyQuadVertexArrayInfo = makeVAO(checkerProgramInfo, twgl.primitives.createXYQuadBufferInfo(gl));
const sphereVertexArrayInfo = makeVAO(transparentProgramInfo, twgl.primitives.createSphereBufferInfo(gl, 1, 16, 12));
const cubeVertexArrayInfo = makeVAO(opaqueProgramInfo, twgl.primitives.createCubeBufferInfo(gl, 1, 1));
function makeVAO(programInfo, bufferInfo) {
return twgl.createVertexArrayInfo(gl, programInfo, bufferInfo);
}
// In order to do proper zbuffering we need to share
// the depth buffer
const opaqueAttachments = [
{ internalFormat: gl.RGBA8, minMag: gl.NEAREST },
{ format: gl.DEPTH_COMPONENT16, minMag: gl.NEAREST },
];
const opaqueFBI = twgl.createFramebufferInfo(gl, opaqueAttachments);
const transparentAttachments = [
{ internalFormat: gl.RGBA32F, minMag: gl.NEAREST },
{ internalFormat: gl.R32F, minMag: gl.NEAREST },
{ format: gl.DEPTH_COMPONENT16, minMag: gl.NEAREST, attachment: opaqueFBI.attachments[1] },
];
const transparentFBI = twgl.createFramebufferInfo(gl, transparentAttachments);
function render(time) {
time *= 0.001;
if (twgl.resizeCanvasToDisplaySize(gl.canvas)) {
// if the canvas is resized also resize the framebuffer
// attachments (the depth buffer will be resized twice
// but I'm too lazy to fix it)
twgl.resizeFramebufferInfo(gl, opaqueFBI, opaqueAttachments);
twgl.resizeFramebufferInfo(gl, transparentFBI, transparentAttachments);
}
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const fov = 45 * Math.PI / 180;
const zNear = 0.1;
const zFar = 500;
const projection = m4.perspective(fov, aspect, zNear, zFar);
const eye = [0, 0, -5];
const target = [0, 0, 0];
const up = [0, 1, 0];
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
const lightDirection = v3.normalize([1, 3, 5]);
twgl.bindFramebufferInfo(gl, opaqueFBI);
gl.drawBuffers([gl.COLOR_ATTACHMENT0]);
gl.depthMask(true);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
gl.bindVertexArray(xyQuadVertexArrayInfo.vertexArrayObject);
// drawOpaqueSurfaces();
// draw checkerboard
gl.useProgram(checkerProgramInfo.program);
gl.disable(gl.DEPTH_TEST);
gl.disable(gl.BLEND);
twgl.setUniforms(checkerProgramInfo, {
color1: [.5, .5, .5, 1],
color2: [.7, .7, .7, 1],
u_projection: m4.identity(),
u_modelView: m4.identity(),
});
twgl.drawBufferInfo(gl, xyQuadVertexArrayInfo);
// draw a cube with depth buffer
gl.enable(gl.DEPTH_TEST);
{
gl.useProgram(opaqueProgramInfo.program);
gl.bindVertexArray(cubeVertexArrayInfo.vertexArrayObject);
let mat = view;
mat = m4.rotateX(mat, time * .1);
mat = m4.rotateY(mat, time * .2);
mat = m4.scale(mat, [1.5, 1.5, 1.5]);
twgl.setUniforms(opaqueProgramInfo, {
u_color: [1, .5, .2, 1],
u_lightDirection: lightDirection,
u_projection: projection,
u_modelView: mat,
});
twgl.drawBufferInfo(gl, cubeVertexArrayInfo);
}
twgl.bindFramebufferInfo(gl, transparentFBI);
gl.drawBuffers([gl.COLOR_ATTACHMENT0, gl.COLOR_ATTACHMENT1]);
// these values change if using separate blend functions
// per attachment (something WebGL2 does not support)
gl.clearBufferfv(gl.COLOR, 0, new Float32Array([0, 0, 0, 1]));
gl.clearBufferfv(gl.COLOR, 1, new Float32Array([1, 1, 1, 1]));
gl.depthMask(false); // don't write to depth buffer (but still testing)
gl.enable(gl.BLEND);
// this changes if using separate blend functions per attachment
gl.blendFuncSeparate(gl.ONE, gl.ONE, gl.ZERO, gl.ONE_MINUS_SRC_ALPHA);
gl.useProgram(transparentProgramInfo.program);
gl.bindVertexArray(sphereVertexArrayInfo.vertexArrayObject);
// drawTransparentSurfaces();
const spheres = [
[ .4, 0, 0, .4],
[ .4, .4, 0, .4],
[ 0, .4, 0, .4],
[ 0, .4, .4, .4],
[ 0, .0, .4, .4],
[ .4, .0, .4, .4],
];
spheres.forEach((color, ndx) => {
const u = ndx + 2;
let mat = view;
mat = m4.rotateX(mat, time * u * .1);
mat = m4.rotateY(mat, time * u * .2);
mat = m4.translate(mat, [0, 0, 1 + ndx * .1]);
twgl.setUniforms(transparentProgramInfo, {
u_color: color,
u_lightDirection: lightDirection,
u_projection: projection,
u_modelView: mat,
});
twgl.drawBufferInfo(gl, sphereVertexArrayInfo);
});
// composite transparent results with opaque
twgl.bindFramebufferInfo(gl, null);
gl.disable(gl.DEPTH_TEST);
gl.disable(gl.BLEND);
gl.useProgram(compositeProgramInfo.program);
gl.bindVertexArray(xyQuadVertexArrayInfo.vertexArrayObject);
twgl.setUniforms(compositeProgramInfo, {
ATexture: transparentFBI.attachments[0],
BTexture: transparentFBI.attachments[1],
opaqueTexture: opaqueFBI.attachments[0],
u_projection: m4.identity(),
u_modelView: m4.identity(),
});
twgl.drawBufferInfo(gl, xyQuadVertexArrayInfo);
/* only needed if {alpha: false} not passed into getContext
gl.colorMask(false, false, false, true);
gl.clearColor(1, 1, 1, 1);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.colorMask(true, true, true, true);
*/
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
"I have three requirements for my depth testing of semi-transparent objects"
It's actually quite rare to have self-intersecting objects with partially transparent (actually blended) samples. The common cases for self-intersecting geometry are grass and leaves, but in those cases the areas actually covered by grass and leaves are not transparent - they are opaque.
The common solution here is alpha testing. Render the leaves as opaque (not blended) quads with a normal depth test and write, and discard fragments which have insufficient alpha (e.g. because they are outside of the leaf shape). Because the individual samples that survive are opaque, you get order independence for free: the depth test works just as it would for any opaque object.
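In GLSL ES 1.0 an alpha-tested fragment shader looks roughly like this (my illustration; u_leafTexture and the 0.5 threshold are assumptions, not the answerer's code):
precision mediump float;
varying vec2 v_texcoord;
uniform sampler2D u_leafTexture;
void main() {
  vec4 color = texture2D(u_leafTexture, v_texcoord);
  // texels outside the leaf shape have low alpha; drop those fragments
  // entirely so depth test and depth write behave exactly as for opaque geometry
  if (color.a < 0.5) {
    discard;
  }
  gl_FragColor = color;
}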
If you want blended edges, then enable alpha-to-coverage and let the multi-sample resolve clean up the edges a little.
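In WebGL that's a single enable call; note it only has a visible effect on a multisampled target, e.g. a canvas created with antialias: true (drawFoliage here is a placeholder for your own draw call):
gl.enable(gl.SAMPLE_ALPHA_TO_COVERAGE);
drawFoliage();  // fragment alpha now controls how many MSAA samples are covered
gl.disable(gl.SAMPLE_ALPHA_TO_COVERAGE);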
For the small amount of actually transparent stuff you have left, you normally need to do a back-to-front sort on the CPU, and render it after the opaque pass.
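A rough sketch of that sort, reusing twgl's m4 helpers from the examples above (transparentObjects, obj.position, and drawObject are made-up names):
// transform each object's position into view space and draw farthest first;
// view space looks down -z, so the most negative z is the farthest away
transparentObjects
  .map(obj => ({ obj, z: m4.transformPoint(view, obj.position)[2] }))
  .sort((a, b) => a.z - b.z)
  .forEach(({ obj }) => drawObject(obj));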
Proper OIT is possible, but it is generally quite an expensive technique, so I've yet to see anyone actually use it outside of an academic environment (at least on mobile OpenGL ES implementations).