Heavy image downsampling artefacts - glsl

I need to heavily downsample an image to prepare it for a perceptual hashing algorithm, for example from 3024x4032 down to a 9x9 image, and I'd like to use WebGL for that (as the base image is already processed within the WebGL pipeline), but artefacts quickly appear when the downsampling ratio is too large.
Taking this sample situation:
// WebGL2 - 2D Image
// from https://webgl2fundamentals.org/webgl/webgl-2d-image.html
"use strict";
var downscaleFactor = 0.5;
var vertexShaderSource = `#version 300 es
// an attribute is an input (in) to a vertex shader.
// It will receive data from a buffer
in vec2 a_position;
in vec2 a_texCoord;
// Used to pass in the resolution of the canvas
uniform vec2 u_resolution;
// Used to pass the texture coordinates to the fragment shader
out vec2 v_texCoord;
// all shaders have a main function
void main() {
// convert the position from pixels to 0.0 to 1.0
vec2 zeroToOne = a_position / u_resolution;
// convert from 0->1 to 0->2
vec2 zeroToTwo = zeroToOne * 2.0;
// convert from 0->2 to -1->+1 (clipspace)
vec2 clipSpace = zeroToTwo - 1.0;
gl_Position = vec4(clipSpace * vec2(1, -1), 0, 1);
// pass the texCoord to the fragment shader
// The GPU will interpolate this value between points.
v_texCoord = a_texCoord;
}
`;
var fragmentShaderSource = `#version 300 es
// fragment shaders don't have a default precision so we need
// to pick one. mediump is a good default. It means "medium precision"
precision mediump float;
// our texture
uniform sampler2D u_image;
// the texCoords passed in from the vertex shader.
in vec2 v_texCoord;
// we need to declare an output for the fragment shader
out vec4 outColor;
void main() {
outColor = texture(u_image, v_texCoord);
}
`;
var image = new Image();
image.src = "https://i.imgur.com/AetKHN1.png";
image.crossOrigin = "anonymous";
image.onload = function() {
render(image);
};
function render(image) {
// Get A WebGL context
/** @type {HTMLCanvasElement} */
var canvas = document.getElementById("canvas");
var gl = canvas.getContext("webgl2");
if (!gl) {
return;
}
// setup GLSL program
var program = webglUtils.createProgramFromSources(gl, [vertexShaderSource, fragmentShaderSource]);
// look up where the vertex data needs to go.
var positionAttributeLocation = gl.getAttribLocation(program, "a_position");
var texCoordAttributeLocation = gl.getAttribLocation(program, "a_texCoord");
// lookup uniforms
var resolutionLocation = gl.getUniformLocation(program, "u_resolution");
var imageLocation = gl.getUniformLocation(program, "u_image");
// Create a vertex array object (attribute state)
var vao = gl.createVertexArray();
// and make it the one we're currently working with
gl.bindVertexArray(vao);
// Create a buffer and put a single pixel space rectangle in
// it (2 triangles)
var positionBuffer = gl.createBuffer();
// Turn on the attribute
gl.enableVertexAttribArray(positionAttributeLocation);
// Bind it to ARRAY_BUFFER (think of it as ARRAY_BUFFER = positionBuffer)
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer);
// Tell the attribute how to get data out of positionBuffer (ARRAY_BUFFER)
var size = 2; // 2 components per iteration
var type = gl.FLOAT; // the data is 32bit floats
var normalize = false; // don't normalize the data
var stride = 0; // 0 = move forward size * sizeof(type) each iteration to get the next position
var offset = 0; // start at the beginning of the buffer
gl.vertexAttribPointer(
positionAttributeLocation, size, type, normalize, stride, offset);
// provide texture coordinates for the rectangle.
var texCoordBuffer = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, texCoordBuffer);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
0.0, 0.0,
1.0, 0.0,
0.0, 1.0,
0.0, 1.0,
1.0, 0.0,
1.0, 1.0,
]), gl.STATIC_DRAW);
gl.enableVertexAttribArray(texCoordAttributeLocation);
var size = 2; // 2 components per iteration
var type = gl.FLOAT; // the data is 32bit floats
var normalize = false; // don't normalize the data
var stride = 0; // 0 = move forward size * sizeof(type) each iteration to get the next position
var offset = 0; // start at the beginning of the buffer
gl.vertexAttribPointer(
texCoordAttributeLocation, size, type, normalize, stride, offset);
// Create a texture.
var texture = gl.createTexture();
// make unit 0 the active texture unit
// (i.e. the unit all other texture commands will affect)
gl.activeTexture(gl.TEXTURE0 + 0);
// Bind it to texture unit 0's 2D bind point
gl.bindTexture(gl.TEXTURE_2D, texture);
// Set the parameters so we don't need mips and so we're not filtering
// and we don't repeat at the edges
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
// Upload the image into the texture.
var mipLevel = 0; // the largest mip
var internalFormat = gl.RGBA; // format we want in the texture
var srcFormat = gl.RGBA; // format of data we are supplying
var srcType = gl.UNSIGNED_BYTE; // type of data we are supplying
gl.texImage2D(gl.TEXTURE_2D,
mipLevel,
internalFormat,
srcFormat,
srcType,
image);
webglUtils.resizeCanvasToDisplaySize(gl.canvas);
// Tell WebGL how to convert from clip space to pixels
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
// Clear the canvas
gl.clearColor(0, 0, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
// Tell it to use our program (pair of shaders)
gl.useProgram(program);
// Bind the attribute/buffer set we want.
gl.bindVertexArray(vao);
// Pass in the canvas resolution so we can convert from
// pixels to clipspace in the shader
gl.uniform2f(resolutionLocation, gl.canvas.width, gl.canvas.height);
// Tell the shader to get the texture from texture unit 0
gl.uniform1i(imageLocation, 0);
// Bind the position buffer so gl.bufferData that will be called
// in setRectangle puts data in the position buffer
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer);
// Set a rectangle scaled down from the image size by downscaleFactor.
setRectangle(gl, 0, 0, downscaleFactor * image.width, downscaleFactor * image.height);
// Draw the rectangle.
var primitiveType = gl.TRIANGLES;
var offset = 0;
var count = 6;
gl.drawArrays(primitiveType, offset, count);
}
function setRectangle(gl, x, y, width, height) {
var x1 = x;
var x2 = x + width;
var y1 = y;
var y2 = y + height;
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array([
x1, y1,
x2, y1,
x1, y2,
x1, y2,
x2, y1,
x2, y2,
]), gl.STATIC_DRAW);
}
body {
margin: 0;
}
canvas {
width: 500px;
height: 500px;
display: block;
}
<canvas id="canvas"></canvas>
<!--
for most samples webgl-utils only provides shader compiling/linking and
canvas resizing because why clutter the examples with code that's the same in every sample.
See http://webgl2fundamentals.org/webgl/lessons/webgl-boilerplate.html
and http://webgl2fundamentals.org/webgl/lessons/webgl-resizing-the-canvas.html
for webgl-utils, m3, m4, and webgl-lessons-ui.
-->
<script src="https://webgl2fundamentals.org/webgl/resources/webgl-utils.js"></script>
where the base image is set up with gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);, the resize works fine for downscaleFactor >= 0.5, but as soon as I reduce the factor further, artefacts appear and things get ugly (try setting downscaleFactor = 0.15 and run).
I've tried replacing the fragment shader's texture() call with a bilinear interpolation function, but the issue persists.
vec4 textureBilinear(sampler2D textureSampler, vec2 textureCoordinate) {
vec2 texelSize = 1.0 / vec2(textureSize(textureSampler, 0));
vec4 p0q0 = texture(textureSampler, textureCoordinate);
vec4 p1q0 = texture(textureSampler, textureCoordinate + vec2(texelSize.x, 0));
vec4 p0q1 = texture(textureSampler, textureCoordinate + vec2(0, texelSize.y));
vec4 p1q1 = texture(textureSampler, textureCoordinate + vec2(texelSize.x , texelSize.y));
float a = fract(textureCoordinate.x);
float b = fract(textureCoordinate.y);
vec4 pInterp_q0 = mix(p0q0, p1q0, a);
vec4 pInterp_q1 = mix(p0q1, p1q1, a);
return mix(pInterp_q0, pInterp_q1, b);
}
When I downsample the image with photo-editing software, I get a smooth result as expected (here, bilinear). What strategies would yield the same result, possibly in one pass?
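One common strategy, sketched here as a hedged example rather than a definitive answer: GL_LINEAR only ever blends the 4 texels closest to the sample point, so when one output pixel covers a block of roughly 336x448 source texels almost all of them are ignored, which is where the aliasing comes from; a correct 4-tap bilinear function has the same limitation. A one-pass fix is to loop over the covered block in the fragment shader and average it yourself. The sketch below assumes two extra uniforms, u_srcSize (source texture size in pixels) and u_dstSize (target size, e.g. vec2(9, 9)); both names are invented for this example.

#version 300 es
precision highp float;
uniform sampler2D u_image;
uniform vec2 u_srcSize; // source size in pixels (hypothetical uniform)
uniform vec2 u_dstSize; // output size in pixels, e.g. vec2(9, 9) (hypothetical uniform)
in vec2 v_texCoord;
out vec4 outColor;
void main() {
  // how many source texels fall into one output pixel
  vec2 texelsPerPixel = u_srcSize / u_dstSize;
  ivec2 count = ivec2(ceil(texelsPerPixel));
  // first source texel covered by this output pixel
  vec2 start = floor(v_texCoord * u_srcSize - 0.5 * texelsPerPixel);
  vec4 sum = vec4(0.0);
  for (int y = 0; y < count.y; ++y) {
    for (int x = 0; x < count.x; ++x) {
      ivec2 src = clamp(ivec2(start) + ivec2(x, y), ivec2(0), ivec2(u_srcSize) - 1);
      sum += texelFetch(u_image, src, 0);
    }
  }
  outColor = sum / float(count.x * count.y);
}

An alternative that avoids the large per-fragment loop is to let the GPU pre-average: upload the full-size texture, call gl.generateMipmap(gl.TEXTURE_2D), use a MIN_FILTER such as LINEAR_MIPMAP_LINEAR, and/or downscale in several halving passes before the final 9x9 draw.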

Related

Why my texture coordinates are inverted each time I call my glsl shader in p5js?

I am trying to use a GLSL shader with p5.js to create a simulation like the Game of Life. To do that I want to create a shader which takes a texture as a uniform and draws a new texture based on that previous texture. In the next iteration this new texture is used as the uniform, which should allow me to create a simulation following the idea exposed here. I am experienced with p5.js but completely new to shader programming, so I'm probably missing something.
For now my code is as straightforward as possible:
In the preload() function, I create a texture using the createImage() function and setup some pixels to be white and the others to be black.
In the setup() function I use this texture to run the shader a first time to create a new texture. I also set a timer to run the shader at regular intervals and draw the result in a buffer.
In the draw() function I draw the buffer in the canvas.
To keep things simple I keep the canvas and the texture the same size.
My issue is that at some point the y coordinates in my code seem to get inverted, and I don't understand why. My understanding is that my code should show a still image, but each time I run the shader the image is flipped. Here is what I mean:
I am not sure if my issue comes from how I use glsl or how I use p5 or a mix of both. Can someone explain to me where this weird y inversion comes from?
Here is my minimal reproducible example (which is also in the p5 editor here):
The sketch file:
const sketch = (p5) => {
const D = 100;
let initialTexture;
p5.preload = () => {
// Create the initial image
initialTexture = p5.createImage(D, D);
initialTexture.loadPixels();
for (let i = 0; i < initialTexture.width; i++) {
for (let j = 0; j < initialTexture.height; j++) {
const alive = i === j || i === 10 || j === 40;
const color = p5.color(250, 250, 250, alive ? 250 : 0);
initialTexture.set(i, j, color);
}
}
initialTexture.updatePixels();
// Initialize the shader
shader = p5.loadShader('uniform.vert', 'test.frag');
};
p5.setup = () => {
const canvas = p5.createCanvas(D, D, p5.WEBGL);
canvas.parent('canvasDiv');
// Create the buffer the shader will draw on
graphics = p5.createGraphics(D, D, p5.WEBGL);
graphics.shader(shader);
/*
* Initial step to setup the initial texture
*/
// Used to normalize the frag coordinates
shader.setUniform('u_resolution', [p5.width, p5.height]);
// First state of the simulation
shader.setUniform('u_texture', initialTexture);
graphics.rect(0, 0, p5.width, p5.height);
// Call the shader each time interval
setInterval(updateSimulation, 1009);
};
const updateSimulation = () => {
// Use the previous state as a texture
shader.setUniform('u_texture', graphics);
graphics.rect(0, 0, p5.width, p5.height);
};
p5.draw = () => {
p5.background(0);
// Use the buffer on the canvas
p5.image(graphics, -p5.width / 2, -p5.height / 2);
};
};
new p5(sketch);
The fragment shader, which for now only reads the color of the texture and reuses it (I tried using st instead of uv, to no avail):
precision highp float;
uniform vec2 u_resolution;
uniform sampler2D u_texture;
// grab texcoords from vert shader
varying vec2 vTexCoord;
void main() {
// Normalize the position between 0 and 1
vec2 st = gl_FragCoord.xy/u_resolution.xy;
// Get the texture coordinate from the vertex shader
vec2 uv = vTexCoord;
// Get the color at the texture coordinate
vec4 c = texture2D(u_texture, uv);
// Reuse the same color
gl_FragColor = c;
}
And the vertex shader, which I took from an example and does nothing except pass the coordinates along:
/*
* vert file and comments from adam ferriss https://github.com/aferriss/p5jsShaderExamples with additional comments from Louise Lessel
*/
precision highp float;
// This “vec3 aPosition” is a built in shader functionality. You must keep that naming.
// It automatically gets the position of every vertex on your canvas
attribute vec3 aPosition;
attribute vec2 aTexCoord;
varying vec2 vTexCoord;
// We always must do at least one thing in the vertex shader:
// tell the pixel where on the screen it lives:
void main() {
// copy the texcoords
vTexCoord = aTexCoord;
// copy the position data into a vec4, using 1.0 as the w component
vec4 positionVec4 = vec4(aPosition, 1.0);
positionVec4.xy = positionVec4.xy * 2.0 - 1.0;
// Send the vertex information on to the fragment shader
// this is done automatically, as long as you put it into the built in shader function “gl_Position”
gl_Position = positionVec4;
}
Long story short: the texture coordinates for a rectangle or a plane drawn with p5.js are (0, 0) in the bottom left and (1, 1) in the top right, whereas the coordinate system for sampling values from a texture has (0, 0) in the top left and (1, 1) in the bottom right. You can verify this by commenting out your color sampling code in your fragment shader and using the following:
float val = (uv.x + uv.y) / 2.0;
gl_FragColor = vec4(val, val, val, 1.0);
As you can see by the resulting image:
The value (0 + 0) / 2 results in black in the lower left, and (1 + 1) / 2 results in white in the upper right.
So, to sample the correct portion of the texture you just need to flip the y component of the uv vector:
texture2D(u_texture, vec2(uv.x, 1.0 - uv.y));
const sketch = (p5) => {
const D = 200;
let initialTexture;
p5.preload = () => {
// This doesn't actually need to go in preload
// Create the initial image
initialTexture = p5.createImage(D, D);
initialTexture.loadPixels();
for (let i = 0; i < initialTexture.width; i++) {
for (let j = 0; j < initialTexture.height; j++) {
// draw a big checkerboard
const alive = (p5.round(i / 10) + p5.round(j / 10)) % 2 == 0;
const color = alive ? p5.color('white') : p5.color(150, p5.map(j, 0, D, 50, 200), p5.map(i, 0, D, 50, 200));
initialTexture.set(i, j, color);
}
}
initialTexture.updatePixels();
};
p5.setup = () => {
const canvas = p5.createCanvas(D, D, p5.WEBGL);
// Create the buffer the shader will draw on
graphics = p5.createGraphics(D, D, p5.WEBGL);
// Initialize the shader
shader = graphics.createShader(vert, frag);
graphics.shader(shader);
/*
* Initial step to setup the initial texture
*/
// Used to normalize the frag coordinates
shader.setUniform('u_resolution', [p5.width, p5.height]);
// First state of the simulation
shader.setUniform('u_texture', initialTexture);
graphics.rect(0, 0, p5.width, p5.height);
// Call the shader each time interval
setInterval(updateSimulation, 100);
};
const updateSimulation = () => {
// Use the previous state as a texture
shader.setUniform('u_texture', graphics);
graphics.rect(0, 0, p5.width, p5.height);
};
p5.draw = () => {
p5.background(0);
// Use the buffer on the canvas
p5.texture(graphics);
p5.rect(-p5.width / 2, -p5.height / 2, p5.width, p5.height);
};
const frag = `
precision highp float;
uniform vec2 u_resolution;
uniform sampler2D u_texture;
// grab texcoords from vert shader
varying vec2 vTexCoord;
varying vec2 vPos;
void main() {
// Get the texture coordinate from the vertex shader
vec2 uv = vTexCoord;
gl_FragColor = texture2D(u_texture, vec2(uv.x, 1.0 - uv.y));
//// For debugging uv coordinate orientation
// float val = (uv.x + uv.y) / 2.0;
// gl_FragColor = vec4(val, val, val, 1.0);
}
`;
const vert = `
/*
* vert file and comments from adam ferriss https://github.com/aferriss/p5jsShaderExamples with additional comments from Louise Lessel
*/
precision highp float;
// This “vec3 aPosition” is a built in shader functionality. You must keep that naming.
// It automatically gets the position of every vertex on your canvas
attribute vec3 aPosition;
attribute vec2 aTexCoord;
varying vec2 vTexCoord;
// We always must do at least one thing in the vertex shader:
// tell the pixel where on the screen it lives:
void main() {
// copy the texcoords
vTexCoord = aTexCoord;
// copy the position data into a vec4, using 1.0 as the w component
vec4 positionVec4 = vec4(aPosition, 1.0);
// This maps positions 0..1 to -1..1
positionVec4.xy = positionVec4.xy * 2.0 - 1.0;
// Send the vertex information on to the fragment shader
// this is done automatically, as long as you put it into the built in shader function “gl_Position”
gl_Position = positionVec4;
}`;
};
new p5(sketch);
<script src="https://cdn.jsdelivr.net/npm/p5#1.3.1/lib/p5.js"></script>

Multiple output textures from the same program

I'm trying to learn how to produce multiple outputs from the same program in WebGL2, leveraging the gl.drawBuffers() capabilities.
I looked at the book "OpenGL ES 3.0 Programming Guide", chapter 11 where it lists what is needed for multi-output to take place. However the shader source example is very trivial outputting only constant values.
I'd like to know if someone has a better example, or could explain what happened to the TextureCoordinates varying. In normal shader code I would use it to look up data values from my inputs and write them out. Now, in the face of multiple layouts, how would the TextureCoordinates varying correspond to each layout? What happens to the dimensions of my viewport? Which output texture does that correspond with?
Here are some steps the way I understood them:
Create a Color attachment array GL_COLOR_ATTACHMENT0, ...
Create a framebuffer object for each output
Create output textures
For each FB:
BindFramebuffer
BindTexture
Associate texture with FBO: frameBufferTexture2D (..., color_attchment_from_step1)
call drawBuffers passing the color attachment array
Inside the shader access output values like this:
layout(location = 0) out vec4 fragData0;
layout(location = 1) out vec4 fragData1;
You only need one framebuffer object. You attach all the textures to it. So your steps would be
Create a framebuffer object and BindFramebuffer
Create output textures
For each texture:
Associate texture with FBO: frameBufferTexture2D(...)
Create a Color attachment array GL_COLOR_ATTACHMENT0, ...
call drawBuffers passing the color attachment array
function main() {
const gl = document.querySelector('canvas').getContext('webgl2');
if (!gl) {
return alert("need WebGL2");
}
const vs = `
#version 300 es
void main() {
gl_PointSize = 300.0;
gl_Position = vec4(0, 0, 0, 1);
}
`;
const fs = `
#version 300 es
precision mediump float;
layout(location = 0) out vec4 outColor0;
layout(location = 1) out vec4 outColor1;
layout(location = 2) out vec4 outColor2;
layout(location = 3) out vec4 outColor3;
void main() {
outColor0 = vec4(1, .5, .3, .7); // orange
outColor1 = vec4(.6, .5, .4, .3); // brown
outColor2 = vec4(.2, .8, .0, 1); // green
outColor3 = vec4(.3, .4, .9, .6); // blue
}
`
const program = twgl.createProgram(gl, [vs, fs]);
const textures = [];
const fb = gl.createFramebuffer();
gl.bindFramebuffer(gl.FRAMEBUFFER, fb);
for (let i = 0; i < 4; ++i) {
const tex = gl.createTexture();
textures.push(tex);
gl.bindTexture(gl.TEXTURE_2D, tex);
const width = 1;
const height = 1;
const level = 0;
gl.texImage2D(gl.TEXTURE_2D, level, gl.RGBA, width, height, 0,
gl.RGBA, gl.UNSIGNED_BYTE, null);
// attach texture to framebuffer
gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0 + i,
gl.TEXTURE_2D, tex, level);
}
// our framebuffer textures are only 1x1 pixels
gl.viewport(0, 0, 1, 1);
// tell it we want to draw to all 4 attachments
gl.drawBuffers([
gl.COLOR_ATTACHMENT0,
gl.COLOR_ATTACHMENT1,
gl.COLOR_ATTACHMENT2,
gl.COLOR_ATTACHMENT3,
]);
// draw a single point
gl.useProgram(program);
{
const offset = 0;
const count = 1;
gl.drawArrays(gl.POINTS, offset, count);
}
// --- below this is not relevant to the question but just so we
// --- we can see it's working
// render the 4 textures
const fs2 = `
#version 300 es
precision mediump float;
uniform sampler2D tex[4];
out vec4 outColor;
void main() {
vec4 colors[4];
// you can't index textures with non-constant integer expressions
// in WebGL2 (you can in WebGL1 lol)
colors[0] = texture(tex[0], vec2(0));
colors[1] = texture(tex[1], vec2(0));
colors[2] = texture(tex[2], vec2(0));
colors[3] = texture(tex[3], vec2(0));
vec4 color = vec4(0);
for (int i = 0; i < 4; ++i) {
float x = gl_PointCoord.x * 4.0;
float amount = step(float(i), x) * step(x, float(i + 1));
color = mix(color, colors[i], amount);
}
outColor = vec4(color.rgb, 1);
}
`;
const prgInfo2 = twgl.createProgramInfo(gl, [vs, fs2]);
gl.bindFramebuffer(gl.FRAMEBUFFER, null);
gl.viewport(0, 0, gl.canvas.width, gl.canvas.height);
gl.useProgram(prgInfo2.program);
// binds all the textures and set the uniforms
twgl.setUniforms(prgInfo2, {
tex: textures,
});
gl.drawArrays(gl.POINTS, 0, 1);
}
main();
<script src="https://twgljs.org/dist/4.x/twgl.min.js"></script>
<canvas></canvas>
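A note on the TextureCoordinates part of the question (added here as a hedged sketch, not part of the original answer): nothing changes with multiple render targets. There is still exactly one fragment shader invocation per covered pixel; it receives one set of interpolated varyings and one gl_FragCoord, and the single viewport applies to all attachments at once, which is why the attachments of a framebuffer normally all have the same size. Each layout(location = n) output simply writes to the texture bound at COLOR_ATTACHMENT0 + n. In the sketch below, v_texCoord, u_dataA and u_dataB are invented names used only for illustration:

#version 300 es
precision mediump float;
in vec2 v_texCoord; // one varying, shared by all outputs
uniform sampler2D u_dataA;
uniform sampler2D u_dataB;
layout(location = 0) out vec4 outColor0; // written to COLOR_ATTACHMENT0
layout(location = 1) out vec4 outColor1; // written to COLOR_ATTACHMENT1
void main() {
  // the same texture coordinate is used to look up data for every output
  vec4 a = texture(u_dataA, v_texCoord);
  vec4 b = texture(u_dataB, v_texCoord);
  outColor0 = a + b; // e.g. a sum goes to attachment 0
  outColor1 = a - b; // e.g. a difference goes to attachment 1
}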

Implementing depth testing for semi-transparent objects

I've been carefully trawling the internet for the past two days to understand depth testing for semi-transparent objects. I've read multiple papers/tutorials on the subject, and in theory I believe I understand how it works. However, none of them gives me actual example code.
I have three requirements for my depth testing of semi-transparent objects:
It should be order independent.
It should work when two quads of the same object intersect each other, both semi-transparent. Imagine a grass object that looks like an X when viewed from above:
It should correctly render a semi-transparent player rgba(0, 1, 0, 0.5) behind a building's window rgba(0, 0, 1, 0.5), but in front of a background object rgba(1, 0, 0, 1):
The line on the far left is how I imagine the light/color changes as it travels through the semi-transparent objects towards the camera
Final Thoughts
I suspect the best approach is depth peeling, but I'm still lacking an implementation/example. I'm leaning towards this approach because the game is 2.5D, and since depth peeling could get dangerous for performance (lots of layers to peel), there would never need to be more than two semi-transparent objects to "peel".
I'm already familiar with framebuffers and how to code them (I use them for some post-processing effects). I will be using them, right?
Most of my knowledge of OpenGL comes from this tutorial, but it covers depth testing and semi-transparency separately. It also sadly doesn't cover order-independent transparency at all (see the bottom of the Blending page).
Finally, please don't answer only in theory. e.g.
Draw opaque, draw transparent, draw opaque again, etc.
My ideal answer will contain code showing how the buffers are configured, the shaders, and screenshots of each pass with an explanation of what it's doing.
The programming language used is also not too important as long as it uses OpenGL 4 or newer. The non-OpenGL code can be pseudocode (I don't care how you sort an array or create a GLFW window).
EDIT:
I'm updating my question to include an example of the current state of my code. This example draws the semi-transparent player (green) first, the opaque background (red) second, and then the semi-transparent window (blue). However, the depth should be determined by the Z position of each square, not the order in which it is drawn.
(function() {
// your page initialization code here
// the DOM will be available here
var script = document.createElement('script');
script.onload = function () {
main();
};
script.src = 'https://mdn.github.io/webgl-examples/tutorial/gl-matrix.js';
document.head.appendChild(script); //or something of the likes
})();
//
// Start here
//
function main() {
const canvas = document.querySelector('#glcanvas');
const gl = canvas.getContext('webgl', {alpha:false});
// If we don't have a GL context, give up now
if (!gl) {
alert('Unable to initialize WebGL. Your browser or machine may not support it.');
return;
}
// Vertex shader program
const vsSource = `
attribute vec4 aVertexPosition;
attribute vec4 aVertexColor;
uniform mat4 uModelViewMatrix;
uniform mat4 uProjectionMatrix;
varying lowp vec4 vColor;
void main(void) {
gl_Position = uProjectionMatrix * uModelViewMatrix * aVertexPosition;
vColor = aVertexColor;
}
`;
// Fragment shader program
const fsSource = `
varying lowp vec4 vColor;
void main(void) {
gl_FragColor = vColor;
}
`;
// Initialize a shader program; this is where all the lighting
// for the vertices and so forth is established.
const shaderProgram = initShaderProgram(gl, vsSource, fsSource);
// Collect all the info needed to use the shader program.
// Look up which attributes our shader program is using
// for aVertexPosition, aVertexColor and also
// look up uniform locations.
const programInfo = {
program: shaderProgram,
attribLocations: {
vertexPosition: gl.getAttribLocation(shaderProgram, 'aVertexPosition'),
vertexColor: gl.getAttribLocation(shaderProgram, 'aVertexColor'),
},
uniformLocations: {
projectionMatrix: gl.getUniformLocation(shaderProgram, 'uProjectionMatrix'),
modelViewMatrix: gl.getUniformLocation(shaderProgram, 'uModelViewMatrix'),
},
};
// Here's where we call the routine that builds all the
// objects we'll be drawing.
const buffers = initBuffers(gl);
// Draw the scene
drawScene(gl, programInfo, buffers);
}
//
// initBuffers
//
// Initialize the buffers we'll need. For this demo, we just
// have one object -- a simple two-dimensional square.
//
function initBuffers(gl) {
// Create a buffer for the square's positions.
const positionBuffer0 = gl.createBuffer();
// Select the positionBuffer as the one to apply buffer
// operations to from here out.
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer0);
// Now create an array of positions for the square.
var positions = [
0.5, 0.5,
-0.5, 0.5,
0.5, -0.5,
-0.5, -0.5,
];
// Now pass the list of positions into WebGL to build the
// shape. We do this by creating a Float32Array from the
// JavaScript array, then use it to fill the current buffer.
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(positions), gl.STATIC_DRAW);
// Now set up the colors for the vertices
var colors = [
0.0, 1.0, 0.0, 0.5, // semi-transparent green
0.0, 1.0, 0.0, 0.5,
0.0, 1.0, 0.0, 0.5,
0.0, 1.0, 0.0, 0.5,
];
const colorBuffer0 = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, colorBuffer0);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(colors), gl.STATIC_DRAW);
// Create a buffer for the square's positions.
const positionBuffer1 = gl.createBuffer();
// Select the positionBuffer as the one to apply buffer
// operations to from here out.
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer1);
// Now create an array of positions for the square.
positions = [
2.0, 0.4,
-2.0, 0.4,
2.0, -2.0,
-2.0, -2.0,
];
// Now pass the list of positions into WebGL to build the
// shape. We do this by creating a Float32Array from the
// JavaScript array, then use it to fill the current buffer.
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(positions), gl.STATIC_DRAW);
// Now set up the colors for the vertices
colors = [
1.0, 0.0, 0.0, 1.0, // opaque red
1.0, 0.0, 0.0, 1.0,
1.0, 0.0, 0.0, 1.0,
1.0, 0.0, 0.0, 1.0,
];
const colorBuffer1 = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, colorBuffer1);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(colors), gl.STATIC_DRAW);
// Create a buffer for the square's positions.
const positionBuffer2 = gl.createBuffer();
// Select the positionBuffer as the one to apply buffer
// operations to from here out.
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer2);
// Now create an array of positions for the square.
positions = [
1.0, 1.0,
-0.0, 1.0,
1.0, -1.0,
-0.0, -1.0,
];
// Now pass the list of positions into WebGL to build the
// shape. We do this by creating a Float32Array from the
// JavaScript array, then use it to fill the current buffer.
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(positions), gl.STATIC_DRAW);
// Now set up the colors for the vertices
colors = [
0.0, 0.0, 1.0, 0.5, // semi-transparent blue
0.0, 0.0, 1.0, 0.5,
0.0, 0.0, 1.0, 0.5,
0.0, 0.0, 1.0, 0.5,
];
const colorBuffer2 = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, colorBuffer2);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(colors), gl.STATIC_DRAW);
return {
position0: positionBuffer0,
color0: colorBuffer0,
position1: positionBuffer1,
color1: colorBuffer1,
position2: positionBuffer2,
color2: colorBuffer2,
};
}
//
// Draw the scene.
//
function drawScene(gl, programInfo, buffers) {
gl.clearColor(0.0, 0.0, 0.0, 1.0); // Clear to black, fully opaque
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
//gl.clearDepth(1.0); // Clear everything
gl.disable(gl.DEPTH_TEST)
gl.enable(gl.BLEND)
gl.blendEquation(gl.FUNC_ADD)
gl.blendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA)
// Clear the canvas before we start drawing on it.
// Create a perspective matrix, a special matrix that is
// used to simulate the distortion of perspective in a camera.
// Our field of view is 45 degrees, with a width/height
// ratio that matches the display size of the canvas
// and we only want to see objects between 0.1 units
// and 100 units away from the camera.
const fieldOfView = 45 * Math.PI / 180; // in radians
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const zNear = 0.1;
const zFar = 100.0;
const projectionMatrix = mat4.create();
// note: glmatrix.js always has the first argument
// as the destination to receive the result.
mat4.perspective(projectionMatrix,
fieldOfView,
aspect,
zNear,
zFar);
// Set the drawing position to the "identity" point, which is
// the center of the scene.
const modelViewMatrix = mat4.create();
// Now move the drawing position a bit to where we want to
// start drawing the square.
mat4.translate(modelViewMatrix, // destination matrix
modelViewMatrix, // matrix to translate
[-0.0, 0.0, -6.0]); // amount to translate
function drawSquare(positionbuffer, colorbuffer) {
// Tell WebGL how to pull out the positions from the position
// buffer into the vertexPosition attribute
{
const numComponents = 2;
const type = gl.FLOAT;
const normalize = false;
const stride = 0;
const offset = 0;
gl.bindBuffer(gl.ARRAY_BUFFER, positionbuffer);
gl.vertexAttribPointer(
programInfo.attribLocations.vertexPosition,
numComponents,
type,
normalize,
stride,
offset);
gl.enableVertexAttribArray(
programInfo.attribLocations.vertexPosition);
}
// Tell WebGL how to pull out the colors from the color buffer
// into the vertexColor attribute.
{
const numComponents = 4;
const type = gl.FLOAT;
const normalize = false;
const stride = 0;
const offset = 0;
gl.bindBuffer(gl.ARRAY_BUFFER, colorbuffer);
gl.vertexAttribPointer(
programInfo.attribLocations.vertexColor,
numComponents,
type,
normalize,
stride,
offset);
gl.enableVertexAttribArray(
programInfo.attribLocations.vertexColor);
}
// Tell WebGL to use our program when drawing
gl.useProgram(programInfo.program);
// Set the shader uniforms
gl.uniformMatrix4fv(
programInfo.uniformLocations.projectionMatrix,
false,
projectionMatrix);
gl.uniformMatrix4fv(
programInfo.uniformLocations.modelViewMatrix,
false,
modelViewMatrix);
{
const offset = 0;
const vertexCount = 4;
gl.drawArrays(gl.TRIANGLE_STRIP, offset, vertexCount);
}
}
drawSquare(buffers.position0, buffers.color0); // Player
drawSquare(buffers.position1, buffers.color1); // Background
drawSquare(buffers.position2, buffers.color2); // Window
}
//
// Initialize a shader program, so WebGL knows how to draw our data
//
function initShaderProgram(gl, vsSource, fsSource) {
const vertexShader = loadShader(gl, gl.VERTEX_SHADER, vsSource);
const fragmentShader = loadShader(gl, gl.FRAGMENT_SHADER, fsSource);
// Create the shader program
const shaderProgram = gl.createProgram();
gl.attachShader(shaderProgram, vertexShader);
gl.attachShader(shaderProgram, fragmentShader);
gl.linkProgram(shaderProgram);
// If creating the shader program failed, alert
if (!gl.getProgramParameter(shaderProgram, gl.LINK_STATUS)) {
alert('Unable to initialize the shader program: ' + gl.getProgramInfoLog(shaderProgram));
return null;
}
return shaderProgram;
}
//
// creates a shader of the given type, uploads the source and
// compiles it.
//
function loadShader(gl, type, source) {
const shader = gl.createShader(type);
// Send the source to the shader object
gl.shaderSource(shader, source);
// Compile the shader program
gl.compileShader(shader);
// See if it compiled successfully
if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) {
alert('An error occurred compiling the shaders: ' + gl.getShaderInfoLog(shader));
gl.deleteShader(shader);
return null;
}
return shader;
}
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width">
<title></title>
</head>
<body>
<canvas id="glcanvas" width="640" height="480"></canvas>
</body>
</html>
This seems to be what the paper linked by ripi2 is doing:
function main() {
const m4 = twgl.m4;
const gl = document.querySelector('canvas').getContext('webgl2', {alpha: false});
if (!gl) {
alert('need WebGL2');
return;
}
const ext = gl.getExtension('EXT_color_buffer_float');
if (!ext) {
alert('EXT_color_buffer_float');
return;
}
const vs = `
#version 300 es
layout(location=0) in vec4 position;
uniform mat4 u_matrix;
void main() {
gl_Position = u_matrix * position;
}
`;
const checkerFS = `
#version 300 es
precision highp float;
uniform vec4 color1;
uniform vec4 color2;
out vec4 fragColor;
void main() {
ivec2 grid = ivec2(gl_FragCoord.xy) / 32;
fragColor = mix(color1, color2, float((grid.x + grid.y) % 2));
}
`;
const transparentFS = `
#version 300 es
precision highp float;
uniform vec4 Ci;
out vec4 fragData[2];
float w(float z, float a) {
return a * max(pow(10.0,-2.0),3.0*pow(10.0,3.0)*pow((1.0 - z), 3.));
}
void main() {
float ai = Ci.a;
float zi = gl_FragCoord.z;
float wresult = w(zi, ai);
fragData[0] = vec4(Ci.rgb * wresult, ai);
fragData[1].r = ai * wresult;
}
`;
const compositeFS = `
#version 300 es
precision highp float;
uniform sampler2D ATexture;
uniform sampler2D BTexture;
out vec4 fragColor;
void main() {
vec4 accum = texelFetch(ATexture, ivec2(gl_FragCoord.xy), 0);
float r = accum.a;
accum.a = texelFetch(BTexture, ivec2(gl_FragCoord.xy), 0).r;
fragColor = vec4(accum.rgb / clamp(accum.a, 1e-4, 5e4), r);
}
`;
const checkerProgramInfo = twgl.createProgramInfo(gl, [vs, checkerFS]);
const transparentProgramInfo = twgl.createProgramInfo(gl, [vs, transparentFS]);
const compositeProgramInfo = twgl.createProgramInfo(gl, [vs, compositeFS]);
const bufferInfo = twgl.primitives.createXYQuadBufferInfo(gl);
const fbi = twgl.createFramebufferInfo(
gl,
[
{ internalFormat: gl.RGBA32F, minMag: gl.NEAREST },
{ internalFormat: gl.R32F, minMag: gl.NEAREST },
]);
function render(time) {
time *= 0.001;
twgl.setBuffersAndAttributes(gl, transparentProgramInfo, bufferInfo);
// drawOpaqueSurfaces();
gl.useProgram(checkerProgramInfo.program);
gl.disable(gl.BLEND);
twgl.setUniforms(checkerProgramInfo, {
color1: [.5, .5, .5, 1],
color2: [.7, .7, .7, 1],
u_matrix: m4.identity(),
});
twgl.drawBufferInfo(gl, bufferInfo);
twgl.bindFramebufferInfo(gl, fbi);
gl.drawBuffers([gl.COLOR_ATTACHMENT0, gl.COLOR_ATTACHMENT1]);
gl.clearBufferfv(gl.COLOR, 0, new Float32Array([0, 0, 0, 1]));
gl.clearBufferfv(gl.COLOR, 1, new Float32Array([1, 1, 1, 1]));
gl.depthMask(false);
gl.enable(gl.BLEND);
gl.blendFuncSeparate(gl.ONE, gl.ONE, gl.ZERO, gl.ONE_MINUS_SRC_ALPHA);
gl.useProgram(transparentProgramInfo.program);
// drawTransparentSurfaces();
const quads = [
[ .4, 0, 0, .4],
[ .4, .4, 0, .4],
[ 0, .4, 0, .4],
[ 0, .4, .4, .4],
[ 0, .0, .4, .4],
[ .4, .0, .4, .4],
];
quads.forEach((color, ndx) => {
const u = ndx / (quads.length - 1);
// change the order every second
const v = ((ndx + time | 0) % quads.length) / (quads.length - 1);
const xy = (u * 2 - 1) * .25;
const z = (v * 2 - 1) * .25;
let mat = m4.identity();
mat = m4.translate(mat, [xy, xy, z]);
mat = m4.scale(mat, [.3, .3, 1]);
twgl.setUniforms(transparentProgramInfo, {
Ci: color,
u_matrix: mat,
});
twgl.drawBufferInfo(gl, bufferInfo);
});
twgl.bindFramebufferInfo(gl, null);
gl.drawBuffers([gl.BACK]);
gl.blendFunc(gl.ONE_MINUS_SRC_ALPHA, gl.SRC_ALPHA);
gl.useProgram(compositeProgramInfo.program);
twgl.setUniforms(compositeProgramInfo, {
ATexture: fbi.attachments[0],
BTexture: fbi.attachments[1],
u_matrix: m4.identity(),
});
twgl.drawBufferInfo(gl, bufferInfo);
/* only needed if {alpha: false} not passed into getContext
gl.colorMask(false, false, false, true);
gl.clearColor(1, 1, 1, 1);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.colorMask(true, true, true, true);
*/
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
Some things to note:
It's using WebGL2, but it should be possible in WebGL1; you'd have to change the shaders to use GLSL ES 1.0.
It's using floating point textures. The paper mentions you can use half-float textures as well. Note that rendering to both half and float textures is an optional feature even in WebGL2. I believe most mobile hardware can render to half but not to float.
It's using weight equation 10 from the paper. There are 4 weight equations in the paper: 7, 8, 9, and 10. To do 7, 8, or 9 you'd need to pass the view-space z from the vertex shader to the fragment shader.
It's switching the order of drawing every second
The code is pretty straight forward.
It creates 3 shaders. One draws a checkerboard, just so we have something opaque to see the transparent stuff drawn above it. One is the transparent-object shader. The last is the shader that composites the transparent stuff into the scene.
Next it makes 2 textures, a floating point RGBA32F texture and a floating point R32F texture (red channel only), and attaches them to a framebuffer. (That is all done in the one function, twgl.createFramebufferInfo, which makes the textures the same size as the canvas by default.)
We make a single quad that goes from -1 to +1
We use that quad to draw the checkerboard into the canvas
Then we turn on blending, set up the blend equations as the paper says, switch to rendering onto our framebuffer, and clear that framebuffer. Note that the two attachments are cleared to 0,0,0,1 and 1 respectively. This is the version where we don't have separate blend functions per draw buffer. If you switch to the version that can use separate blend functions per draw buffer, you need to clear to different values and use a different shader (see the paper).
Using our transparency shader, we use that same quad to draw 6 rectangles, each in a solid color. I just used solid colors to keep it simple. Each is at a different Z, and the Zs change every second just to show the effect of Z changing.
In the shader, Ci is the input color. It's expected to be a premultiplied-alpha color according to the paper. fragData[0] is the "accumulate" texture and fragData[1] is the "revealage" texture, which has only one channel, red. The w function represents equation 10 from the paper.
After all 6 quads are drawn we switch back to rendering to the canvas and use the compositing shader to composite the transparency result with the non-transparent canvas contents.
Here's an example with some geometry. Differences:
It's using equations (7) from the paper instead of (10)
In order to do correct z-buffering, the depth buffer needs to be shared between the opaque and transparent rendering, so there are 2 framebuffers. One has RGBA8 + depth; the other has RGBA32F + R32F + depth. The depth buffer is shared.
The transparent renderer computes simple lighting and then uses the result as the Ci value from the paper.
After compositing the transparent into the opaque we still need to copy the opaque into the canvas to see the result
function main() {
const m4 = twgl.m4;
const v3 = twgl.v3;
const gl = document.querySelector('canvas').getContext('webgl2', {alpha: false});
if (!gl) {
alert('need WebGL2');
return;
}
const ext = gl.getExtension('EXT_color_buffer_float');
if (!ext) {
alert('EXT_color_buffer_float');
return;
}
const vs = `
#version 300 es
layout(location=0) in vec4 position;
layout(location=1) in vec3 normal;
uniform mat4 u_projection;
uniform mat4 u_modelView;
out vec4 v_viewPosition;
out vec3 v_normal;
void main() {
gl_Position = u_projection * u_modelView * position;
v_viewPosition = u_modelView * position;
v_normal = (u_modelView * vec4(normal, 0)).xyz;
}
`;
const checkerFS = `
#version 300 es
precision highp float;
uniform vec4 color1;
uniform vec4 color2;
out vec4 fragColor;
void main() {
ivec2 grid = ivec2(gl_FragCoord.xy) / 32;
fragColor = mix(color1, color2, float((grid.x + grid.y) % 2));
}
`;
const opaqueFS = `
#version 300 es
precision highp float;
in vec4 v_viewPosition;
in vec3 v_normal;
uniform vec4 u_color;
uniform vec3 u_lightDirection;
out vec4 fragColor;
void main() {
float light = abs(dot(normalize(v_normal), u_lightDirection));
fragColor = vec4(u_color.rgb * light, u_color.a);
}
`;
const transparentFS = `
#version 300 es
precision highp float;
uniform vec4 u_color;
uniform vec3 u_lightDirection;
in vec4 v_viewPosition;
in vec3 v_normal;
out vec4 fragData[2];
// eq (7)
float w(float z, float a) {
return a * max(
pow(10.0, -2.0),
min(
3.0 * pow(10.0, 3.0),
10.0 /
(pow(10.0, -5.0) +
pow(abs(z) / 5.0, 2.0) +
pow(abs(z) / 200.0, 6.0)
)
)
);
}
void main() {
float light = abs(dot(normalize(v_normal), u_lightDirection));
vec4 Ci = vec4(u_color.rgb * light, u_color.a);
float ai = Ci.a;
float zi = gl_FragCoord.z;
float wresult = w(zi, ai);
fragData[0] = vec4(Ci.rgb * wresult, ai);
fragData[1].r = ai * wresult;
}
`;
const compositeFS = `
#version 300 es
precision highp float;
uniform sampler2D ATexture;
uniform sampler2D BTexture;
out vec4 fragColor;
void main() {
vec4 accum = texelFetch(ATexture, ivec2(gl_FragCoord.xy), 0);
float r = accum.a;
accum.a = texelFetch(BTexture, ivec2(gl_FragCoord.xy), 0).r;
fragColor = vec4(accum.rgb / clamp(accum.a, 1e-4, 5e4), r);
}
`;
const blitFS = `
#version 300 es
precision highp float;
uniform sampler2D u_texture;
out vec4 fragColor;
void main() {
fragColor = texelFetch(u_texture, ivec2(gl_FragCoord.xy), 0);
}
`;
const checkerProgramInfo = twgl.createProgramInfo(gl, [vs, checkerFS]);
const opaqueProgramInfo = twgl.createProgramInfo(gl, [vs, opaqueFS]);
const transparentProgramInfo = twgl.createProgramInfo(gl, [vs, transparentFS]);
const compositeProgramInfo = twgl.createProgramInfo(gl, [vs, compositeFS]);
const blitProgramInfo = twgl.createProgramInfo(gl, [vs, blitFS]);
const xyQuadVertexArrayInfo = makeVAO(checkerProgramInfo, twgl.primitives.createXYQuadBufferInfo(gl));
const sphereVertexArrayInfo = makeVAO(transparentProgramInfo, twgl.primitives.createSphereBufferInfo(gl, 1, 16, 12));
const cubeVertexArrayInfo = makeVAO(opaqueProgramInfo, twgl.primitives.createCubeBufferInfo(gl, 1, 1));
function makeVAO(programInfo, bufferInfo) {
return twgl.createVertexArrayInfo(gl, programInfo, bufferInfo);
}
// In order to do proper zbuffering we need to share
// the depth buffer
const opaqueAttachments = [
{ internalFormat: gl.RGBA8, minMag: gl.NEAREST },
{ format: gl.DEPTH_COMPONENT16, minMag: gl.NEAREST },
];
const opaqueFBI = twgl.createFramebufferInfo(gl, opaqueAttachments);
const transparentAttachments = [
{ internalFormat: gl.RGBA32F, minMag: gl.NEAREST },
{ internalFormat: gl.R32F, minMag: gl.NEAREST },
{ format: gl.DEPTH_COMPONENT16, minMag: gl.NEAREST, attachment: opaqueFBI.attachments[1] },
];
const transparentFBI = twgl.createFramebufferInfo(gl, transparentAttachments);
function render(time) {
time *= 0.001;
if (twgl.resizeCanvasToDisplaySize(gl.canvas)) {
// if the canvas is resized also resize the framebuffer
// attachments (the depth buffer will be resized twice
// but I'm too lazy to fix it)
twgl.resizeFramebufferInfo(gl, opaqueFBI, opaqueAttachments);
twgl.resizeFramebufferInfo(gl, transparentFBI, transparentAttachments);
}
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const fov = 45 * Math.PI / 180;
const zNear = 0.1;
const zFar = 500;
const projection = m4.perspective(fov, aspect, zNear, zFar);
const eye = [0, 0, -5];
const target = [0, 0, 0];
const up = [0, 1, 0];
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
const lightDirection = v3.normalize([1, 3, 5]);
twgl.bindFramebufferInfo(gl, opaqueFBI);
gl.drawBuffers([gl.COLOR_ATTACHMENT0]);
gl.depthMask(true);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
gl.bindVertexArray(xyQuadVertexArrayInfo.vertexArrayObject);
// drawOpaqueSurfaces();
// draw checkerboard
gl.useProgram(checkerProgramInfo.program);
gl.disable(gl.DEPTH_TEST);
gl.disable(gl.BLEND);
twgl.setUniforms(checkerProgramInfo, {
color1: [.5, .5, .5, 1],
color2: [.7, .7, .7, 1],
u_projection: m4.identity(),
u_modelView: m4.identity(),
});
twgl.drawBufferInfo(gl, xyQuadVertexArrayInfo);
// draw a cube with depth buffer
gl.enable(gl.DEPTH_TEST);
{
gl.useProgram(opaqueProgramInfo.program);
gl.bindVertexArray(cubeVertexArrayInfo.vertexArrayObject);
let mat = view;
mat = m4.rotateX(mat, time * .1);
mat = m4.rotateY(mat, time * .2);
mat = m4.scale(mat, [1.5, 1.5, 1.5]);
twgl.setUniforms(opaqueProgramInfo, {
u_color: [1, .5, .2, 1],
u_lightDirection: lightDirection,
u_projection: projection,
u_modelView: mat,
});
twgl.drawBufferInfo(gl, cubeVertexArrayInfo);
}
twgl.bindFramebufferInfo(gl, transparentFBI);
gl.drawBuffers([gl.COLOR_ATTACHMENT0, gl.COLOR_ATTACHMENT1]);
// these values change if using separate blend functions
// per attachment (something WebGL2 does not support)
gl.clearBufferfv(gl.COLOR, 0, new Float32Array([0, 0, 0, 1]));
gl.clearBufferfv(gl.COLOR, 1, new Float32Array([1, 1, 1, 1]));
gl.depthMask(false); // don't write to depth buffer (but still testing)
gl.enable(gl.BLEND);
// this changes if using separate blend functions per attachment
gl.blendFuncSeparate(gl.ONE, gl.ONE, gl.ZERO, gl.ONE_MINUS_SRC_ALPHA);
gl.useProgram(transparentProgramInfo.program);
gl.bindVertexArray(sphereVertexArrayInfo.vertexArrayObject);
// drawTransparentSurfaces();
const spheres = [
[ .4, 0, 0, .4],
[ .4, .4, 0, .4],
[ 0, .4, 0, .4],
[ 0, .4, .4, .4],
[ 0, .0, .4, .4],
[ .4, .0, .4, .4],
];
spheres.forEach((color, ndx) => {
const u = ndx + 2;
let mat = view;
mat = m4.rotateX(mat, time * u * .1);
mat = m4.rotateY(mat, time * u * .2);
mat = m4.translate(mat, [0, 0, 1 + ndx * .1]);
twgl.setUniforms(transparentProgramInfo, {
u_color: color,
u_lightDirection: lightDirection,
u_projection: projection,
u_modelView: mat,
});
twgl.drawBufferInfo(gl, sphereVertexArrayInfo);
});
// composite transparent results with opaque
twgl.bindFramebufferInfo(gl, opaqueFBI);
gl.drawBuffers([gl.COLOR_ATTACHMENT0]);
gl.disable(gl.DEPTH_TEST);
gl.blendFunc(gl.ONE_MINUS_SRC_ALPHA, gl.SRC_ALPHA);
gl.useProgram(compositeProgramInfo.program);
gl.bindVertexArray(xyQuadVertexArrayInfo.vertexArrayObject);
twgl.setUniforms(compositeProgramInfo, {
ATexture: transparentFBI.attachments[0],
BTexture: transparentFBI.attachments[1],
u_projection: m4.identity(),
u_modelView: m4.identity(),
});
twgl.drawBufferInfo(gl, xyQuadVertexArrayInfo);
/* only needed if {alpha: false} not passed into getContext
gl.colorMask(false, false, false, true);
gl.clearColor(1, 1, 1, 1);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.colorMask(true, true, true, true);
*/
// draw opaque color buffer into canvas
// could probably use gl.blitFramebuffer
gl.disable(gl.BLEND);
twgl.bindFramebufferInfo(gl, null);
gl.useProgram(blitProgramInfo.program);
gl.bindVertexArray(xyQuadVertexArrayInfo.vertexArrayObject);
twgl.setUniforms(blitProgramInfo, {
u_texture: opaqueFBI.attachments[0],
u_projection: m4.identity(),
u_modelView: m4.identity(),
});
twgl.drawBufferInfo(gl, xyQuadVertexArrayInfo);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
It occurs to me that rather than using standard OpenGL blending for the last 2 steps (composite followed by blit), we could change the composite shader so it takes 3 textures (ATexture, BTexture, opaqueTexture) and blends in the shader, outputting directly to the canvas. That would be faster.
function main() {
const m4 = twgl.m4;
const v3 = twgl.v3;
const gl = document.querySelector('canvas').getContext('webgl2', {alpha: false});
if (!gl) {
alert('need WebGL2');
return;
}
const ext = gl.getExtension('EXT_color_buffer_float');
if (!ext) {
alert('EXT_color_buffer_float');
return;
}
const vs = `
#version 300 es
layout(location=0) in vec4 position;
layout(location=1) in vec3 normal;
uniform mat4 u_projection;
uniform mat4 u_modelView;
out vec4 v_viewPosition;
out vec3 v_normal;
void main() {
gl_Position = u_projection * u_modelView * position;
v_viewPosition = u_modelView * position;
v_normal = (u_modelView * vec4(normal, 0)).xyz;
}
`;
const checkerFS = `
#version 300 es
precision highp float;
uniform vec4 color1;
uniform vec4 color2;
out vec4 fragColor;
void main() {
ivec2 grid = ivec2(gl_FragCoord.xy) / 32;
fragColor = mix(color1, color2, float((grid.x + grid.y) % 2));
}
`;
const opaqueFS = `
#version 300 es
precision highp float;
in vec4 v_viewPosition;
in vec3 v_normal;
uniform vec4 u_color;
uniform vec3 u_lightDirection;
out vec4 fragColor;
void main() {
float light = abs(dot(normalize(v_normal), u_lightDirection));
fragColor = vec4(u_color.rgb * light, u_color.a);
}
`;
const transparentFS = `
#version 300 es
precision highp float;
uniform vec4 u_color;
uniform vec3 u_lightDirection;
in vec4 v_viewPosition;
in vec3 v_normal;
out vec4 fragData[2];
// eq (7)
float w(float z, float a) {
return a * max(
pow(10.0, -2.0),
min(
3.0 * pow(10.0, 3.0),
10.0 /
(pow(10.0, -5.0) +
pow(abs(z) / 5.0, 2.0) +
pow(abs(z) / 200.0, 6.0)
)
)
);
}
void main() {
float light = abs(dot(normalize(v_normal), u_lightDirection));
vec4 Ci = vec4(u_color.rgb * light, u_color.a);
float ai = Ci.a;
float zi = gl_FragCoord.z;
float wresult = w(zi, ai);
fragData[0] = vec4(Ci.rgb * wresult, ai);
fragData[1].r = ai * wresult;
}
`;
const compositeFS = `
#version 300 es
precision highp float;
uniform sampler2D ATexture;
uniform sampler2D BTexture;
uniform sampler2D opaqueTexture;
out vec4 fragColor;
void main() {
vec4 accum = texelFetch(ATexture, ivec2(gl_FragCoord.xy), 0);
float r = accum.a;
accum.a = texelFetch(BTexture, ivec2(gl_FragCoord.xy), 0).r;
vec4 transparentColor = vec4(accum.rgb / clamp(accum.a, 1e-4, 5e4), r);
vec4 opaqueColor = texelFetch(opaqueTexture, ivec2(gl_FragCoord.xy), 0);
// gl.blendFunc(gl.ONE_MINUS_SRC_ALPHA, gl.SRC_ALPHA);
fragColor = transparentColor * (1. - r) + opaqueColor * r;
}
`;
const checkerProgramInfo = twgl.createProgramInfo(gl, [vs, checkerFS]);
const opaqueProgramInfo = twgl.createProgramInfo(gl, [vs, opaqueFS]);
const transparentProgramInfo = twgl.createProgramInfo(gl, [vs, transparentFS]);
const compositeProgramInfo = twgl.createProgramInfo(gl, [vs, compositeFS]);
const xyQuadVertexArrayInfo = makeVAO(checkerProgramInfo, twgl.primitives.createXYQuadBufferInfo(gl));
const sphereVertexArrayInfo = makeVAO(transparentProgramInfo, twgl.primitives.createSphereBufferInfo(gl, 1, 16, 12));
const cubeVertexArrayInfo = makeVAO(opaqueProgramInfo, twgl.primitives.createCubeBufferInfo(gl, 1, 1));
function makeVAO(programInfo, bufferInfo) {
return twgl.createVertexArrayInfo(gl, programInfo, bufferInfo);
}
// In order to do proper zbuffering we need to share
// the depth buffer
const opaqueAttachments = [
{ internalFormat: gl.RGBA8, minMag: gl.NEAREST },
{ format: gl.DEPTH_COMPONENT16, minMag: gl.NEAREST },
];
const opaqueFBI = twgl.createFramebufferInfo(gl, opaqueAttachments);
const transparentAttachments = [
{ internalFormat: gl.RGBA32F, minMag: gl.NEAREST },
{ internalFormat: gl.R32F, minMag: gl.NEAREST },
{ format: gl.DEPTH_COMPONENT16, minMag: gl.NEAREST, attachment: opaqueFBI.attachments[1] },
];
const transparentFBI = twgl.createFramebufferInfo(gl, transparentAttachments);
function render(time) {
time *= 0.001;
if (twgl.resizeCanvasToDisplaySize(gl.canvas)) {
// if the canvas is resized also resize the framebuffer
// attachments (the depth buffer will be resized twice
// but I'm too lazy to fix it)
twgl.resizeFramebufferInfo(gl, opaqueFBI, opaqueAttachments);
twgl.resizeFramebufferInfo(gl, transparentFBI, transparentAttachments);
}
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const fov = 45 * Math.PI / 180;
const zNear = 0.1;
const zFar = 500;
const projection = m4.perspective(fov, aspect, zNear, zFar);
const eye = [0, 0, -5];
const target = [0, 0, 0];
const up = [0, 1, 0];
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
const lightDirection = v3.normalize([1, 3, 5]);
twgl.bindFramebufferInfo(gl, opaqueFBI);
gl.drawBuffers([gl.COLOR_ATTACHMENT0]);
gl.depthMask(true);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
gl.bindVertexArray(xyQuadVertexArrayInfo.vertexArrayObject);
// drawOpaqueSurfaces();
// draw checkerboard
gl.useProgram(checkerProgramInfo.program);
gl.disable(gl.DEPTH_TEST);
gl.disable(gl.BLEND);
twgl.setUniforms(checkerProgramInfo, {
color1: [.5, .5, .5, 1],
color2: [.7, .7, .7, 1],
u_projection: m4.identity(),
u_modelView: m4.identity(),
});
twgl.drawBufferInfo(gl, xyQuadVertexArrayInfo);
// draw a cube with depth buffer
gl.enable(gl.DEPTH_TEST);
{
gl.useProgram(opaqueProgramInfo.program);
gl.bindVertexArray(cubeVertexArrayInfo.vertexArrayObject);
let mat = view;
mat = m4.rotateX(mat, time * .1);
mat = m4.rotateY(mat, time * .2);
mat = m4.scale(mat, [1.5, 1.5, 1.5]);
twgl.setUniforms(opaqueProgramInfo, {
u_color: [1, .5, .2, 1],
u_lightDirection: lightDirection,
u_projection: projection,
u_modelView: mat,
});
twgl.drawBufferInfo(gl, cubeVertexArrayInfo);
}
twgl.bindFramebufferInfo(gl, transparentFBI);
gl.drawBuffers([gl.COLOR_ATTACHMENT0, gl.COLOR_ATTACHMENT1]);
// these values change if using separate blend functions
// per attachment (something WebGL2 does not support)
gl.clearBufferfv(gl.COLOR, 0, new Float32Array([0, 0, 0, 1]));
gl.clearBufferfv(gl.COLOR, 1, new Float32Array([1, 1, 1, 1]));
gl.depthMask(false); // don't write to depth buffer (but still testing)
gl.enable(gl.BLEND);
// this changes if using separate blend functions per attachment
gl.blendFuncSeparate(gl.ONE, gl.ONE, gl.ZERO, gl.ONE_MINUS_SRC_ALPHA);
gl.useProgram(transparentProgramInfo.program);
gl.bindVertexArray(sphereVertexArrayInfo.vertexArrayObject);
// drawTransparentSurfaces();
const spheres = [
[ .4, 0, 0, .4],
[ .4, .4, 0, .4],
[ 0, .4, 0, .4],
[ 0, .4, .4, .4],
[ 0, .0, .4, .4],
[ .4, .0, .4, .4],
];
spheres.forEach((color, ndx) => {
const u = ndx + 2;
let mat = view;
mat = m4.rotateX(mat, time * u * .1);
mat = m4.rotateY(mat, time * u * .2);
mat = m4.translate(mat, [0, 0, 1 + ndx * .1]);
twgl.setUniforms(transparentProgramInfo, {
u_color: color,
u_lightDirection: lightDirection,
u_projection: projection,
u_modelView: mat,
});
twgl.drawBufferInfo(gl, sphereVertexArrayInfo);
});
// composite transparent results with opaque
twgl.bindFramebufferInfo(gl, null);
gl.disable(gl.DEPTH_TEST);
gl.disable(gl.BLEND);
gl.useProgram(compositeProgramInfo.program);
gl.bindVertexArray(xyQuadVertexArrayInfo.vertexArrayObject);
twgl.setUniforms(compositeProgramInfo, {
ATexture: transparentFBI.attachments[0],
BTexture: transparentFBI.attachments[1],
opaqueTexture: opaqueFBI.attachments[0],
u_projection: m4.identity(),
u_modelView: m4.identity(),
});
twgl.drawBufferInfo(gl, xyQuadVertexArrayInfo);
/* only needed if {alpha: false} not passed into getContext
gl.colorMask(false, false, false, true);
gl.clearColor(1, 1, 1, 1);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.colorMask(true, true, true, true);
*/
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
I have three requirements for my depth testing of semi-transparent objects
It's actually quite rare to have self-intersecting objects with partially transparent (actually blended) samples. The common cases for self-intersecting geometry are grass and leaves. However, in these cases the actual areas covered by grass and leaves are not transparent - they are opaque.
The common solution here is alpha testing. Render the leaves as an opaque (not blended) quad (with a normal depth test and write), and discard fragments which have insufficient alpha (e.g. because they are outside of the leaf), as in the sketch below. Because the individual samples here are opaque, you get order independence for free: the depth test works as you would expect for an opaque object.
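A minimal fragment-shader sketch of that alpha-test idea (u_texture and v_texCoord are names invented here, and the quad is assumed to be drawn with the depth test and depth write enabled and blending disabled):

#version 300 es
precision mediump float;
uniform sampler2D u_texture; // grass/leaf texture with an alpha channel (hypothetical name)
in vec2 v_texCoord;
out vec4 outColor;
void main() {
  vec4 color = texture(u_texture, v_texCoord);
  // alpha test: treat mostly-transparent texels as "not part of the leaf"
  if (color.a < 0.5) {
    discard; // the fragment writes neither color nor depth
  }
  outColor = vec4(color.rgb, 1.0); // surviving fragments are fully opaque
}

Because every fragment that remains is opaque and depth-tested normally, draw order no longer matters for these quads; enabling GL_SAMPLE_ALPHA_TO_COVERAGE on a multisampled target, as described next, then softens the hard cut-off at the edges.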
If you want blended edges, then enable alpha-to-coverage and let the multi-sample resolve clean up the edges a little.
For the small amount of actually transparent stuff you have left, you normally need to do a back-to-front sort on the CPU and render it after the opaque pass.
Proper OIT is possible, but it is generally quite an expensive technique, so I've yet to see anyone actually use it outside of an academic environment (at least on mobile OpenGL ES implementations).

Array of structures into Compute Shader

I'm writing a simple compute shader in OpenGL to understand how it works, but I can't manage to obtain the result I want.
I want to pass an array of structures (colorStruct) to my compute shader to color an output texture.
I would like to get a red image when "colorWanted" = 0 in my compute shader, a green image when "colorWanted" = 1, and blue for 2.
But I actually get only red when "colorWanted" = 1 or 2 or 3, and black when "colorWanted" > 2...
Maybe someone has an idea, or maybe I did not understand how compute shader inputs work.
Thank you for your help; here is the interesting part of my code.
My compute shader:
#version 430 compatibility
layout(std430, binding=4) buffer Couleureuh
{
vec3 Coul[3]; // array of structures
};
layout(local_size_x = 1, local_size_y = 1) in;
layout(rgba32f, binding = 0) uniform image2D img_output;
void main() {
// base pixel colour for image
vec4 pixel = vec4(0.0, 0.0, 0.0, 1.0);
// get index in the global work group, i.e. the x,y pixel position
ivec2 pixel_coords = ivec2(gl_GlobalInvocationID.xy);
ivec2 dims = imageSize (img_output);
int colorWanted = 0;
pixel = vec4(Coul[colorWanted], 1.0);
// output to a specific pixel in the image
imageStore (img_output, pixel_coords, pixel);
}
Compute shader and SSBO initialization:
GLuint structBuffer;
glGenBuffers(1, &structBuffer);
glBindBuffer(GL_SHADER_STORAGE_BUFFER, structBuffer);
glBufferData(GL_SHADER_STORAGE_BUFFER, 3*sizeof(colorStruct), NULL, GL_STATIC_DRAW);
GLint bufMask = GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT; // invalidate makes a big difference when re-writing
colorStruct *coul;
coul = (colorStruct *) glMapBufferRange(GL_SHADER_STORAGE_BUFFER, 0, 3*sizeof(colorStruct), bufMask);
coul[0].r = 1.0f;
coul[0].g = 0.0f;
coul[0].b = 0.0f;
coul[1].r = 0.0f;
coul[1].g = 1.0f;
coul[1].b = 0.0f;
coul[2].r = 0.0f;
coul[2].g = 0.0f;
coul[2].b = 1.0f;
glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 4, structBuffer);
m_out_texture.bindImage();
// Launch compute shader
m_shader.use();
glDispatchCompute(m_tex_w, m_tex_h, 1);
// Prevent sampling before all writes to the image are done
glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);
vec3s are always 16-byte aligned. As such, when they're in an array, they act like vec4s, even with std430 layout.
Never use vec3 in interface blocks. You should either use an array of floats (and access the 3 members you want individually) or an array of vec4s (with an unused element).
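As a rough sketch of those two options (assuming the CPU side is adjusted to match: 16 bytes per entry for the vec4 variant, or a tightly packed float array for the other), the interface block could be declared like this - use one of the two declarations, not both:
// Option A: vec4 with an unused .w component (std430 array stride = 16 bytes)
layout(std430, binding = 4) buffer Couleureuh
{
    vec4 Coul[3];
};
// ... in main(): pixel = vec4(Coul[colorWanted].rgb, 1.0);

// Option B: a tightly packed float array (std430 array stride = 4 bytes)
layout(std430, binding = 4) buffer Couleureuh
{
    float CoulData[9]; // 3 floats per color
};
// ... in main(): pixel = vec4(CoulData[3 * colorWanted + 0],
//                             CoulData[3 * colorWanted + 1],
//                             CoulData[3 * colorWanted + 2], 1.0);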

Lighting Dual depth peeling

I'm doing dual depth peeling, and I want to ask how to do the lighting properly. I have an algorithm like this:
glDisable(GL_DEPTH_TEST);
glEnable(GL_BLEND);
glBindFramebuffer(GL_FRAMEBUFFER, dualDepthFBOID);
// Render targets 1 and 2 store the front and back colors
// Clear to 0.0 and use MAX blending to filter written color
// At most one front color and one back color can be written every pass
glDrawBuffers(2, &drawBuffers[1]);
glClearColor(0, 0, 0, 0);
glClear(GL_COLOR_BUFFER_BIT);
GL_CHECK_ERRORS
// Render target 0 stores (-minDepth, maxDepth, alphaMultiplier)
glDrawBuffer(drawBuffers[0]);
glClearColor(-MAX_DEPTH, -MAX_DEPTH, 0, 0);
glClear(GL_COLOR_BUFFER_BIT);
glBlendEquation(GL_MAX);
DrawScene(MVP, initShader);
// 2. Depth peeling + blending pass
glDrawBuffer(drawBuffers[6]);
glClearColor(bg.x, bg.y, bg.z, bg.w);
glClear(GL_COLOR_BUFFER_BIT);
int numLayers = (NUM_PASSES - 1) * 2;
int currId = 0;
for (int layer = 1; bUseOQ || layer < numLayers; layer++) {
currId = layer % 2;
int prevId = 1 - currId;
int bufId = currId * 3;
glDrawBuffers(2, &drawBuffers[bufId+1]);
glClearColor(0, 0, 0, 0);
glClear(GL_COLOR_BUFFER_BIT);
glDrawBuffer(drawBuffers[bufId+0]);
glClearColor(-MAX_DEPTH, -MAX_DEPTH, 0, 0);
glClear(GL_COLOR_BUFFER_BIT);
// Render target 0: RG32F MAX blending
// Render target 1: RGBA MAX blending
// Render target 2: RGBA MAX blending
glDrawBuffers(3, &drawBuffers[bufId+0]);
glBlendEquation(GL_MAX);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_RECTANGLE, depthTexID[prevId]);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_RECTANGLE, texID[prevId]);
DrawScene(MVP, dualPeelShader, true,true);
// Full screen pass to alpha-blend the back color
glDrawBuffer(drawBuffers[6]);
glBlendEquation(GL_FUNC_ADD);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
if (bUseOQ) {
glBeginQuery(GL_SAMPLES_PASSED_ARB, queryId);
}
GL_CHECK_ERRORS
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_RECTANGLE, backTexID[currId]);
blendShader.Use();
DrawFullScreenQuad();
blendShader.UnUse();
if (bUseOQ) {
glEndQuery(GL_SAMPLES_PASSED);
GLuint sample_count;
glGetQueryObjectuiv(queryId, GL_QUERY_RESULT, &sample_count);
if (sample_count == 0) {
break;
}
}
GL_CHECK_ERRORS
}
GL_CHECK_ERRORS
glDisable(GL_BLEND);
// 3. Final render pass
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glDrawBuffer(GL_BACK_LEFT);
glBindTexture(GL_TEXTURE_RECTANGLE, colorBlenderTexID);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_RECTANGLE, depthTexID[currId]);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_RECTANGLE, texID[currId]);
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_RECTANGLE, colorBlenderTexID);
finalShader.Use();
DrawFullScreenQuad();
finalShader.UnUse();
I'm doing the lighting in dualPeelShader, so lighting is applied in every peeling pass. This results in an extremely bright object. Should I do the lighting in finalShader instead?
---EDIT----
Peel Fragment Shader
#version 330 core
layout(location = 0) out vec4 vFragColor0;
layout(location = 1) out vec4 vFragColor1;
layout(location = 2) out vec4 vFragColor2;
uniform vec4 vColor;
uniform float isObject;
uniform vec3 LightPosition;
uniform sampler2DRect depthBlenderTex;
uniform sampler2DRect frontBlenderTex;
in vec4 vOutColor;
in vec3 position;
in vec3 normal;
in vec3 eyeDirection;
in vec3 lightDirection;
#define MAX_DEPTH 1.0
vec4 Lighted()
{
vec3 LightColor = vec3(1.0,1.0,1.0);
float LightPower = 50;
// Material properties
vec3 MaterialDiffuseColor = vOutColor.rgb;
vec3 MaterialAmbientColor = vec3(0.1,0.1,0.1) * MaterialDiffuseColor;
vec3 MaterialSpecularColor = vec3(0.3,0.3,0.3);
// Distance to the light
float distance = length( LightPosition - position );
// Normal of the computed fragment, in camera space
vec3 n = normalize( normal );
// Direction of the light (from the fragment to the light)
vec3 l = normalize( lightDirection);
// Cosine of the angle between the normal and the light direction,
// clamped above 0
// - light is at the vertical of the triangle -> 1
// - light is perpendicular to the triangle -> 0
// - light is behind the triangle -> 0
float cosTheta = clamp( dot( n,l ), 0,1 );
// Eye vector (towards the camera)
vec3 E = normalize(eyeDirection);
// Direction in which the triangle reflects the light
vec3 R = reflect(-l,n);
// Cosine of the angle between the Eye vector and the Reflect vector,
// clamped to 0
// - Looking into the reflection -> 1
// - Looking elsewhere -> < 1
float cosAlpha = clamp( dot( E,R ), 0,1 );
return vec4(MaterialAmbientColor + MaterialDiffuseColor * LightColor * LightPower * cosTheta / (distance*distance) +
MaterialSpecularColor * LightColor * LightPower * pow(cosAlpha,5) / (distance*distance),vOutColor.a);
}
void main(void)
{
float fragDepth = gl_FragCoord.z;
vec2 depthBlender = texture(depthBlenderTex, gl_FragCoord.xy).xy;
vec4 forwardTemp = texture(frontBlenderTex, gl_FragCoord.xy);
// Depths and 1.0-alphaMult always increase
// so we can use pass-through by default with MAX blending
vFragColor0.xy = depthBlender;
// Front colors always increase (DST += SRC*ALPHA_MULT)
// so we can use pass-through by default with MAX blending
vFragColor1 = forwardTemp;
// Because over blending makes color increase or decrease,
// we cannot pass-through by default.
// Each pass, only one fragment writes a color greater than 0
vFragColor2 = vec4(0.0);
float nearestDepth = -depthBlender.x;
float farthestDepth = depthBlender.y;
float alphaMultiplier = 1.0 - forwardTemp.w;
if (fragDepth < nearestDepth || fragDepth > farthestDepth) {
// Skip this depth in the peeling algorithm
vFragColor0.xy = vec2(-MAX_DEPTH);
return;
}
if (fragDepth > nearestDepth && fragDepth < farthestDepth) {
// This fragment needs to be peeled again
vFragColor0.xy = vec2(-fragDepth, fragDepth);
return;
}
// If we made it here, this fragment is on the peeled layer from last pass
// therefore, we need to shade it, and make sure it is not peeled any farther
vFragColor0.xy = vec2(-MAX_DEPTH);
vec4 Color;
if(isObject == 0.0)
Color = vColor;
else
Color = Lighted();
if (fragDepth == nearestDepth) {
vFragColor1.xyz += Color.rgb * Color.a * alphaMultiplier;
vFragColor1.w = 1.0 - alphaMultiplier * (1.0 - Color.a);
} else {
vFragColor2 += Color;
}
}
Blend Fragment Shader
#version 330 core
uniform sampler2DRect tempTexture;
layout(location = 0) out vec4 vFragColor;
void main(void)
{
vFragColor = texture(tempTexture, gl_FragCoord.xy);
if(vFragColor.a == 0)
discard;
}