glow depth buffer does not clear - opengl

renderer.draw
pub fn draw(&self, rotation: f32) {
unsafe {
// Shader sources
const TEXTURE_VS_SRC: &str = "
#version 330 core
layout (location = 0) in vec3 aposition;
layout (location = 1) in vec3 acolor;
layout (location = 2) in vec2 atexture_coordinate;
out vec3 color;
out vec2 texture_coordinate;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main()
{
gl_Position = projection * view * model * vec4(aposition, 1.0);
color = acolor;
texture_coordinate = atexture_coordinate;
}
";
const TEXTURE_FS_SRC: &str = "
#version 330 core
// Outputs colors in RGBA
out vec4 FragColor;
// Inputs the color from the Vertex Shader
in vec3 color;
// Inputs the texture coordinates from the Vertex Shader
in vec2 texture_coordinate;
// Gets the Texture Unit from the main function
uniform sampler2D tex0;
void main()
{
FragColor = texture(tex0, texture_coordinate);
}
";
// Vertices coordinates
let VERTICES: Vec<f32> = vec![
// COORDINATES / COLORS / TexCoord //
-0.5, 0.0, 0.5, 0.83, 0.70, 0.44, 0.0, 0.0,
-0.5, 0.0, -0.5, 0.83, 0.70, 0.44, 5.0, 0.0,
0.5, 0.0, -0.5, 0.83, 0.70, 0.44, 0.0, 0.0,
0.5, 0.0, 0.5, 0.83, 0.70, 0.44, 5.0, 0.0,
0.0, 0.8, 0.0, 0.92, 0.86, 0.76, 2.5, 5.0
];
// Indices for vertices order
let INDICES: Vec<u32> = vec![
0, 1, 2,
0, 2, 3,
0, 1, 4,
1, 2, 4,
2, 3, 4,
3, 0, 4
];
self.gl.clear_color(0.3, 0.3, 0.3, 1.0);
self.gl.clear(glow::COLOR_BUFFER_BIT | glow::DEPTH_BUFFER_BIT);
self.gl.clear_depth_f32(1.0);
self.gl.depth_func(glow::LESS);
self.gl.depth_mask(true);
self.gl.enable(glow::DEPTH_TEST);
let shader = Shader::new(&self.gl, TEXTURE_VS_SRC, TEXTURE_FS_SRC);
shader.bind(&self.gl);
shader.upload_uniform_mat4(&self.gl, "model", &glm::rotate(&glm::identity(), rotation, &glm::vec3(0.0, 1.0, 0.0)));
shader.upload_uniform_mat4(&self.gl, "view", &glm::translate(&glm::identity(), &glm::vec3(0.0, -0.5, -2.0)));
shader.upload_uniform_mat4(&self.gl, "projection", &Mat4::new_perspective((800.0 / 600.0), 45.0, 0.01, 100.0));
let mut texture = Texture::new(String::from("sandbox/assets/textures/checkerboard.png"), 1.0);
texture.init(&self.gl);
Texture::bind(&self.gl, texture.get_renderer_id().unwrap(), 0);
shader.upload_uniform_integer1(&self.gl, "tex0", 0);
let layout = BufferLayout::new(
vec![
BufferElement::new("aposition".parse().unwrap(), ShaderDataType::Float3, false),
BufferElement::new("acolor".parse().unwrap(), ShaderDataType::Float3, false),
BufferElement::new("atexture_coordinate".parse().unwrap(), ShaderDataType::Float2, false),
]
);
let index_buffer = IndexBuffer::new(&self.gl, INDICES);
let vertex_buffer = VertexBuffer::new(&self.gl, VERTICES, layout);
let vertex_array = VertexArray::new(&self.gl, index_buffer, vertex_buffer);
self.gl.draw_elements(glow::TRIANGLES, vertex_array.get_indices_len() as i32, glow::UNSIGNED_INT, 0);
}
}
I'm using egui with the glow backend, which has its own GL context, so I made sure to reset everything before drawing. Obviously this needs refactoring, since the resources shouldn't be created on every draw, but I wanted to get it working first.
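A minimal sketch of that refactor, assuming the Shader/Texture/VertexArray types keep the constructors shown above (the field layout is hypothetical): creation moves to construction time and draw only updates the changing uniform and issues the call.
pub struct Renderer {
    gl: glow::Context,
    shader: Shader,
    texture: Texture,
    vertex_array: VertexArray,
}

impl Renderer {
    pub fn draw(&self, rotation: f32) {
        unsafe {
            // Per-frame work only: clear, update the changing uniform, draw.
            self.gl.clear_color(0.3, 0.3, 0.3, 1.0);
            self.gl.clear(glow::COLOR_BUFFER_BIT | glow::DEPTH_BUFFER_BIT);
            self.shader.bind(&self.gl);
            self.shader.upload_uniform_mat4(
                &self.gl,
                "model",
                &glm::rotate(&glm::identity(), rotation, &glm::vec3(0.0, 1.0, 0.0)),
            );
            // Rebind the vertex array / texture here if your types expose bind methods.
            self.gl.draw_elements(
                glow::TRIANGLES,
                self.vertex_array.get_indices_len() as i32,
                glow::UNSIGNED_INT,
                0,
            );
        }
    }
}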
texture.init
pub(crate) fn init(&mut self, gl: &glow::Context) {
match image::open(String::from(self.get_path())) {
Err(err) => panic!("Could not load image {}: {}", self.get_path(), err),
Ok(img) => unsafe {
let (width, height) = img.dimensions();
let (image, internal_format, data_format) = match img {
DynamicImage::ImageRgb8(img) => (img.into_raw(), glow::RGB8, glow::RGB),
DynamicImage::ImageRgba8(img) => (img.into_raw(), glow::RGBA8, glow::RGBA),
img => (img.to_rgb8().into_raw(), glow::RGB8, glow::RGB)
};
let renderer_id = gl.create_texture().unwrap();
gl.bind_texture(glow::TEXTURE_2D, Some(renderer_id));
gl.tex_storage_2d(glow::TEXTURE_2D, 1, internal_format, width as i32, height as i32);
gl.tex_parameter_i32(glow::TEXTURE_2D, glow::TEXTURE_MIN_FILTER, glow::NEAREST as i32);
gl.tex_parameter_i32(glow::TEXTURE_2D, glow::TEXTURE_MAG_FILTER, glow::NEAREST as i32);
gl.tex_parameter_i32(glow::TEXTURE_2D, glow::TEXTURE_WRAP_S, glow::REPEAT as i32);
gl.tex_parameter_i32(glow::TEXTURE_2D, glow::TEXTURE_WRAP_T, glow::REPEAT as i32);
gl.tex_sub_image_2d(glow::TEXTURE_2D, 0, 0, 0, width as i32, height as i32, data_format, glow::UNSIGNED_BYTE, PixelUnpackData::Slice(image.as_slice()));
gl.generate_mipmap(glow::TEXTURE_2D);
self.set_renderer_id(renderer_id);
}
}
}
Nsight depth state (screenshot): this is what it looks like.
EDIT:
how the window is created
fn create_display(
event_loop: &glutin::event_loop::EventLoop<()>,
title: &str
) -> (
glutin::WindowedContext<glutin::PossiblyCurrent>,
glow::Context,
) {
let window_builder = glutin::window::WindowBuilder::new()
.with_resizable(true)
.with_inner_size(glutin::dpi::LogicalSize {
width: 800.0,
height: 600.0,
})
.with_title(title);
let gl_window = unsafe {
glutin::ContextBuilder::new()
.with_depth_buffer(0)
.with_srgb(true)
.with_stencil_buffer(0)
.with_vsync(true)
.build_windowed(window_builder, event_loop)
.unwrap()
.make_current()
.unwrap()
};
let gl = unsafe { glow::Context::from_loader_function(|s| gl_window.get_proc_address(s)) };
unsafe {
use glow::HasContext as _;
gl.enable(glow::FRAMEBUFFER_SRGB);
}
(gl_window, gl)
}

The default framebuffer does not have a depth buffer. Therefore, the Depth Test does not work at all. You need to specify the depth buffer bits when creating the OpenGL window. e.g. 24 bits:
let gl_window = unsafe {
glutin::ContextBuilder::new()
.with_depth_buffer(24)
.with_srgb(true)
.with_stencil_buffer(0)
.with_vsync(true)
.build_windowed(window_builder, event_loop)
.unwrap()
.make_current()
.unwrap()
};
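One way to verify what the context actually gave you is to query the default framebuffer's depth size at startup. A glow sketch (in a core profile the legacy GL_DEPTH_BITS query is gone, so ask the framebuffer attachment instead; 0 means no depth buffer):
use glow::HasContext;

// Sketch: how many depth bits does the default framebuffer have?
unsafe fn default_depth_bits(gl: &glow::Context) -> i32 {
    gl.bind_framebuffer(glow::FRAMEBUFFER, None);
    let kind = gl.get_framebuffer_attachment_parameter_i32(
        glow::FRAMEBUFFER,
        glow::DEPTH,
        glow::FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE,
    );
    if kind == glow::NONE as i32 {
        return 0; // no depth attachment at all
    }
    gl.get_framebuffer_attachment_parameter_i32(
        glow::FRAMEBUFFER,
        glow::DEPTH,
        glow::FRAMEBUFFER_ATTACHMENT_DEPTH_SIZE,
    )
}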

Related

How to make a single color face for a cube in GLSL OpenGL

I have a cube which continuously rotates, but it is always rendered with an interpolated gradient color. I want to color a single face with only one color, exactly one per triangle piece, as below.
My GLSL
#version 410 core
attribute vec3 position;
attribute vec3 color;
varying vec3 outColor;
uniform float time;
uniform mat4 matrix;
void main()
{
float theta = time;
float co = cos(theta);
float si = sin(theta);
mat4 rotationY = mat4(co, 0, si, 0,
0, 1, 0, 0,
-si, 0, co, 0,
0, 0, 0, 1);
mat4 rotationX = mat4(1, 0, 0, 0,
0, co, -si, 0,
0, si, co, 0,
0, 0, 0, 1);
outColor = color;
gl_Position = matrix * rotationY * rotationX * vec4(position,1.f);
}
#version 410 core
varying vec3 outColor;
uniform float time;
void main()
{
gl_FragColor = vec4(outColor,1);
}
My GLFW code
glClearColor(1f, 1f, 1f, 1f)
glClear(GL_COLOR_BUFFER_BIT.toUInt() or GL_DEPTH_BUFFER_BIT.toUInt())
var time = glfwGetTime()
glUniform1f!!(uniformTime, time.toFloat())
glDrawElements(GL_TRIANGLES, verticesIndex.size, GL_UNSIGNED_SHORT, NULL)
glfwSwapBuffers(window)
glfwPollEvents()
My color array
[
0.8, 0.007315, 0.026764,
0.01696, 0.8, 0.005661,
0.007843, 0.001524, 0.8,
0.01696, 0.8, 0.005661,
0.007843, 0.001524, 0.8,
0.01696, 0.8, 0.005661,
0.007843, 0.001524, 0.8,
0.01696, 0.8, 0.005661,
0.007843, 0.001524, 0.8,
0.01696, 0.8, 0.005661,
0.007843, 0.001524, 0.8,
0.01696, 0.8, 0.005661
]
The usual way is to create a mesh with color attributes where each face has its own vertices with a color attribute of the same color.
Alternatively, you can try using the flat Interpolation qualifier:
The value will not be interpolated. The value given to the fragment shader is the value from the Provoking Vertex for that primitive.
Vertex shader
#version 410 core
in vec3 position;
in vec3 color;
flat out vec3 outColor;
void main()
{
outColor = color;
// [...]
}
Fragment shader:
#version 410 core
flat in vec3 outColor;
out vec4 fragColor;
void main()
{
fragColor = vec4(outColor, 1.0);
}
Another option would be to create an array of colors for each triangle primitive and store this color array in a Shader Storage Buffer Object. Use gl_VertexID to address the color in the vertex shader.
vec4 outColor;
layout(std430, binding = 0) buffer primitiveColors
{
vec4 colors[];
};
void main()
{
outColor = colors[gl_VertexID / 3];
// [...]
}
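On the host side, uploading one color per triangle into that binding = 0 buffer could look roughly like this (a glow/Rust sketch for consistency with the first question; note SSBOs require GL 4.3 or the ARB_shader_storage_buffer_object extension on top of the #version 410 context above):
use glow::HasContext;

// Sketch: one RGBA color per triangle, tightly packed (std430 packs vec4 tightly).
unsafe fn upload_primitive_colors(gl: &glow::Context, colors: &[[f32; 4]]) {
    let ssbo = gl.create_buffer().unwrap();
    gl.bind_buffer(glow::SHADER_STORAGE_BUFFER, Some(ssbo));
    let bytes: Vec<u8> = colors
        .iter()
        .flat_map(|c| c.iter().flat_map(|f| f.to_ne_bytes()))
        .collect();
    gl.buffer_data_u8_slice(glow::SHADER_STORAGE_BUFFER, &bytes, glow::STATIC_DRAW);
    // Matches `layout(std430, binding = 0)` in the shader above.
    gl.bind_buffer_base(glow::SHADER_STORAGE_BUFFER, 0, Some(ssbo));
}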

Why drawing a texture using OpenGL (using Rust) is showing border colour

I am drawing this texture using OpenGL and Rust. But for some reason the image has a border around it when I draw away from the top left. I think there is something wrong with the matrix / viewport. Not entirely sure.
let width = 1280; // Window is initialised to these values. Hard coded for context.
let height = 720;
unsafe {
gl::Viewport(0, 0, width, height);
}
let orth = cgmath::ortho::<f32>(0.0, width as f32, height as f32, 0.0, -1.00, 100.0);
unsafe {
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_S, gl::CLAMP_TO_BORDER as i32);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_WRAP_T, gl::CLAMP_TO_BORDER as i32);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::LINEAR as i32);
gl::TexParameteri(gl::TEXTURE_2D, gl::TEXTURE_MAG_FILTER, gl::LINEAR as i32);
let border_colour = [0.9, 0.2, 0.5, 1.0];
gl::TexParameterfv(gl::TEXTURE_2D, gl::TEXTURE_BORDER_COLOR, border_colour.as_ptr());
gl::TexImage2D(
gl::TEXTURE_2D,
0,
gl::RGBA as i32,
img.width as i32,
img.height as i32,
0,
gl::RGBA,
gl::UNSIGNED_BYTE,
data.as_ptr() as *const c_void,
);
gl::GenerateMipmap(gl::TEXTURE_2D);
}
Before drawing the texture, I clear the display:
pub fn clear_frame_buffer() {
unsafe {
// Set the clear color before clearing so the clear below uses it.
gl::ClearColor(0.0, 0.0, 0.0, 1.0);
gl::Clear(gl::COLOR_BUFFER_BIT | gl::DEPTH_BUFFER_BIT);
}
}
Drawing the texture:
pub fn draw_quad(tex: &Texture2D, program : &Program, mesh : &Mesh, projection : &Matrix4<f32>, position : Vector3<f32>) {
program.bind();
mesh.vao.bind();
tex.bind();
if let Some(location) = program.get_uniform_location("view") {
let camera_transform = Matrix4::<f32>::identity() * Matrix4::from_scale(1.0);
program.set_matrix(location, &[camera_transform]);
}
if let Some(location) = program.get_uniform_location("projection") {
program.set_matrix(location, &[*projection]);
}
if let Some(location) = program.get_uniform_location("model") {
let model = {
let translate = Matrix4::<f32>::from_translation(position);
let (w, h) = tex.get_dimensions();
let scale = Matrix4::<f32>::from_nonuniform_scale(w as f32, h as f32, 1.0);
translate * scale
};
program.set_matrix(location, &[model]);
}
let count = mesh.count as i32;
unsafe {
gl::DrawArrays(gl::TRIANGLES, 0, count);
}
}
Then I move the texture. I know the pink is coming from my border color texture parameter, but I shouldn't be able to see it: I should be drawing only the image and not the border.
let quad_vertices: [f32; 30] = [
// Positions // Texcoords
1.0, -1.0, 0.0, 1.0, 0.0,
-1.0, -1.0, 0.0, 0.0, 0.0,
-1.0, 1.0, 0.0, 0.0, 1.0,
1.0, 1.0, 0.0, 1.0, 1.0,
1.0, -1.0, 0.0, 1.0, 0.0,
-1.0, 1.0, 0.0, 0.0, 1.0,
];
Vertex Shader
#version 330 core
layout (location = 0) in vec3 vertex;
layout (location = 2) in vec2 texcoords;
out vec2 TexCoords;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main()
{
TexCoords = texcoords;
gl_Position = projection * view * model * vec4(vertex.xy, 0.0, 1.0);
}
Fragment Shader
#version 330 core
in vec2 TexCoords;
out vec4 color;
uniform sampler2D image;
void main()
{
color = texture(image, TexCoords);
}

Texture coordinates shift when applying Y rotation in vertex shader

I'm trying to rotate a plane made of two triangles in a shader.
Everything goes fine, except I can't understand what happens to the UV coordinate values when I apply the rotation.
How can I perform this rotation with the expected results?
It looks as if the UV coordinates are interpolated independently for each triangle.
let pid, rotateYlocation;
let gl = canvas.getContext('webgl');
let values = document.querySelector('span')
let loader = new Image();
loader.crossOrigin = "anonymous";
loader.src = "https://i.imgur.com/G9H683l.jpg";
loader.onload = function() {
canvas.width = loader.width;
canvas.height = loader.height;
pid = gl.createProgram();
shader(`
float perspective = 1.0;
attribute vec2 coords;
uniform float rotateY;
varying vec2 uv;
void main(void) {
mat3 rotY = mat3(vec3( cos(rotateY), 0.0, sin(rotateY)),
vec3( 0.0, 1.0, 0.0),
vec3( -sin(rotateY), 0.0, cos(rotateY)));
vec3 p = vec3(coords.xy, 0.) * rotY;
uv = coords.xy.xy*0.5 + 0.5;
gl_Position = vec4(p / (1.0 + p.z * perspective), 1.0);
}
`, gl.VERTEX_SHADER);
shader(`
precision highp float;
uniform sampler2D texture;
varying vec2 uv;
void main(void) {
gl_FragColor = texture2D(texture, uv);
}
`, gl.FRAGMENT_SHADER);
gl.linkProgram(pid);
gl.useProgram(pid);
let array = new Float32Array([-1.0, -1.0, 1.0, -1.0, -1.0, 1.0,
-1.0, 1.0, 1.0, -1.0, 1.0, 1.0]);
gl.bindBuffer(gl.ARRAY_BUFFER, gl.createBuffer());
gl.bufferData(gl.ARRAY_BUFFER, array, gl.STATIC_DRAW);
let al = gl.getAttribLocation(pid, "coords");
gl.vertexAttribPointer(al, 2, gl.FLOAT, false, 0, 0);
gl.enableVertexAttribArray(al);
let texture = gl.createTexture();
gl.pixelStorei(gl.UNPACK_FLIP_Y_WEBGL, true)
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, loader);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.uniform1i(gl.getUniformLocation(pid, "texture"), 0);
rotateYlocation = gl.getUniformLocation(pid, 'rotateY');
draw();
}
function draw() {
gl.viewport(0, 0, gl.drawingBufferWidth, gl.drawingBufferHeight);
gl.clearColor(0, 0, 0, 0);
gl.uniform1f(rotateYlocation, rotateY.value/1000-0.5)
gl.drawArrays(gl.TRIANGLES, 0, 6);
values.textContent = `rotateY: ${(rotateY.value/1000-0.5).toFixed(3)}`;
}
function shader(src, type) {
let sid = gl.createShader(type);
gl.shaderSource(sid, src);
gl.compileShader(sid);
var message = gl.getShaderInfoLog(sid);
gl.attachShader(pid, sid);
if (message.length > 0) {
console.log(src.split('\n').map(function (str, i) {
return ("" + (1 + i)).padStart(4, "0") + ": " + str
}).join('\n'));
throw message;
}
}
<input type="range" id="rotateY" min="0" max="1000" value="500" onmousemove="draw()">
<span></span><br>
<canvas id="canvas" style="zoom:0.4"></canvas>
varying variables are interpolated in a perspective-correct manner. gl_Position is a homogeneous coordinate (x, y, z, w). For a perspective-correct interpolation, the coordinate has to be set correctly.
When you do
gl_Position = vec4(p / (1.0 + p.z * perspective), 1.0)
then the perspective interpolation cannot be performed correctly, because the coordinate is just a Cartesian coordinate (x, y, z, 1); this results in a linear interpolation, because w = 1.
For detailed information on perspective-correct interpolation see How exactly does OpenGL do perspectively correct linear interpolation?.
A homogeneous coordinate is transformed to the Cartesian normalized device coordinate by a perspective divide.
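In formula form, for w ≠ 0:

(x, y, z, w) → (x/w, y/w, z/w)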
You have to set the clip-space coordinate, i.e. the coordinate before the perspective divide:
gl_Position = vec4(p, 1.0 + p.z * perspective);
let pid, rotateYlocation;
let gl = canvas.getContext('webgl');
let values = document.querySelector('span')
let loader = new Image();
loader.crossOrigin = "anonymous";
loader.src = "https://i.imgur.com/G9H683l.jpg";
loader.onload = function() {
canvas.width = loader.width;
canvas.height = loader.height;
pid = gl.createProgram();
shader(`
float perspective = 1.0;
attribute vec2 coords;
uniform float rotateY;
varying vec2 uv;
void main(void) {
mat3 rotY = mat3(vec3( cos(rotateY), 0.0, sin(rotateY)),
vec3( 0.0, 1.0, 0.0),
vec3( -sin(rotateY), 0.0, cos(rotateY)));
vec3 p = vec3(coords.xy, 0.) * rotY;
uv = coords.xy.xy*0.5 + 0.5;
gl_Position = vec4(p, 1.0 + p.z * perspective);
}
`, gl.VERTEX_SHADER);
shader(`
precision highp float;
uniform sampler2D texture;
varying vec2 uv;
void main(void) {
gl_FragColor = texture2D(texture, uv);
}
`, gl.FRAGMENT_SHADER);
gl.linkProgram(pid);
gl.useProgram(pid);
let array = new Float32Array([-1.0, -1.0, 1.0, -1.0, -1.0, 1.0,
-1.0, 1.0, 1.0, -1.0, 1.0, 1.0]);
gl.bindBuffer(gl.ARRAY_BUFFER, gl.createBuffer());
gl.bufferData(gl.ARRAY_BUFFER, array, gl.STATIC_DRAW);
let al = gl.getAttribLocation(pid, "coords");
gl.vertexAttribPointer(al, 2, gl.FLOAT, false, 0, 0);
gl.enableVertexAttribArray(al);
let texture = gl.createTexture();
gl.pixelStorei(gl.UNPACK_FLIP_Y_WEBGL, true)
gl.bindTexture(gl.TEXTURE_2D, texture);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, loader);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
gl.uniform1i(gl.getUniformLocation(pid, "texture"), 0);
rotateYlocation = gl.getUniformLocation(pid, 'rotateY');
draw();
}
function draw() {
gl.viewport(0, 0, gl.drawingBufferWidth, gl.drawingBufferHeight);
gl.clearColor(0, 0, 0, 0);
gl.uniform1f(rotateYlocation, rotateY.value/1000-0.5)
gl.drawArrays(gl.TRIANGLES, 0, 6);
values.textContent = `rotateY: ${(rotateY.value/1000-0.5).toFixed(3)}`;
}
function shader(src, type) {
let sid = gl.createShader(type);
gl.shaderSource(sid, src);
gl.compileShader(sid);
var message = gl.getShaderInfoLog(sid);
gl.attachShader(pid, sid);
if (message.length > 0) {
console.log(src.split('\n').map(function (str, i) {
return ("" + (1 + i)).padStart(4, "0") + ": " + str
}).join('\n'));
throw message;
}
}
<input type="range" id="rotateY" min="0" max="1000" value="500" onmousemove="draw()">
<span></span><br>
<canvas id="canvas" style="zoom:0.4"></canvas>

Implementing depth testing for semi-transparent objects

I've been carefully trawling the internet for the past two days to understand depth testing for semi-transparent objects. I've read multiple papers/tutorials on the subject and in theory I believe I understand how it works. However none of them give me actual example code.
I have three requirements for my depth testing of semi-transparent objects:
It should be order independant.
It should work if two quads of the same object are intersecting each other, both semi-transparent. Imagine a grass object that looks like an X when viewed from above:
It should correctly render a semi-transparent player rgba(0, 1, 0, 0.5), behind a building's window rgba(0, 0, 1, 0.5), but in front of a background object rgba(1, 0, 0, 1):
The line on the far left is how I imagine the light/color changes as it travels through the semi-transparent objects towards the camera
Final Thoughts
I suspect the best approach is depth peeling, but I'm still lacking an implementation/example. I'm leaning towards this approach because the game is 2.5D, and although depth peeling could get dangerous for performance (lots of layers to peel), there won't ever need to be more than two semi-transparent objects to "peel".
I'm already familiar with framebuffers and how to code them (doing some post processing effects with them). I will be using them, right?
Most of my knowledge of OpenGL comes from this tutorial, but it covers depth testing and semi-transparency separately. He also sadly doesn't cover order independent transparency at all (see the bottom of the Blending page).
Finally, please don't answer only in theory. e.g.
Draw opaque, draw transparent, draw opaque again, etc.
My ideal answer will contain code of how the buffers are configured, the shaders, and screenshots of each pass with an explanation of what its doing.
The programming language used is also not too important as long as it uses OpenGL 4 or newer. The non-opengl code can be pseudo (I don't care how you sort an array or create an GLFW window).
EDIT:
I'm updating my question to include an example of the current state of my code. This example draws the semi-transparent player (green) first, the opaque background (red) second, and then the semi-transparent window (blue). However the depth should be determined by the Z position of the square and not the order in which it is drawn.
(function() {
// your page initialization code here
// the DOM will be available here
var script = document.createElement('script');
script.onload = function () {
main();
};
script.src = 'https://mdn.github.io/webgl-examples/tutorial/gl-matrix.js';
document.head.appendChild(script); //or something of the likes
})();
//
// Start here
//
function main() {
const canvas = document.querySelector('#glcanvas');
const gl = canvas.getContext('webgl', {alpha:false});
// If we don't have a GL context, give up now
if (!gl) {
alert('Unable to initialize WebGL. Your browser or machine may not support it.');
return;
}
// Vertex shader program
const vsSource = `
attribute vec4 aVertexPosition;
attribute vec4 aVertexColor;
uniform mat4 uModelViewMatrix;
uniform mat4 uProjectionMatrix;
varying lowp vec4 vColor;
void main(void) {
gl_Position = uProjectionMatrix * uModelViewMatrix * aVertexPosition;
vColor = aVertexColor;
}
`;
// Fragment shader program
const fsSource = `
varying lowp vec4 vColor;
void main(void) {
gl_FragColor = vColor;
}
`;
// Initialize a shader program; this is where all the lighting
// for the vertices and so forth is established.
const shaderProgram = initShaderProgram(gl, vsSource, fsSource);
// Collect all the info needed to use the shader program.
// Look up which attributes our shader program is using
// for aVertexPosition, aVevrtexColor and also
// look up uniform locations.
const programInfo = {
program: shaderProgram,
attribLocations: {
vertexPosition: gl.getAttribLocation(shaderProgram, 'aVertexPosition'),
vertexColor: gl.getAttribLocation(shaderProgram, 'aVertexColor'),
},
uniformLocations: {
projectionMatrix: gl.getUniformLocation(shaderProgram, 'uProjectionMatrix'),
modelViewMatrix: gl.getUniformLocation(shaderProgram, 'uModelViewMatrix'),
},
};
// Here's where we call the routine that builds all the
// objects we'll be drawing.
const buffers = initBuffers(gl);
// Draw the scene
drawScene(gl, programInfo, buffers);
}
//
// initBuffers
//
// Initialize the buffers we'll need. For this demo, we just
// have one object -- a simple two-dimensional square.
//
function initBuffers(gl) {
// Create a buffer for the square's positions.
const positionBuffer0 = gl.createBuffer();
// Select the positionBuffer as the one to apply buffer
// operations to from here out.
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer0);
// Now create an array of positions for the square.
var positions = [
0.5, 0.5,
-0.5, 0.5,
0.5, -0.5,
-0.5, -0.5,
];
// Now pass the list of positions into WebGL to build the
// shape. We do this by creating a Float32Array from the
// JavaScript array, then use it to fill the current buffer.
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(positions), gl.STATIC_DRAW);
// Now set up the colors for the vertices
var colors = [
0.0, 1.0, 0.0, 0.5, // semi-transparent green
0.0, 1.0, 0.0, 0.5,
0.0, 1.0, 0.0, 0.5,
0.0, 1.0, 0.0, 0.5,
];
const colorBuffer0 = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, colorBuffer0);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(colors), gl.STATIC_DRAW);
// Create a buffer for the square's positions.
const positionBuffer1 = gl.createBuffer();
// Select the positionBuffer as the one to apply buffer
// operations to from here out.
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer1);
// Now create an array of positions for the square.
positions = [
2.0, 0.4,
-2.0, 0.4,
2.0, -2.0,
-2.0, -2.0,
];
// Now pass the list of positions into WebGL to build the
// shape. We do this by creating a Float32Array from the
// JavaScript array, then use it to fill the current buffer.
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(positions), gl.STATIC_DRAW);
// Now set up the colors for the vertices
colors = [
1.0, 0.0, 0.0, 1.0, // opaque red
1.0, 0.0, 0.0, 1.0,
1.0, 0.0, 0.0, 1.0,
1.0, 0.0, 0.0, 1.0,
];
const colorBuffer1 = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, colorBuffer1);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(colors), gl.STATIC_DRAW);
// Create a buffer for the square's positions.
const positionBuffer2 = gl.createBuffer();
// Select the positionBuffer as the one to apply buffer
// operations to from here out.
gl.bindBuffer(gl.ARRAY_BUFFER, positionBuffer2);
// Now create an array of positions for the square.
positions = [
1.0, 1.0,
-0.0, 1.0,
1.0, -1.0,
-0.0, -1.0,
];
// Now pass the list of positions into WebGL to build the
// shape. We do this by creating a Float32Array from the
// JavaScript array, then use it to fill the current buffer.
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(positions), gl.STATIC_DRAW);
// Now set up the colors for the vertices
colors = [
0.0, 0.0, 1.0, 0.5, // semi-transparent blue
0.0, 0.0, 1.0, 0.5,
0.0, 0.0, 1.0, 0.5,
0.0, 0.0, 1.0, 0.5,
];
const colorBuffer2 = gl.createBuffer();
gl.bindBuffer(gl.ARRAY_BUFFER, colorBuffer2);
gl.bufferData(gl.ARRAY_BUFFER, new Float32Array(colors), gl.STATIC_DRAW);
return {
position0: positionBuffer0,
color0: colorBuffer0,
position1: positionBuffer1,
color1: colorBuffer1,
position2: positionBuffer2,
color2: colorBuffer2,
};
}
//
// Draw the scene.
//
function drawScene(gl, programInfo, buffers) {
gl.clearColor(0.0, 0.0, 0.0, 1.0); // Clear to black, fully opaque
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
//gl.clearDepth(1.0); // Clear everything
gl.disable(gl.DEPTH_TEST)
gl.enable(gl.BLEND)
gl.blendEquation(gl.FUNC_ADD)
gl.blendFunc(gl.SRC_ALPHA, gl.ONE_MINUS_SRC_ALPHA)
// Clear the canvas before we start drawing on it.
// Create a perspective matrix, a special matrix that is
// used to simulate the distortion of perspective in a camera.
// Our field of view is 45 degrees, with a width/height
// ratio that matches the display size of the canvas
// and we only want to see objects between 0.1 units
// and 100 units away from the camera.
const fieldOfView = 45 * Math.PI / 180; // in radians
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const zNear = 0.1;
const zFar = 100.0;
const projectionMatrix = mat4.create();
// note: glmatrix.js always has the first argument
// as the destination to receive the result.
mat4.perspective(projectionMatrix,
fieldOfView,
aspect,
zNear,
zFar);
// Set the drawing position to the "identity" point, which is
// the center of the scene.
const modelViewMatrix = mat4.create();
// Now move the drawing position a bit to where we want to
// start drawing the square.
mat4.translate(modelViewMatrix, // destination matrix
modelViewMatrix, // matrix to translate
[-0.0, 0.0, -6.0]); // amount to translate
function drawSquare(positionbuffer, colorbuffer) {
// Tell WebGL how to pull out the positions from the position
// buffer into the vertexPosition attribute
{
const numComponents = 2;
const type = gl.FLOAT;
const normalize = false;
const stride = 0;
const offset = 0;
gl.bindBuffer(gl.ARRAY_BUFFER, positionbuffer);
gl.vertexAttribPointer(
programInfo.attribLocations.vertexPosition,
numComponents,
type,
normalize,
stride,
offset);
gl.enableVertexAttribArray(
programInfo.attribLocations.vertexPosition);
}
// Tell WebGL how to pull out the colors from the color buffer
// into the vertexColor attribute.
{
const numComponents = 4;
const type = gl.FLOAT;
const normalize = false;
const stride = 0;
const offset = 0;
gl.bindBuffer(gl.ARRAY_BUFFER, colorbuffer);
gl.vertexAttribPointer(
programInfo.attribLocations.vertexColor,
numComponents,
type,
normalize,
stride,
offset);
gl.enableVertexAttribArray(
programInfo.attribLocations.vertexColor);
}
// Tell WebGL to use our program when drawing
gl.useProgram(programInfo.program);
// Set the shader uniforms
gl.uniformMatrix4fv(
programInfo.uniformLocations.projectionMatrix,
false,
projectionMatrix);
gl.uniformMatrix4fv(
programInfo.uniformLocations.modelViewMatrix,
false,
modelViewMatrix);
{
const offset = 0;
const vertexCount = 4;
gl.drawArrays(gl.TRIANGLE_STRIP, offset, vertexCount);
}
}
drawSquare(buffers.position0, buffers.color0); // Player
drawSquare(buffers.position1, buffers.color1); // Background
drawSquare(buffers.position2, buffers.color2); // Window
}
//
// Initialize a shader program, so WebGL knows how to draw our data
//
function initShaderProgram(gl, vsSource, fsSource) {
const vertexShader = loadShader(gl, gl.VERTEX_SHADER, vsSource);
const fragmentShader = loadShader(gl, gl.FRAGMENT_SHADER, fsSource);
// Create the shader program
const shaderProgram = gl.createProgram();
gl.attachShader(shaderProgram, vertexShader);
gl.attachShader(shaderProgram, fragmentShader);
gl.linkProgram(shaderProgram);
// If creating the shader program failed, alert
if (!gl.getProgramParameter(shaderProgram, gl.LINK_STATUS)) {
alert('Unable to initialize the shader program: ' + gl.getProgramInfoLog(shaderProgram));
return null;
}
return shaderProgram;
}
//
// creates a shader of the given type, uploads the source and
// compiles it.
//
function loadShader(gl, type, source) {
const shader = gl.createShader(type);
// Send the source to the shader object
gl.shaderSource(shader, source);
// Compile the shader program
gl.compileShader(shader);
// See if it compiled successfully
if (!gl.getShaderParameter(shader, gl.COMPILE_STATUS)) {
alert('An error occurred compiling the shaders: ' + gl.getShaderInfoLog(shader));
gl.deleteShader(shader);
return null;
}
return shader;
}
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width">
<title></title>
</head>
<body>
<canvas id="glcanvas" width="640" height="480"></canvas>
</body>
</html>
This seems to be what the paper linked by ripi2 is doing
function main() {
const m4 = twgl.m4;
const gl = document.querySelector('canvas').getContext('webgl2', {alpha: false});
if (!gl) {
alert('need WebGL2');
return;
}
const ext = gl.getExtension('EXT_color_buffer_float');
if (!ext) {
alert('EXT_color_buffer_float');
return;
}
const vs = `
#version 300 es
layout(location=0) in vec4 position;
uniform mat4 u_matrix;
void main() {
gl_Position = u_matrix * position;
}
`;
const checkerFS = `
#version 300 es
precision highp float;
uniform vec4 color1;
uniform vec4 color2;
out vec4 fragColor;
void main() {
ivec2 grid = ivec2(gl_FragCoord.xy) / 32;
fragColor = mix(color1, color2, float((grid.x + grid.y) % 2));
}
`;
const transparentFS = `
#version 300 es
precision highp float;
uniform vec4 Ci;
out vec4 fragData[2];
float w(float z, float a) {
return a * max(pow(10.0,-2.0),3.0*pow(10.0,3.0)*pow((1.0 - z), 3.));
}
void main() {
float ai = Ci.a;
float zi = gl_FragCoord.z;
float wresult = w(zi, ai);
fragData[0] = vec4(Ci.rgb * wresult, ai);
fragData[1].r = ai * wresult;
}
`;
const compositeFS = `
#version 300 es
precision highp float;
uniform sampler2D ATexture;
uniform sampler2D BTexture;
out vec4 fragColor;
void main() {
vec4 accum = texelFetch(ATexture, ivec2(gl_FragCoord.xy), 0);
float r = accum.a;
accum.a = texelFetch(BTexture, ivec2(gl_FragCoord.xy), 0).r;
fragColor = vec4(accum.rgb / clamp(accum.a, 1e-4, 5e4), r);
}
`;
const checkerProgramInfo = twgl.createProgramInfo(gl, [vs, checkerFS]);
const transparentProgramInfo = twgl.createProgramInfo(gl, [vs, transparentFS]);
const compositeProgramInfo = twgl.createProgramInfo(gl, [vs, compositeFS]);
const bufferInfo = twgl.primitives.createXYQuadBufferInfo(gl);
const fbi = twgl.createFramebufferInfo(
gl,
[
{ internalFormat: gl.RGBA32F, minMag: gl.NEAREST },
{ internalFormat: gl.R32F, minMag: gl.NEAREST },
]);
function render(time) {
time *= 0.001;
twgl.setBuffersAndAttributes(gl, transparentProgramInfo, bufferInfo);
// drawOpaqueSurfaces();
gl.useProgram(checkerProgramInfo.program);
gl.disable(gl.BLEND);
twgl.setUniforms(checkerProgramInfo, {
color1: [.5, .5, .5, 1],
color2: [.7, .7, .7, 1],
u_matrix: m4.identity(),
});
twgl.drawBufferInfo(gl, bufferInfo);
twgl.bindFramebufferInfo(gl, fbi);
gl.drawBuffers([gl.COLOR_ATTACHMENT0, gl.COLOR_ATTACHMENT1]);
gl.clearBufferfv(gl.COLOR, 0, new Float32Array([0, 0, 0, 1]));
gl.clearBufferfv(gl.COLOR, 1, new Float32Array([1, 1, 1, 1]));
gl.depthMask(false);
gl.enable(gl.BLEND);
gl.blendFuncSeparate(gl.ONE, gl.ONE, gl.ZERO, gl.ONE_MINUS_SRC_ALPHA);
gl.useProgram(transparentProgramInfo.program);
// drawTransparentSurfaces();
const quads = [
[ .4, 0, 0, .4],
[ .4, .4, 0, .4],
[ 0, .4, 0, .4],
[ 0, .4, .4, .4],
[ 0, .0, .4, .4],
[ .4, .0, .4, .4],
];
quads.forEach((color, ndx) => {
const u = ndx / (quads.length - 1);
// change the order every second
const v = ((ndx + time | 0) % quads.length) / (quads.length - 1);
const xy = (u * 2 - 1) * .25;
const z = (v * 2 - 1) * .25;
let mat = m4.identity();
mat = m4.translate(mat, [xy, xy, z]);
mat = m4.scale(mat, [.3, .3, 1]);
twgl.setUniforms(transparentProgramInfo, {
Ci: color,
u_matrix: mat,
});
twgl.drawBufferInfo(gl, bufferInfo);
});
twgl.bindFramebufferInfo(gl, null);
gl.drawBuffers([gl.BACK]);
gl.blendFunc(gl.ONE_MINUS_SRC_ALPHA, gl.SRC_ALPHA);
gl.useProgram(compositeProgramInfo.program);
twgl.setUniforms(compositeProgramInfo, {
ATexture: fbi.attachments[0],
BTexture: fbi.attachments[1],
u_matrix: m4.identity(),
});
twgl.drawBufferInfo(gl, bufferInfo);
/* only needed if {alpha: false} not passed into getContext
gl.colorMask(false, false, false, true);
gl.clearColor(1, 1, 1, 1);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.colorMask(true, true, true, true);
*/
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
Some things to note:
It's using WebGL2 but it should be possible in WebGL1, you'd have to change the shaders to use GLSL ES 1.0.
It's using floating point textures. The paper mentions you can use half float textures as well. Note that rendering to both half and float textures is an optional feature even in WebGL2. I believe most mobile hardware can render to half floats but not to full floats.
It's using weight equation 10 from the paper. There are 4 weight equations in the paper: 7, 8, 9, and 10. To do 7, 8, or 9 you'd need to pass the view space z from the vertex shader to the fragment shader.
It's switching the order of drawing every second
The code is pretty straightforward.
It creates 3 shaders. One draws a checkerboard, just so we have something opaque to see the transparent stuff drawn above it. One is the transparent object shader. The last is the shader that composites the transparent stuff into the scene.
Next it makes 2 textures, a floating point RGBA32F texture and a floating point R32F texture (red channel only), and attaches those to a framebuffer. (That is all done in one function, twgl.createFramebufferInfo, which makes the textures the same size as the canvas by default.)
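For readers not using twgl, the equivalent setup in raw GL terms is roughly the following (a glow/Rust sketch for consistency with the first question; the names are mine):
use glow::HasContext;

// Sketch: build the accumulate (RGBA32F) + revealage (R32F) render target.
// Leaves the new framebuffer bound; resizing is left out.
unsafe fn create_oit_targets(gl: &glow::Context, w: i32, h: i32) {
    let fbo = gl.create_framebuffer().unwrap();
    gl.bind_framebuffer(glow::FRAMEBUFFER, Some(fbo));
    for (attachment, format) in [
        (glow::COLOR_ATTACHMENT0, glow::RGBA32F),
        (glow::COLOR_ATTACHMENT1, glow::R32F),
    ] {
        let tex = gl.create_texture().unwrap();
        gl.bind_texture(glow::TEXTURE_2D, Some(tex));
        gl.tex_storage_2d(glow::TEXTURE_2D, 1, format, w, h);
        gl.tex_parameter_i32(glow::TEXTURE_2D, glow::TEXTURE_MIN_FILTER, glow::NEAREST as i32);
        gl.tex_parameter_i32(glow::TEXTURE_2D, glow::TEXTURE_MAG_FILTER, glow::NEAREST as i32);
        gl.framebuffer_texture_2d(glow::FRAMEBUFFER, attachment, glow::TEXTURE_2D, Some(tex), 0);
    }
    // Both attachments are written by the transparent pass.
    gl.draw_buffers(&[glow::COLOR_ATTACHMENT0, glow::COLOR_ATTACHMENT1]);
}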
We make a single quad that goes from -1 to +1
We use that quad to draw the checkerboard into the canvas
Then we turn on blending, set up the blend functions as the paper says, switch to rendering onto our framebuffer, and clear that framebuffer. Note it's cleared to (0, 0, 0, 1) and 1 respectively. This is the version where we don't have separate blend functions per draw buffer. If you switch to the version that can use separate blend functions per draw buffer then you need to clear to different values and use a different shader (see paper).
Using our transparency shader we use that same quad to draw 6 rectangles, each in a solid color. I just used solid colors to keep it simple. Each is at a different Z, and the Zs change every second just to show the effect of Z changing.
In the shader, Ci is the input color. It's expected to be a premultiplied alpha color according to the paper. fragData[0] is the "accumulate" texture and fragData[1] is the "revealage" texture, which has only one channel, red. The w function represents equation 10 from the paper.
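For reference, that weight function (equation 10 in the paper) is

w(z, a) = a * max(10^-2, 3*10^3 * (1 - z)^3)

which is exactly what the w function above computes from gl_FragCoord.z and the fragment's alpha.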
After all 6 quads are drawn we switch back to rendering to the canvas and use the compositing shader to composite the transparency result with the non-transparent canvas contents.
Here's an example with some geometry. Differences:
It's using equations (7) from the paper instead of (10)
In order to do correct z-buffering the depth buffer needs to be shared between opaque and transparent rendering. So there are 2 framebuffers. One buffer has RGBA8 + depth, the other is RGBA32F + R32F + depth. The depth buffer is shared.
The transparent renderer computes simple lighting and then uses the result as the Ci value from the paper
After compositing the transparent into the opaque we still need to copy the opaque into the canvas to see the result
function main() {
const m4 = twgl.m4;
const v3 = twgl.v3;
const gl = document.querySelector('canvas').getContext('webgl2', {alpha: false});
if (!gl) {
alert('need WebGL2');
return;
}
const ext = gl.getExtension('EXT_color_buffer_float');
if (!ext) {
alert('EXT_color_buffer_float');
return;
}
const vs = `
#version 300 es
layout(location=0) in vec4 position;
layout(location=1) in vec3 normal;
uniform mat4 u_projection;
uniform mat4 u_modelView;
out vec4 v_viewPosition;
out vec3 v_normal;
void main() {
gl_Position = u_projection * u_modelView * position;
v_viewPosition = u_modelView * position;
v_normal = (u_modelView * vec4(normal, 0)).xyz;
}
`;
const checkerFS = `
#version 300 es
precision highp float;
uniform vec4 color1;
uniform vec4 color2;
out vec4 fragColor;
void main() {
ivec2 grid = ivec2(gl_FragCoord.xy) / 32;
fragColor = mix(color1, color2, float((grid.x + grid.y) % 2));
}
`;
const opaqueFS = `
#version 300 es
precision highp float;
in vec4 v_viewPosition;
in vec3 v_normal;
uniform vec4 u_color;
uniform vec3 u_lightDirection;
out vec4 fragColor;
void main() {
float light = abs(dot(normalize(v_normal), u_lightDirection));
fragColor = vec4(u_color.rgb * light, u_color.a);
}
`;
const transparentFS = `
#version 300 es
precision highp float;
uniform vec4 u_color;
uniform vec3 u_lightDirection;
in vec4 v_viewPosition;
in vec3 v_normal;
out vec4 fragData[2];
// eq (7)
float w(float z, float a) {
return a * max(
pow(10.0, -2.0),
min(
3.0 * pow(10.0, 3.0),
10.0 /
(pow(10.0, -5.0) +
pow(abs(z) / 5.0, 2.0) +
pow(abs(z) / 200.0, 6.0)
)
)
);
}
void main() {
float light = abs(dot(normalize(v_normal), u_lightDirection));
vec4 Ci = vec4(u_color.rgb * light, u_color.a);
float ai = Ci.a;
float zi = gl_FragCoord.z;
float wresult = w(zi, ai);
fragData[0] = vec4(Ci.rgb * wresult, ai);
fragData[1].r = ai * wresult;
}
`;
const compositeFS = `
#version 300 es
precision highp float;
uniform sampler2D ATexture;
uniform sampler2D BTexture;
out vec4 fragColor;
void main() {
vec4 accum = texelFetch(ATexture, ivec2(gl_FragCoord.xy), 0);
float r = accum.a;
accum.a = texelFetch(BTexture, ivec2(gl_FragCoord.xy), 0).r;
fragColor = vec4(accum.rgb / clamp(accum.a, 1e-4, 5e4), r);
}
`;
const blitFS = `
#version 300 es
precision highp float;
uniform sampler2D u_texture;
out vec4 fragColor;
void main() {
fragColor = texelFetch(u_texture, ivec2(gl_FragCoord.xy), 0);
}
`;
const checkerProgramInfo = twgl.createProgramInfo(gl, [vs, checkerFS]);
const opaqueProgramInfo = twgl.createProgramInfo(gl, [vs, opaqueFS]);
const transparentProgramInfo = twgl.createProgramInfo(gl, [vs, transparentFS]);
const compositeProgramInfo = twgl.createProgramInfo(gl, [vs, compositeFS]);
const blitProgramInfo = twgl.createProgramInfo(gl, [vs, blitFS]);
const xyQuadVertexArrayInfo = makeVAO(checkerProgramInfo, twgl.primitives.createXYQuadBufferInfo(gl));
const sphereVertexArrayInfo = makeVAO(transparentProgramInfo, twgl.primitives.createSphereBufferInfo(gl, 1, 16, 12));
const cubeVertexArrayInfo = makeVAO(opaqueProgramInfo, twgl.primitives.createCubeBufferInfo(gl, 1, 1));
function makeVAO(programInfo, bufferInfo) {
return twgl.createVertexArrayInfo(gl, programInfo, bufferInfo);
}
// In order to do proper zbuffering we need to share
// the depth buffer
const opaqueAttachments = [
{ internalFormat: gl.RGBA8, minMag: gl.NEAREST },
{ format: gl.DEPTH_COMPONENT16, minMag: gl.NEAREST },
];
const opaqueFBI = twgl.createFramebufferInfo(gl, opaqueAttachments);
const transparentAttachments = [
{ internalFormat: gl.RGBA32F, minMag: gl.NEAREST },
{ internalFormat: gl.R32F, minMag: gl.NEAREST },
{ format: gl.DEPTH_COMPONENT16, minMag: gl.NEAREST, attachment: opaqueFBI.attachments[1] },
];
const transparentFBI = twgl.createFramebufferInfo(gl, transparentAttachments);
function render(time) {
time *= 0.001;
if (twgl.resizeCanvasToDisplaySize(gl.canvas)) {
// if the canvas is resized also resize the framebuffer
// attachments (the depth buffer will be resized twice
// but I'm too lazy to fix it)
twgl.resizeFramebufferInfo(gl, opaqueFBI, opaqueAttachments);
twgl.resizeFramebufferInfo(gl, transparentFBI, transparentAttachments);
}
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const fov = 45 * Math.PI / 180;
const zNear = 0.1;
const zFar = 500;
const projection = m4.perspective(fov, aspect, zNear, zFar);
const eye = [0, 0, -5];
const target = [0, 0, 0];
const up = [0, 1, 0];
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
const lightDirection = v3.normalize([1, 3, 5]);
twgl.bindFramebufferInfo(gl, opaqueFBI);
gl.drawBuffers([gl.COLOR_ATTACHMENT0]);
gl.depthMask(true);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
gl.bindVertexArray(xyQuadVertexArrayInfo.vertexArrayObject);
// drawOpaqueSurfaces();
// draw checkerboard
gl.useProgram(checkerProgramInfo.program);
gl.disable(gl.DEPTH_TEST);
gl.disable(gl.BLEND);
twgl.setUniforms(checkerProgramInfo, {
color1: [.5, .5, .5, 1],
color2: [.7, .7, .7, 1],
u_projection: m4.identity(),
u_modelView: m4.identity(),
});
twgl.drawBufferInfo(gl, xyQuadVertexArrayInfo);
// draw a cube with depth buffer
gl.enable(gl.DEPTH_TEST);
{
gl.useProgram(opaqueProgramInfo.program);
gl.bindVertexArray(cubeVertexArrayInfo.vertexArrayObject);
let mat = view;
mat = m4.rotateX(mat, time * .1);
mat = m4.rotateY(mat, time * .2);
mat = m4.scale(mat, [1.5, 1.5, 1.5]);
twgl.setUniforms(opaqueProgramInfo, {
u_color: [1, .5, .2, 1],
u_lightDirection: lightDirection,
u_projection: projection,
u_modelView: mat,
});
twgl.drawBufferInfo(gl, cubeVertexArrayInfo);
}
twgl.bindFramebufferInfo(gl, transparentFBI);
gl.drawBuffers([gl.COLOR_ATTACHMENT0, gl.COLOR_ATTACHMENT1]);
// these values change if using separate blend functions
// per attachment (something WebGL2 does not support)
gl.clearBufferfv(gl.COLOR, 0, new Float32Array([0, 0, 0, 1]));
gl.clearBufferfv(gl.COLOR, 1, new Float32Array([1, 1, 1, 1]));
gl.depthMask(false); // don't write to depth buffer (but still testing)
gl.enable(gl.BLEND);
// this changes if using separate blend functions per attachment
gl.blendFuncSeparate(gl.ONE, gl.ONE, gl.ZERO, gl.ONE_MINUS_SRC_ALPHA);
gl.useProgram(transparentProgramInfo.program);
gl.bindVertexArray(sphereVertexArrayInfo.vertexArrayObject);
// drawTransparentSurfaces();
const spheres = [
[ .4, 0, 0, .4],
[ .4, .4, 0, .4],
[ 0, .4, 0, .4],
[ 0, .4, .4, .4],
[ 0, .0, .4, .4],
[ .4, .0, .4, .4],
];
spheres.forEach((color, ndx) => {
const u = ndx + 2;
let mat = view;
mat = m4.rotateX(mat, time * u * .1);
mat = m4.rotateY(mat, time * u * .2);
mat = m4.translate(mat, [0, 0, 1 + ndx * .1]);
twgl.setUniforms(transparentProgramInfo, {
u_color: color,
u_lightDirection: lightDirection,
u_projection: projection,
u_modelView: mat,
});
twgl.drawBufferInfo(gl, sphereVertexArrayInfo);
});
// composite transparent results with opaque
twgl.bindFramebufferInfo(gl, opaqueFBI);
gl.drawBuffers([gl.COLOR_ATTACHMENT0]);
gl.disable(gl.DEPTH_TEST);
gl.blendFunc(gl.ONE_MINUS_SRC_ALPHA, gl.SRC_ALPHA);
gl.useProgram(compositeProgramInfo.program);
gl.bindVertexArray(xyQuadVertexArrayInfo.vertexArrayObject);
twgl.setUniforms(compositeProgramInfo, {
ATexture: transparentFBI.attachments[0],
BTexture: transparentFBI.attachments[1],
u_projection: m4.identity(),
u_modelView: m4.identity(),
});
twgl.drawBufferInfo(gl, xyQuadVertexArrayInfo);
/* only needed if {alpha: false} not passed into getContext
gl.colorMask(false, false, false, true);
gl.clearColor(1, 1, 1, 1);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.colorMask(true, true, true, true);
*/
// draw opaque color buffer into canvas
// could probably use gl.blitFramebuffer
gl.disable(gl.BLEND);
twgl.bindFramebufferInfo(gl, null);
gl.useProgram(blitProgramInfo.program);
gl.bindVertexArray(xyQuadVertexArrayInfo.vertexArrayObject);
twgl.setUniforms(blitProgramInfo, {
u_texture: opaqueFBI.attachments[0],
u_projection: m4.identity(),
u_modelView: m4.identity(),
});
twgl.drawBufferInfo(gl, xyQuadVertexArrayInfo);
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
It occurs to me that rather than use standard OpenGL blending for the last 2 steps (composite followed by blit) we could change the composite shader so it takes 3 textures (ATexture, BTexture, opaqueTexture) and blends in the shader, outputting directly to the canvas. That would be faster.
function main() {
const m4 = twgl.m4;
const v3 = twgl.v3;
const gl = document.querySelector('canvas').getContext('webgl2', {alpha: false});
if (!gl) {
alert('need WebGL2');
return;
}
const ext = gl.getExtension('EXT_color_buffer_float');
if (!ext) {
alert('EXT_color_buffer_float');
return;
}
const vs = `
#version 300 es
layout(location=0) in vec4 position;
layout(location=1) in vec3 normal;
uniform mat4 u_projection;
uniform mat4 u_modelView;
out vec4 v_viewPosition;
out vec3 v_normal;
void main() {
gl_Position = u_projection * u_modelView * position;
v_viewPosition = u_modelView * position;
v_normal = (u_modelView * vec4(normal, 0)).xyz;
}
`;
const checkerFS = `
#version 300 es
precision highp float;
uniform vec4 color1;
uniform vec4 color2;
out vec4 fragColor;
void main() {
ivec2 grid = ivec2(gl_FragCoord.xy) / 32;
fragColor = mix(color1, color2, float((grid.x + grid.y) % 2));
}
`;
const opaqueFS = `
#version 300 es
precision highp float;
in vec4 v_viewPosition;
in vec3 v_normal;
uniform vec4 u_color;
uniform vec3 u_lightDirection;
out vec4 fragColor;
void main() {
float light = abs(dot(normalize(v_normal), u_lightDirection));
fragColor = vec4(u_color.rgb * light, u_color.a);
}
`;
const transparentFS = `
#version 300 es
precision highp float;
uniform vec4 u_color;
uniform vec3 u_lightDirection;
in vec4 v_viewPosition;
in vec3 v_normal;
out vec4 fragData[2];
// eq (7)
float w(float z, float a) {
return a * max(
pow(10.0, -2.0),
min(
3.0 * pow(10.0, 3.0),
10.0 /
(pow(10.0, -5.0) +
pow(abs(z) / 5.0, 2.0) +
pow(abs(z) / 200.0, 6.0)
)
)
);
}
void main() {
float light = abs(dot(normalize(v_normal), u_lightDirection));
vec4 Ci = vec4(u_color.rgb * light, u_color.a);
float ai = Ci.a;
float zi = gl_FragCoord.z;
float wresult = w(zi, ai);
fragData[0] = vec4(Ci.rgb * wresult, ai);
fragData[1].r = ai * wresult;
}
`;
const compositeFS = `
#version 300 es
precision highp float;
uniform sampler2D ATexture;
uniform sampler2D BTexture;
uniform sampler2D opaqueTexture;
out vec4 fragColor;
void main() {
vec4 accum = texelFetch(ATexture, ivec2(gl_FragCoord.xy), 0);
float r = accum.a;
accum.a = texelFetch(BTexture, ivec2(gl_FragCoord.xy), 0).r;
vec4 transparentColor = vec4(accum.rgb / clamp(accum.a, 1e-4, 5e4), r);
vec4 opaqueColor = texelFetch(opaqueTexture, ivec2(gl_FragCoord.xy), 0);
// gl.blendFunc(gl.ONE_MINUS_SRC_ALPHA, gl.SRC_ALPHA);
fragColor = transparentColor * (1. - r) + opaqueColor * r;
}
`;
const checkerProgramInfo = twgl.createProgramInfo(gl, [vs, checkerFS]);
const opaqueProgramInfo = twgl.createProgramInfo(gl, [vs, opaqueFS]);
const transparentProgramInfo = twgl.createProgramInfo(gl, [vs, transparentFS]);
const compositeProgramInfo = twgl.createProgramInfo(gl, [vs, compositeFS]);
const xyQuadVertexArrayInfo = makeVAO(checkerProgramInfo, twgl.primitives.createXYQuadBufferInfo(gl));
const sphereVertexArrayInfo = makeVAO(transparentProgramInfo, twgl.primitives.createSphereBufferInfo(gl, 1, 16, 12));
const cubeVertexArrayInfo = makeVAO(opaqueProgramInfo, twgl.primitives.createCubeBufferInfo(gl, 1, 1));
function makeVAO(programInfo, bufferInfo) {
return twgl.createVertexArrayInfo(gl, programInfo, bufferInfo);
}
// In order to do proper zbuffering we need to share
// the depth buffer
const opaqueAttachments = [
{ internalFormat: gl.RGBA8, minMag: gl.NEAREST },
{ format: gl.DEPTH_COMPONENT16, minMag: gl.NEAREST },
];
const opaqueFBI = twgl.createFramebufferInfo(gl, opaqueAttachments);
const transparentAttachments = [
{ internalFormat: gl.RGBA32F, minMag: gl.NEAREST },
{ internalFormat: gl.R32F, minMag: gl.NEAREST },
{ format: gl.DEPTH_COMPONENT16, minMag: gl.NEAREST, attachment: opaqueFBI.attachments[1] },
];
const transparentFBI = twgl.createFramebufferInfo(gl, transparentAttachments);
function render(time) {
time *= 0.001;
if (twgl.resizeCanvasToDisplaySize(gl.canvas)) {
// if the canvas is resized also resize the framebuffer
// attachments (the depth buffer will be resized twice
// but I'm too lazy to fix it)
twgl.resizeFramebufferInfo(gl, opaqueFBI, opaqueAttachments);
twgl.resizeFramebufferInfo(gl, transparentFBI, transparentAttachments);
}
const aspect = gl.canvas.clientWidth / gl.canvas.clientHeight;
const fov = 45 * Math.PI / 180;
const zNear = 0.1;
const zFar = 500;
const projection = m4.perspective(fov, aspect, zNear, zFar);
const eye = [0, 0, -5];
const target = [0, 0, 0];
const up = [0, 1, 0];
const camera = m4.lookAt(eye, target, up);
const view = m4.inverse(camera);
const lightDirection = v3.normalize([1, 3, 5]);
twgl.bindFramebufferInfo(gl, opaqueFBI);
gl.drawBuffers([gl.COLOR_ATTACHMENT0]);
gl.depthMask(true);
gl.clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT);
gl.bindVertexArray(xyQuadVertexArrayInfo.vertexArrayObject);
// drawOpaqueSurfaces();
// draw checkerboard
gl.useProgram(checkerProgramInfo.program);
gl.disable(gl.DEPTH_TEST);
gl.disable(gl.BLEND);
twgl.setUniforms(checkerProgramInfo, {
color1: [.5, .5, .5, 1],
color2: [.7, .7, .7, 1],
u_projection: m4.identity(),
u_modelView: m4.identity(),
});
twgl.drawBufferInfo(gl, xyQuadVertexArrayInfo);
// draw a cube with depth buffer
gl.enable(gl.DEPTH_TEST);
{
gl.useProgram(opaqueProgramInfo.program);
gl.bindVertexArray(cubeVertexArrayInfo.vertexArrayObject);
let mat = view;
mat = m4.rotateX(mat, time * .1);
mat = m4.rotateY(mat, time * .2);
mat = m4.scale(mat, [1.5, 1.5, 1.5]);
twgl.setUniforms(opaqueProgramInfo, {
u_color: [1, .5, .2, 1],
u_lightDirection: lightDirection,
u_projection: projection,
u_modelView: mat,
});
twgl.drawBufferInfo(gl, cubeVertexArrayInfo);
}
twgl.bindFramebufferInfo(gl, transparentFBI);
gl.drawBuffers([gl.COLOR_ATTACHMENT0, gl.COLOR_ATTACHMENT1]);
// these values change if using separate blend functions
// per attachment (something WebGL2 does not support)
gl.clearBufferfv(gl.COLOR, 0, new Float32Array([0, 0, 0, 1]));
gl.clearBufferfv(gl.COLOR, 1, new Float32Array([1, 1, 1, 1]));
gl.depthMask(false); // don't write to depth buffer (but still testing)
gl.enable(gl.BLEND);
// this changes if using separate blend functions per attachment
gl.blendFuncSeparate(gl.ONE, gl.ONE, gl.ZERO, gl.ONE_MINUS_SRC_ALPHA);
gl.useProgram(transparentProgramInfo.program);
gl.bindVertexArray(sphereVertexArrayInfo.vertexArrayObject);
// drawTransparentSurfaces();
const spheres = [
[ .4, 0, 0, .4],
[ .4, .4, 0, .4],
[ 0, .4, 0, .4],
[ 0, .4, .4, .4],
[ 0, .0, .4, .4],
[ .4, .0, .4, .4],
];
spheres.forEach((color, ndx) => {
const u = ndx + 2;
let mat = view;
mat = m4.rotateX(mat, time * u * .1);
mat = m4.rotateY(mat, time * u * .2);
mat = m4.translate(mat, [0, 0, 1 + ndx * .1]);
twgl.setUniforms(transparentProgramInfo, {
u_color: color,
u_lightDirection: lightDirection,
u_projection: projection,
u_modelView: mat,
});
twgl.drawBufferInfo(gl, sphereVertexArrayInfo);
});
// composite transparent results with opaque
twgl.bindFramebufferInfo(gl, null);
gl.disable(gl.DEPTH_TEST);
gl.disable(gl.BLEND);
gl.useProgram(compositeProgramInfo.program);
gl.bindVertexArray(xyQuadVertexArrayInfo.vertexArrayObject);
twgl.setUniforms(compositeProgramInfo, {
ATexture: transparentFBI.attachments[0],
BTexture: transparentFBI.attachments[1],
opaqueTexture: opaqueFBI.attachments[0],
u_projection: m4.identity(),
u_modelView: m4.identity(),
});
twgl.drawBufferInfo(gl, xyQuadVertexArrayInfo);
/* only needed if {alpha: false} not passed into getContext
gl.colorMask(false, false, false, true);
gl.clearColor(1, 1, 1, 1);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.colorMask(true, true, true, true);
*/
requestAnimationFrame(render);
}
requestAnimationFrame(render);
}
main();
body { margin: 0; }
canvas { width: 100vw; height: 100vh; display: block; }
<canvas></canvas>
<script src="https://twgljs.org/dist/4.x/twgl-full.min.js"></script>
I have three requirements for my depth testing of semi-transparent objects
It's actually quite rare to have self-intersecting objects with partially transparent (actually blended) samples. The common cases for self-intersecting geometry are grass and leaves. However, in these cases the actual areas covered by grass and leaves are not transparent - they are opaque.
The common solution here is alpha testing. Render the leaves as an opaque (not blended) quad (with a normal depth test and write), and discard fragments which have insufficient alpha (e.g. because they are outside of the leaf). Because the individual samples here are opaque, you get order independence for free: the depth test works as you would expect for an opaque object, as in the sketch below.
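A minimal fragment-shader sketch of that alpha-test approach (the sampler name and the 0.5 cutoff are arbitrary example values):
#version 330 core
in vec2 texCoord;
out vec4 fragColor;
uniform sampler2D leafTexture;
void main()
{
    vec4 c = texture(leafTexture, texCoord);
    // Discard fragments outside the leaf; surviving samples are opaque,
    // so a normal depth test/write gives order independence for free.
    if (c.a < 0.5)
        discard;
    fragColor = vec4(c.rgb, 1.0);
}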
If you want blended edges, then enable alpha-to-coverage and let the multi-sample resolve clean up the edges a little.
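Enabling alpha-to-coverage is then a single state change (a glow/Rust sketch; it only has a visible effect on a multisampled framebuffer):
unsafe {
    // Derive the MSAA coverage mask from the fragment's alpha value.
    gl.enable(glow::SAMPLE_ALPHA_TO_COVERAGE);
}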
For the small amount of actually transparent stuff you have left, you normally need to do a back-to-front sort on the CPU, and render it after the opaque pass.
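The sort itself is simple; a sketch in Rust, assuming each transparent object carries its view-space z (more negative = farther from the camera):
// Hypothetical record type; only the depth matters for the sort.
struct Transparent {
    view_z: f32, // view-space z, negative in front of the camera
}

// Draw order for blending: farthest (most negative view_z) first.
fn sort_back_to_front(objects: &mut [Transparent]) {
    objects.sort_by(|a, b| a.view_z.partial_cmp(&b.view_z).unwrap());
}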
Proper OIT is possible, but it is generally quite an expensive technique, so I've yet to see anyone actually use it outside of an academic environment (at least on mobile OpenGL ES implementations).

Drawing Antialiased circle using Shaders

I am new to shader programming. I am trying to draw a circle with GLSL. I used a point with a size and tried to filter out the points outside the radius (altering the alpha value).
The code is as follows:
Fragment Shader:
#version 130
varying vec2 textureCoordinate;
const float circleBorderWidth = 0.08; // for anti-aliasing
void main() {
float d = smoothstep(circleBorderWidth, 0.1, 1.0 - length(textureCoordinate));
gl_FragColor = vec4(0.0, 1.0, 0.0, d);
}
Vertex Shader:
#version 130
attribute vec4 coord3d;
attribute vec2 varPos;
varying vec2 textureCoordinate;
void
main()
{
textureCoordinate = varPos;
gl_FrontColor = gl_Color;
gl_Position = vec4(coord3d.xyz,1.);
gl_PointSize = coord3d.w;
}
Data:
float pos[] = {
-1, -1,
-1, 1,
1, 1,
1, -1,
};
float vertices[] = {0.0f, 0.0f, 0.0f, 100.0f};
Draw Method:
void drawScene() {
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
program->makeCurrent();
glEnable(GL_POINT_SMOOTH);
glEnable(GL_VERTEX_PROGRAM_POINT_SIZE);
glEnable(GL_BLEND);
glBlendFunc (GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
if (varPosAttrib>=0) {
glVertexAttribPointer( varPosAttrib, 2, GL_FLOAT, GL_FALSE,
0, pos ); // -->varPos in Vertex Shader.
glEnableVertexAttribArray( varPosAttrib );
}
if (posAttrib>=0) {
glVertexAttribPointer(posAttrib, 4, GL_FLOAT, GL_FALSE, 0, vertices); // -->coord3d in vertex shader
glEnableVertexAttribArray(posAttrib);
glDrawArrays(GL_POINTS, 0, 1);
}
glDisable(GL_POINT_SMOOTH);
glDisable(GL_VERTEX_PROGRAM_POINT_SIZE);
glDisable(GL_BLEND);
program->release();
glutSwapBuffers(); //Send the 3D scene to the screen
}
This results in drawing a square if I replace d with 1.0 in the following line (in the fragment shader):
gl_FragColor = vec4(0.0, 1.0, 0.0, d); // -> if d is replaced by 1.0
I tried to replace the x and y values in gl_FragColor with textureCoordinate.x and textureCoordinate.y. The result was black (so I assume the values are 0.0). The thing which I don't understand is that if I take the length of textureCoordinate then it is always 1.0 (experimented by replacing the value in gl_FragColor). I am unable to figure out what I am doing wrong here. I was expecting the textureCoordinate value to interpolate with respect to the passed in data (varPos).
Here's my current attempt at it. It works, in the sense that it draws a disc with a smooth border. I use a distance field approach, i.e. I compute the distance from the disc's border.
Fragment shader
#version 110
varying vec2 uv;
void
main() {
float border = 0.01;
float radius = 0.5;
vec4 color0 = vec4(0.0, 0.0, 0.0, 1.0);
vec4 color1 = vec4(1.0, 1.0, 1.0, 1.0);
vec2 m = uv - vec2(0.5, 0.5);
float dist = radius - sqrt(m.x * m.x + m.y * m.y);
float t = 0.0;
if (dist > border)
t = 1.0;
else if (dist > 0.0)
t = dist / border;
gl_FragColor = mix(color0, color1, t);
}
Vertex shader
#version 110
varying vec2 uv;
void
main() {
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
uv = vec2(gl_MultiTexCoord0);
}
It's meant to be drawn on a quad with texture coordinates going from (0, 0) to (1, 1).
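Incidentally, the if/else ladder above can be collapsed into a single clamp with the same result:

float t = clamp(dist / border, 0.0, 1.0);

or into smoothstep(0.0, border, dist) if a smooth falloff at both ends is preferred.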