Why is SSAO only working from certain angles/distances? - c++

I am trying to replicate the Sascha Willems SSAO example, using the LearnOpenGL SSAO tutorial as a resource. But my SSAO only partially covers models at certain angles/distances, and there is also a very strong self-occlusion effect when the camera is close to an object.
On the left is my renderer, and on the right side is the Sascha Willems SSAO Example:
Center: Wrong | Correct
Window: Wrong | Correct
Stairs: Wrong | Correct
EDIT: There is some strange artifacting on the Correct images from RenderDoc. Sorry about that.
Some notes about my renderer variables:
The Position+Depth image uses VK_FORMAT_R32G32B32A32_SFLOAT and looks correct in RenderDoc. [1] [2]
The Normal image uses VK_FORMAT_R8G8B8A8_UNORM and looks correct in RenderDoc. [1]
The Position+Depth and Normal images are sampled with a VkSampler using VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE.
The SSAO image is VK_FORMAT_R8_UNORM and is written correctly by the shader. [1]
The SSAO Noise image uses VK_FORMAT_R32G32B32A32_SFLOAT and looks correct in RenderDoc. [1]
The SSAO Noise image is sampled with a VkSampler using VK_SAMPLER_ADDRESS_MODE_REPEAT.
SSAO Noise
// Random Generator
std::default_random_engine rndEngine(static_cast<unsigned>(glfwGetTime()));
std::uniform_real_distribution<float> rndDist(0.0f, 1.0f);

// SSAO random noise
std::vector<glm::vec4> ssaoNoise(SSAO_NOISE_DIM * SSAO_NOISE_DIM);
for (uint32_t i = 0; i < static_cast<uint32_t>(ssaoNoise.size()); i++)
{
    ssaoNoise[i] = glm::vec4(rndDist(rndEngine) * 2.0f - 1.0f, rndDist(rndEngine) * 2.0f - 1.0f, 0.0f, 0.0f);
}
SSAO Kernels
// Function for SSAOKernel generation
float lerp(float a, float b, float f)
{
    return a + f * (b - a);
}

// SSAO sample kernel
std::vector<glm::vec4> ssaoKernel(SSAO_KERNEL_SIZE);
for (uint32_t i = 0; i < SSAO_KERNEL_SIZE; i++)
{
    glm::vec3 sample(rndDist(rndEngine) * 2.0f - 1.0f, rndDist(rndEngine) * 2.0f - 1.0f, rndDist(rndEngine));
    sample = glm::normalize(sample);
    sample *= rndDist(rndEngine);

    float scale = float(i) / float(SSAO_KERNEL_SIZE);
    scale = lerp(0.1f, 1.0f, scale * scale);
    ssaoKernel[i] = glm::vec4(sample * scale, 0.0f);
}
SSAO Kernel XY values are between -1.0 and 1.0, and Z values are between 0.0 and 1.0:
ssaoKernel XYZ[0]: X: -0.0428458 Y: 0.0578492 Z: 0.0569087
ssaoKernel XYZ[1]: X: 0.0191572 Y: 0.0442375 Z: 0.00108795
ssaoKernel XYZ[2]: X: 0.00155709 Y: 0.0287552 Z: 0.024916
ssaoKernel XYZ[3]: X: -0.0169349 Y: -0.0298343 Z: 0.0272303
ssaoKernel XYZ[4]: X: 0.0469432 Y: 0.0348599 Z: 0.0573885
(...)
ssaoKernel XYZ[31]: X: -0.104106 Y: -0.434528 Z: 0.321963
GLSL shaders
model.vert
mat3 normalMatrix = transpose(inverse(mat3(ubo.view * ubo.model)));
outNormalViewSpace = normalMatrix * inNormal;
outPositionViewSpace = vec3(ubo.view * ubo.model * vec4(inPosition, 1.0));
model.frag
// These values match the camera's near and far planes
float near = 0.1;
float far = 100.0;

float LinearizeDepth(float depth)
{
    float z = depth * 2.0 - 1.0;
    return (2.0 * near * far) / (far + near - z * (far - near));
}
(...)
outNormalViewSpace = vec4(normalize(inNormalViewSpace) * 0.5 + 0.5, 1.0);
outPositionDepth = vec4(inPositionViewSpace, LinearizeDepth(gl_FragCoord.z));
fullscreen.vert
// Fullscreen triangle: vertex indices 0, 1, 2 produce UVs (0,0), (2,0), (0,2),
// which map to clip positions (-1,-1), (3,-1), (-1,3) and cover the screen.
outUV = vec2((gl_VertexIndex << 1) & 2, gl_VertexIndex & 2);
gl_Position = vec4(outUV * 2.0f - 1.0f, 0.0f, 1.0f);
ssao.frag
#version 450

layout (location = 0) in vec2 inUV;

layout (constant_id = 1) const int SSAO_KERNEL_SIZE = 32;
layout (constant_id = 2) const float SSAO_RADIUS = 0.5;

layout (binding = 0) uniform sampler2D samplerPositionDepth;
layout (binding = 1) uniform sampler2D samplerNormal;
layout (binding = 2) uniform sampler2D samplerSSAONoise;

layout (binding = 3) uniform SSAOKernel
{
    vec4 samples[SSAO_KERNEL_SIZE];
} ssaoKernel;

layout (push_constant) uniform UniformBufferObject {
    mat4 projection;
} ubo;

layout (location = 0) out float outSSAO;

void main()
{
    //
    // SSAO Post Processing (Pre-Blur)
    //

    // Get a random vector using a noise lookup
    ivec2 texDim = textureSize(samplerPositionDepth, 0);
    ivec2 noiseDim = textureSize(samplerSSAONoise, 0);
    const vec2 noiseUV = vec2(float(texDim.x) / float(noiseDim.x), float(texDim.y) / float(noiseDim.y)) * inUV;
    vec3 randomVec = texture(samplerSSAONoise, noiseUV).xyz * 2.0 - 1.0;

    // Get G-Buffer values
    vec3 fragPos = texture(samplerPositionDepth, inUV).rgb;
    vec3 normal = normalize(texture(samplerNormal, inUV).rgb * 2.0 - 1.0);

    // Create TBN matrix
    vec3 tangent = normalize(randomVec - normal * dot(randomVec, normal));
    vec3 bitangent = cross(tangent, normal);
    mat3 TBN = mat3(tangent, bitangent, normal);

    // Calculate occlusion value
    float occlusion = 0.0f;
    for (int i = 0; i < SSAO_KERNEL_SIZE; i++)
    {
        vec3 samplePos = TBN * ssaoKernel.samples[i].xyz;
        samplePos = fragPos + samplePos * SSAO_RADIUS;

        // project
        vec4 offset = vec4(samplePos, 1.0f);
        offset = ubo.projection * offset;
        offset.xyz /= offset.w;
        offset.xyz = offset.xyz * 0.5f + 0.5f;

        float sampleDepth = -texture(samplerPositionDepth, offset.xy).w;

        // Range check
        float rangeCheck = smoothstep(0.0f, 1.0f, SSAO_RADIUS / abs(fragPos.z - sampleDepth));
        occlusion += (sampleDepth >= samplePos.z ? 1.0f : 0.0f) * rangeCheck;
    }

    occlusion = 1.0 - (occlusion / float(SSAO_KERNEL_SIZE));
    outSSAO = occlusion;
}
There has to be a wrong setting or improper calculation somewhere, but I can't quite put my finger on it. Feel free to request additional code snippets if something pertinent is missing.
Any help is greatly appreciated, thank you!

Credit goes to mlkn for pointing out in the comments that the LinearizeDepth function did not look right. He was correct: there was an extra, unnecessary "* 2.0 - 1.0" step that did not belong. Thank you mlkn! :)
This was the original, incorrect LinearizeDepth function:
float LinearizeDepth(float depth)
{
    float z = depth * 2.0 - 1.0;
    return (2.0 * near * far) / (far + near - z * (far - near));
}
By removing the first line (in Vulkan, gl_FragCoord.z is already in the [0, 1] range, so the [-1, 1] NDC remap from OpenGL does not apply) and changing it to this:
float LinearizeDepth(float depth)
{
    return (2.0 * near * far) / (far + near - depth * (far - near));
}
My output immediately changed to this, which appears to be correct:

Related

drawing more than one of my glowing lights makes them share the light, can't split them

I am trying to create two independent glowing lights, but when I make the second one they share the light and it stretches between the two.
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
    // Normalized pixel coordinates (from 0 to 1)
    vec2 uv = fragCoord/iResolution.xy;
    uv = (fragCoord-.5*iResolution.xy)/iResolution.y;

    vec3 col = vec3(0.);
    float radius = 0.5;
    float glowSpeed = 1.;
    vec2 glowPos = vec2(uv.x, uv.y+0.5);
    vec2 glowPos2 = vec2(uv.x+0.5, uv.y+0.0);
    float glowCol1 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv+glowPos);
    float glowCol2 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv+glowPos2);
    col += vec3(glowCol1, glowCol1, 0.);
    col += vec3(glowCol2, glowCol2, 0.);

    // Output to screen
    fragColor = vec4(col, 1);
}
uv is a position relative to the fragment currently being processed. So the position of a light source must not depend on uv. e.g.:
vec2 glowPos = vec2(0.5, 0.5);
vec2 glowPos2 = vec2(-0.5, -0.5);
The distance between 2 points is the length of the vector from one point to the other. A vector between 2 points is calculated by subtracting one point from the other, but not by calculating the sum:
float glowCol1 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv-glowPos);
float glowCol2 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv-glowPos2);
The result for glowCol1 and glowCol2 can become negative. Thus, one light source would negatively affect the other. You must clamp the result in the range [0, 1]:
glowCol1 = clamp(glowCol1, 0.0, 1.0);
glowCol2 = clamp(glowCol2, 0.0, 1.0);
Complete and working shader:
void mainImage( out vec4 fragColor, in vec2 fragCoord )
{
    // Normalized pixel coordinates (from 0 to 1)
    vec2 uv = fragCoord/iResolution.xy;
    uv = uv * 2.0 - 1.0;
    uv.x *= iResolution.x / iResolution.y;

    vec3 col = vec3(0.);
    float radius = 0.5;
    float glowSpeed = 1.;
    vec2 glowPos = vec2(0.5, 0.5);
    vec2 glowPos2 = vec2(-0.5, -0.5);
    float glowCol1 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv-glowPos);
    float glowCol2 = radius * (cos(glowSpeed * iTime) + 6.) / 7. - length(uv-glowPos2);
    glowCol1 = clamp(glowCol1, 0.0, 1.0);
    glowCol2 = clamp(glowCol2, 0.0, 1.0);
    col += vec3(glowCol1, glowCol1, 0.);
    col += vec3(glowCol2, glowCol2, 0.);

    // Output to screen
    fragColor = vec4(col, 1);
}

Implementing a gooey effect with a shader (Processing 3)

I'm trying to replicate a web design trick known as "gooey effect" (see it live here).
It's a technique applying SVG filters on moving ellipses in order to get a blob-like motion. The process is rather simple:
apply a Gaussian blur
increase the contrast of the alpha channel only
The combination of the two creates a blob effect
The last step (increasing the alpha channel contrast) is usually done through a "color matrix filter".
A color matrix is composed of 5 columns (RGBA + offset) and 4 rows.
The values in the first four columns are multiplied with the source red, green, blue, and alpha values respectively. The fifth column value is added (offset).
In CSS, increasing the alpha channel contrast is as simple as calling a SVG filter and specifying the contrast value (here 18):
<feColorMatrix in="blur" mode="matrix" values="1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 18 -7" result="goo" />
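For reference, that matrix leaves RGB untouched and remaps alpha to alpha * 18 - 7, which pushes mid alphas toward fully opaque or fully transparent. A minimal GLSL sketch of the same step (gooAlpha is a hypothetical helper name, not part of the original filter):
// Equivalent of the feColorMatrix above: scale alpha by 18, offset by -7.
// Blurred edge pixels with low alpha vanish; overlapping blobs merge.
vec4 gooAlpha(vec4 color) {
    color.a = clamp(color.a * 18.0 - 7.0, 0.0, 1.0);
    return color;
}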
In Processing though, it seems to be a bit more complicated. I believe (I may be wrong) the only way to apply a color matrix filter is to create one in a shader. After a few tries I came up with these (very basic) vertex and fragment shaders for color rendering:
colorvert.glsl
uniform mat4 transform;

attribute vec4 position;
attribute vec4 color;

varying vec4 vertColor;

uniform vec4 o = vec4(0, 0, 0, -9);
uniform lowp mat4 colorMatrix = mat4(1.0, 0.0, 0.0, 0.0,
                                     0.0, 1.0, 0.0, 0.0,
                                     0.0, 0.0, 1.0, 0.0,
                                     0.0, 0.0, 0.0, 60.0);

void main() {
    gl_Position = transform * position;
    vertColor = (color * colorMatrix) + o;
}
colorfrag.glsl
#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif

varying vec4 vertColor;

void main() {
    gl_FragColor = vertColor;
}
PROBLEM:
The color matrix is partially working: changing the RGB values does affect the colors, but changing the alpha values (last row) doesn't!
When trying to combine the shader with a Gaussian filter, the drawn ellipse stays blurry even after I set the alpha channel contrast to 60 (like in the codepen example):
PShader colmat;

void setup() {
    size(200, 200, P2D);
    colmat = loadShader("colorfrag.glsl", "colorvert.glsl");
}

void draw() {
    background(100);
    shader(colmat);
    noStroke();
    fill(255, 30, 30);
    ellipse(width/2, height/2, 40, 40);
    filter(BLUR, 6);
}
The same thing happens when I implement the color matrix within #cansik's Gaussian blur shader (from the PostFX library). I can see the colors changing but not the alpha contrast:
blurFrag.glsl
// Adapted from:
// http://callumhay.blogspot.com/2010/09/gaussian-blur-shader-glsl.html

#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif

#define PROCESSING_TEXTURE_SHADER

uniform sampler2D texture;

uniform vec4 o = vec4(0, 0, 0, 0);
uniform lowp mat4 colorMatrix = mat4(1.0, 0.0, 0.0, 0.0,
                                     0.0, 1.0, 0.0, 0.0,
                                     0.0, 0.0, 1.0, 0.0,
                                     0.0, 0.0, 0.0, 60.0); // Alpha contrast set to 60

varying vec2 center;

// The inverse of the texture dimensions along X and Y
uniform vec2 texOffset;

varying vec4 vertColor;
varying vec4 vertTexCoord;

uniform int blurSize;
uniform int horizontalPass; // 0 or 1 to indicate vertical or horizontal pass
uniform float sigma;        // The sigma value for the gaussian function: higher value means more blur
                            // A good value for 9x9 is around 3 to 5
                            // A good value for 7x7 is around 2.5 to 4
                            // A good value for 5x5 is around 2 to 3.5
                            // ... play around with this based on what you need :)

const float pi = 3.14159265;

void main() {
    float numBlurPixelsPerSide = float(blurSize / 2);
    vec2 blurMultiplyVec = 0 < horizontalPass ? vec2(1.0, 0.0) : vec2(0.0, 1.0);

    // Incremental Gaussian Coefficient Calculation (See GPU Gems 3 pp. 877 - 889)
    vec3 incrementalGaussian;
    incrementalGaussian.x = 1.0 / (sqrt(2.0 * pi) * sigma);
    incrementalGaussian.y = exp(-0.5 / (sigma * sigma));
    incrementalGaussian.z = incrementalGaussian.y * incrementalGaussian.y;

    vec4 avgValue = vec4(0.0, 0.0, 0.0, 0.0);
    float coefficientSum = 0.0;

    // Take the central sample first...
    avgValue += texture2D(texture, vertTexCoord.st) * incrementalGaussian.x;
    coefficientSum += incrementalGaussian.x;
    incrementalGaussian.xy *= incrementalGaussian.yz;

    // Go through the remaining 8 vertical samples (4 on each side of the center)
    for (float i = 1.0; i <= numBlurPixelsPerSide; i++) {
        avgValue += texture2D(texture, vertTexCoord.st - i * texOffset * blurMultiplyVec) * incrementalGaussian.x;
        avgValue += texture2D(texture, vertTexCoord.st + i * texOffset * blurMultiplyVec) * incrementalGaussian.x;
        coefficientSum += 2.0 * incrementalGaussian.x;
        incrementalGaussian.xy *= incrementalGaussian.yz;
    }

    gl_FragColor = (avgValue / coefficientSum) * colorMatrix;
}
Setting glBlendFunc and enabling glEnable(GL_BLEND) in the main .pde file didn't fix the issue either.
sketch.pde
import ch.bildspur.postfx.builder.*;
import ch.bildspur.postfx.pass.*;
import ch.bildspur.postfx.*;
import processing.opengl.*;
import com.jogamp.opengl.*;

PostFX fx;

void setup() {
    size(200, 200, P2D);
    fx = new PostFX(this);
}

void draw() {
    background(100);

    GL gl = ((PJOGL)beginPGL()).gl.getGL();
    gl.glEnable(GL.GL_BLEND);
    gl.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE);
    gl.glDisable(GL.GL_DEPTH_TEST);

    noStroke();
    fill(255, 30, 30);
    ellipse(width/2, height/2, 40, 40);

    fx.render().blur(80, 14).compose();
}
Questions:
Why does the alpha channel contrast not work? How can I make it work?
Is there something wrong with the way I implemented the color matrix?
Do you know a better way to implement that gooey effect?
Any help would be much appreciated!
Thank you
#noahbuddy from the Processing Forum found a solution to the problem, so I'm posting it here.
To preserve transparency, with or without shaders, use an offscreen buffer (PGraphics). For example, saving a PNG image with transparent background.
I removed the contrast matrix from #cansik's blur shader and instead put it into a separate filter.
blurfrag.glsl
// Adapted from:
// http://callumhay.blogspot.com/2010/09/gaussian-blur-shader-glsl.html

#ifdef GL_ES
precision mediump float;
precision mediump int;
#endif

#define PROCESSING_TEXTURE_SHADER

uniform sampler2D texture;

// The inverse of the texture dimensions along X and Y
uniform vec2 texOffset;

varying vec4 vertColor;
varying vec4 vertTexCoord;

uniform int blurSize;
uniform int horizontalPass; // 0 or 1 to indicate vertical or horizontal pass
uniform float sigma;        // The sigma value for the gaussian function: higher value means more blur
                            // A good value for 9x9 is around 3 to 5
                            // A good value for 7x7 is around 2.5 to 4
                            // A good value for 5x5 is around 2 to 3.5
                            // ... play around with this based on what you need :)

const float pi = 3.14159265;

void main() {
    float numBlurPixelsPerSide = float(blurSize / 2);
    vec2 blurMultiplyVec = 0 < horizontalPass ? vec2(1.0, 0.0) : vec2(0.0, 1.0);

    // Incremental Gaussian Coefficient Calculation (See GPU Gems 3 pp. 877 - 889)
    vec3 incrementalGaussian;
    incrementalGaussian.x = 1.0 / (sqrt(2.0 * pi) * sigma);
    incrementalGaussian.y = exp(-0.5 / (sigma * sigma));
    incrementalGaussian.z = incrementalGaussian.y * incrementalGaussian.y;

    vec4 avgValue = vec4(0.0, 0.0, 0.0, 0.0);
    float coefficientSum = 0.0;

    // Take the central sample first...
    avgValue += texture2D(texture, vertTexCoord.st) * incrementalGaussian.x;
    coefficientSum += incrementalGaussian.x;
    incrementalGaussian.xy *= incrementalGaussian.yz;

    // Go through the remaining 8 vertical samples (4 on each side of the center)
    for (float i = 1.0; i <= numBlurPixelsPerSide; i++) {
        avgValue += texture2D(texture, vertTexCoord.st - i * texOffset * blurMultiplyVec) * incrementalGaussian.x;
        avgValue += texture2D(texture, vertTexCoord.st + i * texOffset * blurMultiplyVec) * incrementalGaussian.x;
        coefficientSum += 2.0 * incrementalGaussian.x;
        incrementalGaussian.xy *= incrementalGaussian.yz;
    }

    gl_FragColor = avgValue / coefficientSum;
}
colfrag.glsl
#define PROCESSING_TEXTURE_SHADER

uniform sampler2D texture;
varying vec4 vertTexCoord;

uniform vec4 o = vec4(0, 0, 0, -7.0);
uniform lowp mat4 colorMatrix = mat4(1.0, 0.0, 0.0, 0.0,
                                     0.0, 1.0, 0.0, 0.0,
                                     0.0, 0.0, 1.0, 0.0,
                                     0.0, 0.0, 0.0, 18.0);

void main() {
    vec4 pix = texture2D(texture, vertTexCoord.st);
    vec4 color = (pix * colorMatrix) + o;
    gl_FragColor = color;
}
sketch.pde
PShader contrast, blurry;
PGraphics buf;

void setup() {
    size(200, 200, P2D);
    buf = createGraphics(width, height, P2D);
    contrast = loadShader("colfrag.glsl");
    blurry = loadShader("blurFrag.glsl");

    // Don't forget to set these
    blurry.set("sigma", 4.5);
    blurry.set("blurSize", 9);
}

void draw() {
    background(100);

    buf.beginDraw();
    // Reset transparency
    // Note, the color used here will affect your edges
    // even with zero for alpha
    buf.background(100, 0); // set to match main background
    buf.noStroke();
    buf.fill(255, 30, 30);
    buf.ellipse(width/2, height/2, 40, 40);
    buf.ellipse(mouseX, mouseY, 40, 40);
    blurry.set("horizontalPass", 1);
    buf.filter(blurry);
    blurry.set("horizontalPass", 0);
    buf.filter(blurry);
    buf.endDraw();

    shader(contrast);
    image(buf, 0, 0, width, height);
}
Personally I think the sweet spot lies:
between 8 and 11 for the alpha contrast
between -7 and -9 for the alpha offset

uniform vec4 o = vec4(0, 0, 0, -9.0);
uniform lowp mat4 colorMatrix = mat4(1.0, 0.0, 0.0, 0.0,
                                     0.0, 1.0, 0.0, 0.0,
                                     0.0, 0.0, 1.0, 0.0,
                                     1.0, 1.0, 1.0, 11.0);

between 10 and 15 for "sigma"
between 30 and 40 for "blurSize"

blurry.set("sigma", 14.5);
blurry.set("blurSize", 35);
I've coded 2D metaballs before using signed distance functions and marching squares, but I find this solution to be the most efficient one. Performance-wise, I can display up to 4500 balls at 60 fps on an 800x600 canvas (tested on an entry-level 2012 iMac with Python Mode).
Unfortunately I'm not able to debug the exact issue, but I have a couple of ideas that hopefully might help you make some progress:
For a simpler/cheaper effect you can use the dilate filter
You can find other metaball shaders on Shadertoy and tweak the code a bit so you can run it in Processing
For example https://www.shadertoy.com/view/MlcGWn becomes:
// https://www.shadertoy.com/view/MlcGWn
uniform float iTime;
uniform vec2 iResolution;

vec3 Sphere(vec2 uv, vec2 position, float radius)
{
    float dist = radius / distance(uv, position);
    return vec3(dist * dist);
}

void main()
{
    vec2 uv = 2.0 * vec2(gl_FragCoord.xy - 0.5 * iResolution.xy) / iResolution.y;

    vec3 pixel = vec3(0.0, 0.0, 0.0);

    vec2 positions[4];
    positions[0] = vec2(sin(iTime * 1.4) * 1.3, cos(iTime * 2.3) * 0.4);
    positions[1] = vec2(sin(iTime * 3.0) * 0.5, cos(iTime * 1.3) * 0.6);
    positions[2] = vec2(sin(iTime * 2.1) * 0.1, cos(iTime * 1.9) * 0.8);
    positions[3] = vec2(sin(iTime * 1.1) * 1.1, cos(iTime * 2.6) * 0.7);

    for (int i = 0; i < 4; i++)
        pixel += Sphere(uv, positions[i], 0.22);

    pixel = step(1.0, pixel) * pixel;

    gl_FragColor = vec4(pixel, 1.0);
}
and in Processing:
PShader shader;

void setup(){
    size(900, 900, P2D);
    shader = loadShader("metaballs.glsl");
    shader.set("iResolution", (float)width/2, (float)height/2);
}

void draw(){
    shader.set("iTime", millis() * 0.001);
    shader(shader);
    rect(0, 0, width, height);
}
or https://www.shadertoy.com/view/ldtSRX
// https://www.shadertoy.com/view/ldtSRX
uniform vec2 iResolution;
uniform vec2 iMouse;
uniform float iTime;

struct Metaball{
    vec2 pos;
    float r;
    vec3 col;
};

vec4 calcball( Metaball ball, vec2 uv)
{
    float dst = ball.r / (pow(abs(uv.x - ball.pos.x), 2.) + pow(abs(uv.y - ball.pos.y), 2.));
    return vec4(ball.col * dst, dst);
}

vec3 doballs( vec2 uv )
{
    Metaball mouse;
    mouse.pos = iMouse.xy / iResolution.yy;
    mouse.r = .015;
    mouse.col = vec3(.5);

    Metaball mb1, mb2, mb3, mb4;
    mb1.pos = vec2(1.3, .55+.2*sin(iTime*.5)); mb1.r = .05;  mb1.col = vec3(0., 1., 0.);
    mb2.pos = vec2(.6, .45);                   mb2.r = .02;  mb2.col = vec3(0., .5, 1.);
    mb3.pos = vec2(.85, .65);                  mb3.r = .035; mb3.col = vec3(1., .2, 0.);
    mb4.pos = vec2(1.+.5*sin(iTime), .2);      mb4.r = .02;  mb4.col = vec3(1., 1., 0.);

    vec4 ball1 = calcball(mb1, uv);
    vec4 ball2 = calcball(mb2, uv);
    vec4 ball3 = calcball(mb3, uv);
    vec4 ball4 = calcball(mb4, uv);
    vec4 subball1 = calcball(mouse, uv);

    float res = ball1.a + ball2.a + ball3.a + ball4.a;
    res -= subball1.a;
    float threshold = res >= 1.5 ? 1. : 0.;

    vec3 color = (ball1.rgb + ball2.rgb + ball3.rgb + ball4.rgb - subball1.rgb) / res;
    color *= threshold;
    color = clamp(color, 0., 1.);

    return color;
}

#define ANTIALIAS 1

void main()
{
    vec2 uv = gl_FragCoord.xy / iResolution.yy;

    vec3 color = doballs(uv);

#ifdef ANTIALIAS
    float uvs = .75 / iResolution.y;
    color *= .5;
    color += doballs(vec2(uv.x + uvs, uv.y))*.125;
    color += doballs(vec2(uv.x - uvs, uv.y))*.125;
    color += doballs(vec2(uv.x, uv.y + uvs))*.125;
    color += doballs(vec2(uv.x, uv.y - uvs))*.125;
#if ANTIALIAS == 2
    color *= .5;
    color += doballs(vec2(uv.x + uvs*.85, uv.y + uvs*.85))*.125;
    color += doballs(vec2(uv.x - uvs*.85, uv.y + uvs*.85))*.125;
    color += doballs(vec2(uv.x - uvs*.85, uv.y - uvs*.85))*.125;
    color += doballs(vec2(uv.x + uvs*.85, uv.y - uvs*.85))*.125;
#endif
#endif

    gl_FragColor = vec4(color, 1.);
}
and in Processing:
PShader shader;
PVector mouse = new PVector();

void setup(){
    size(900, 900, P2D);
    shader = loadShader("metaballs.glsl");
    shader.set("iResolution", (float)width/2, (float)height/2);
}

void draw(){
    mouse.set(mouseX, mouseY);
    shader.set("iMouse", mouse);
    shader.set("iTime", millis() * 0.001);
    shader(shader);
    rect(0, 0, width, height);
}

Oren-Nayar lighting in OpenGL (how to calculate view direction in fragment shader)

I'm trying to implement Oren-Nayar lighting in the fragment shader as shown here.
However, I'm getting some strange lighting effects on the terrain as shown below.
I am currently sending the shader the 'view direction' uniform as the camera's 'front' vector. I am not sure if this is correct, as moving the camera around changes the artifacts.
Multiplying the 'front' vector by the MVP matrix gives a better result, but the artifacts are still very noticeable when viewing the terrain from some angles. It is particularly noticeable in dark areas and around the edges of the screen.
What could be causing this effect?
Artifact example
How the scene should look
Vertex Shader
#version 450

layout(location = 0) in vec3 position;
layout(location = 1) in vec3 normal;

out VS_OUT {
    vec3 normal;
} vert_out;

void main() {
    vert_out.normal = normal;
    gl_Position = vec4(position, 1.0);
}
Tessellation Control Shader
#version 450

layout(vertices = 3) out;

in VS_OUT {
    vec3 normal;
} tesc_in[];

out TESC_OUT {
    vec3 normal;
} tesc_out[];

void main() {
    if(gl_InvocationID == 0) {
        gl_TessLevelInner[0] = 1.0;
        gl_TessLevelInner[1] = 1.0;
        gl_TessLevelOuter[0] = 1.0;
        gl_TessLevelOuter[1] = 1.0;
        gl_TessLevelOuter[2] = 1.0;
        gl_TessLevelOuter[3] = 1.0;
    }

    tesc_out[gl_InvocationID].normal = tesc_in[gl_InvocationID].normal;
    gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;
}
Tessellation Evaluation Shader
#version 450

layout(triangles, equal_spacing) in;

in TESC_OUT {
    vec3 normal;
} tesc_in[];

out TESE_OUT {
    vec3 normal;
    float height;
    vec4 shadow_position;
} tesc_out;

uniform mat4 model_view;
uniform mat4 model_view_perspective;
uniform mat3 normal_matrix;
uniform mat4 depth_matrix;

vec3 lerp(vec3 v0, vec3 v1, vec3 v2) {
    return (
        (vec3(gl_TessCoord.x) * v0) +
        (vec3(gl_TessCoord.y) * v1) +
        (vec3(gl_TessCoord.z) * v2)
    );
}

vec4 lerp(vec4 v0, vec4 v1, vec4 v2) {
    return (
        (vec4(gl_TessCoord.x) * v0) +
        (vec4(gl_TessCoord.y) * v1) +
        (vec4(gl_TessCoord.z) * v2)
    );
}

void main() {
    gl_Position = lerp(
        gl_in[0].gl_Position,
        gl_in[1].gl_Position,
        gl_in[2].gl_Position
    );

    tesc_out.normal = normal_matrix * lerp(
        tesc_in[0].normal,
        tesc_in[1].normal,
        tesc_in[2].normal
    );

    tesc_out.height = gl_Position.y;
    tesc_out.shadow_position = depth_matrix * gl_Position;

    gl_Position = model_view_perspective * gl_Position;
}
Fragment Shader
#version 450

in TESE_OUT {
    vec3 normal;
    float height;
    vec4 shadow_position;
} frag_in;

out vec4 colour;

uniform vec3 view_direction;
uniform vec3 light_position;

#define PI 3.141592653589793

void main() {
    const vec3 ambient = vec3(0.1, 0.1, 0.1);
    const float roughness = 0.8;

    const vec4 water  = vec4(0.0, 0.0, 0.8, 1.0);
    const vec4 sand   = vec4(0.93, 0.87, 0.51, 1.0);
    const vec4 grass  = vec4(0.0, 0.8, 0.0, 1.0);
    const vec4 ground = vec4(0.49, 0.27, 0.08, 1.0);
    const vec4 snow   = vec4(0.9, 0.9, 0.9, 1.0);

    if(frag_in.height == 0.0) {
        colour = water;
    } else if(frag_in.height < 0.2) {
        colour = sand;
    } else if(frag_in.height < 0.575) {
        colour = grass;
    } else if(frag_in.height < 0.8) {
        colour = ground;
    } else {
        colour = snow;
    }

    vec3 normal = normalize(frag_in.normal);
    vec3 view_dir = normalize(view_direction);
    vec3 light_dir = normalize(light_position);

    float NdotL = dot(normal, light_dir);
    float NdotV = dot(normal, view_dir);

    float angleVN = acos(NdotV);
    float angleLN = acos(NdotL);

    float alpha = max(angleVN, angleLN);
    float beta = min(angleVN, angleLN);
    float gamma = dot(view_dir - normal * dot(view_dir, normal), light_dir - normal * dot(light_dir, normal));

    float roughnessSquared = roughness * roughness;
    float roughnessSquared9 = (roughnessSquared / (roughnessSquared + 0.09));

    // calculate C1, C2 and C3
    float C1 = 1.0 - 0.5 * (roughnessSquared / (roughnessSquared + 0.33));
    float C2 = 0.45 * roughnessSquared9;

    if(gamma >= 0.0) {
        C2 *= sin(alpha);
    } else {
        C2 *= (sin(alpha) - pow((2.0 * beta) / PI, 3.0));
    }

    float powValue = (4.0 * alpha * beta) / (PI * PI);
    float C3 = 0.125 * roughnessSquared9 * powValue * powValue;

    // now calculate both main parts of the formula
    float A = gamma * C2 * tan(beta);
    float B = (1.0 - abs(gamma)) * C3 * tan((alpha + beta) / 2.0);

    // put it all together
    float L1 = max(0.0, NdotL) * (C1 + A + B);

    // also calculate interreflection
    float twoBetaPi = 2.0 * beta / PI;
    float L2 = 0.17 * max(0.0, NdotL) * (roughnessSquared / (roughnessSquared + 0.13)) * (1.0 - gamma * twoBetaPi * twoBetaPi);

    colour = vec4(colour.xyz * (L1 + L2), 1.0);
}
First I've plugged your fragment shader into my renderer with my view/normal/light vectors and it works perfectly. So the problem has to be in the way you calculate those vectors.
Next, you say that you set view_dir to your camera's front vector. I assume that you meant "camera's front vector in the world space", which would be incorrect. Since you calculate the dot products with vectors in the camera space, the view_dir must be in the camera space too. That is, vec3(0,0,1) would be an easy way to check that. If it works, we have found your problem.
However, using (0,0,1) for the view direction is not strictly correct when you do perspective projection, because the direction from the fragment to the camera then depends on the location of the fragment on the screen. The correct formula then would be view_dir = normalize(-pos) where pos is the fragment's position in camera space (that is with model-view matrix applied without the projection). Further, this quantity now depends only on the fragment location on the screen, so you can calculate it as:
view_dir = normalize(vec3(-(gl_FragCoord.xy - frame_size/2) / (frame_width/2), flen))
flen is the focal length of your camera, which you can calculate as flen = cot(fovx/2).
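Putting those two pieces together, a minimal sketch of the per-fragment version (frame_size and fovx here are assumed uniforms you would supply yourself):
uniform vec2 frame_size; // viewport size in pixels (assumed uniform)
uniform float fovx;      // horizontal field of view in radians (assumed uniform)

vec3 viewDirection() {
    float flen = 1.0 / tan(fovx * 0.5); // focal length, i.e. cot(fovx / 2)
    // Fragment offset from the screen centre, scaled so x spans [-1, 1]
    vec2 offset = (gl_FragCoord.xy - frame_size * 0.5) / (frame_size.x * 0.5);
    return normalize(vec3(-offset, flen));
}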
I know this is a long dead thread, but I've been having the same problem (for several years), and finally found the solution...
It can be partially solved by fixing the orientation of the surface normals to match the polygon winding direction, but you can also get rid of the artifacts in the shader by changing the following two lines...
float angleVN = acos(cos_nv);
float angleLN = acos(cos_nl);
to this...
float angleVN = acos(clamp(cos_nv, -1.0, 1.0));
float angleLN = acos(clamp(cos_nl, -1.0, 1.0));
Tada!

WebGL Normal calculations from position texture

I am trying to create a procedural water puddle in WebGL with "water ripples" by vertex displacement.
The problem I'm having is that I get a noise I can't explain.
Below is the first-pass vertex shader where I calculate the vertex positions that I later render to a texture, which I then use in the second pass.
void main() {
    float damping = 0.5;
    vNormal = normal;

    // wave radius
    float timemod = 0.55;
    float ttime = mod(time, timemod);
    float frequency = 2.0*PI/waveWidth;
    float phase = frequency * 0.21;
    vec4 v = vec4(position, 1.0);

    // Loop through array of start positions
    for(int i = 0; i < 200; i++){
        float cCenterX = ripplePos[i].x;
        float cCenterY = ripplePos[i].y;
        vec2 center = vec2(cCenterX, cCenterY);

        if(center.x == 0.0 && center.y == 0.0)
            center = normalize(center);

        // wave width
        float tolerance = 0.005;
        radius = sqrt(pow(uv.x - center.x, 2.0) + pow(uv.y - center.y, 2.0));

        // Creating a ripple
        float w_height = (tolerance - (min(tolerance, pow(ripplePos[i].z - radius*10.0, 2.0)))) * (1.0 - ripplePos[i].z/timemod) * 5.82;

        // -2.07 in the end to keep plane at right height. Trial and error solution
        v.z += waveHeight*(1.0 + w_height/tolerance) / 2.0 - 2.07;
        vNormal = normal + v.z;
    }

    vPosition = v.xyz;
    gl_Position = projectionMatrix * modelViewMatrix * v;
}
And the first pass fragment shader that writes to the texture:
void main()
{
    vec3 p = normalize(vPosition);
    p.x = (p.x+1.0)*0.5;
    p.y = (p.y+1.0)*0.5;
    gl_FragColor = vec4( normalize(p), 1.0);
}
The second vertex shader is a standard passthrough.
The second-pass fragment shader is where I try to calculate the normals to be used for lighting calculations.
void main() {
    float w = 1.0 / 200.0;
    float h = 1.0 / 200.0;

    // Nearest neighbours
    vec3 p0 = texture2D(rttTexture, vUV).xyz;
    vec3 p1 = texture2D(rttTexture, vUV + vec2(-w, 0)).xyz;
    vec3 p2 = texture2D(rttTexture, vUV + vec2( w, 0)).xyz;
    vec3 p3 = texture2D(rttTexture, vUV + vec2( 0, h)).xyz;
    vec3 p4 = texture2D(rttTexture, vUV + vec2( 0, -h)).xyz;

    vec3 nVec1 = p2 - p0;
    vec3 nVec2 = p3 - p0;
    vec3 vNormal = cross(nVec1, nVec2);
    vec3 N = normalize(vNormal);

    float theZ = texture2D(rttTexture, vUV).r;

    //gl_FragColor = vec4(1., .0, 1., 1.);
    //gl_FragColor = texture2D(tDiffuse, vUV);
    gl_FragColor = vec4(vec3(N), 1.0);
}
The result is this:
The image displays the normal map, and the noise I'm referring to is the inconsistency of the blue.
Here is a live demonstration:
http://oskarhavsvik.se/jonasgerling_water_ripple/waterRTT-clean.html
I appreciate any tips and pointers, not only fixes for this problem but for the code in general; I'm here to learn.
After a brief look it seems like your problem is in storing x/y positions.
gl_FragColor = vec4(vec3(p0*0.5+0.5), 1.0);
You don't need to store them anyway, because the texel position implicitly gives the x/y value. Just change your normal points to something like this...
vec3 p2 = vec3(1, 0, texture2D(rttTexture, vUV + vec2(w, 0)).z);
Rather than 1, 0 you will want to use a scale appropriate to the size of your displayed quad relative to the wave height. Anyway, the result now looks like this.
The height/z seems to be scaled by distance from the centre, so I went looking for a normalize() and removed it...
vec3 p = vPosition;
gl_FragColor = vec4(p*0.5+0.5, 1.0);
The normals now look like this...
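Consolidating both fixes, a rough sketch of the height-based normal reconstruction (texelScale is a hypothetical tuning factor standing in for the quad-size-to-wave-height scale mentioned above):
uniform sampler2D rttTexture;
uniform float texelScale; // hypothetical: quad size relative to wave height

vec3 reconstructNormal(vec2 vUV, float w, float h) {
    // Central differences on the stored height (.z); x/y come from texel spacing.
    float hL = texture2D(rttTexture, vUV - vec2(w, 0.0)).z;
    float hR = texture2D(rttTexture, vUV + vec2(w, 0.0)).z;
    float hD = texture2D(rttTexture, vUV - vec2(0.0, h)).z;
    float hU = texture2D(rttTexture, vUV + vec2(0.0, h)).z;
    return normalize(vec3(hL - hR, hD - hU, 2.0 * texelScale));
}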

Reconstructed position from depth leads to incorrect lighting

I am attempting to reconstruct my fragment's position from a depth value stored in a GL_DEPTH_ATTACHMENT. To do this, I linearize the depth then multiply the depth by a ray from the camera position and to the corresponding point on the far plane.
This method is the second one described here. In order to get the ray from the camera to the far plane, I retrieve rays to the four corners of the far planes, pass them to my vertex shader, then interpolate into the fragment shader. I am using the following code to get the rays from the camera to the far plane's corners in world space.
std::vector<float> Camera::GetFlatFarFrustumCorners() {
    // rotation is the orientation of my camera in a quaternion.
    glm::quat inverseRotation = glm::inverse(rotation);

    glm::vec3 localUp = glm::normalize(inverseRotation * glm::vec3(0.0f, 1.0f, 0.0f));
    glm::vec3 localRight = glm::normalize(inverseRotation * glm::vec3(1.0f, 0.0f, 0.0f));

    float farHeight = 2.0f * tan(90.0f / 2) * 100.0f;
    float farWidth = farHeight * aspect;

    // 100.0f is the distance to the far plane. position is the location of the camera in world space.
    glm::vec3 farCenter = position + glm::vec3(0.0f, 0.0f, -1.0f) * 100.0f;

    glm::vec3 farTopLeft     = farCenter + (localUp * (farHeight / 2)) - (localRight * (farWidth / 2));
    glm::vec3 farTopRight    = farCenter + (localUp * (farHeight / 2)) + (localRight * (farWidth / 2));
    glm::vec3 farBottomLeft  = farCenter - (localUp * (farHeight / 2)) - (localRight * (farWidth / 2));
    glm::vec3 farBottomRight = farCenter - (localUp * (farHeight / 2)) + (localRight * (farWidth / 2));

    return {
        farTopLeft.x, farTopLeft.y, farTopLeft.z,
        farTopRight.x, farTopRight.y, farTopRight.z,
        farBottomLeft.x, farBottomLeft.y, farBottomLeft.z,
        farBottomRight.x, farBottomRight.y, farBottomRight.z
    };
}
Is this a correct way to retrieve the corners of the far plane in world space?
When I use these corners with my shaders, the results are incorrect, and what I get seems to be in view space. These are the shaders I am using:
Vertex Shader:
layout(location = 0) in vec2 vp;
layout(location = 1) in vec3 textureCoordinates;

uniform vec3 farFrustumCorners[4];
uniform vec3 cameraPosition;

out vec2 st;
out vec3 frustumRay;

void main () {
    st = textureCoordinates.xy;
    gl_Position = vec4 (vp, 0.0, 1.0);
    frustumRay = farFrustumCorners[int(textureCoordinates.z)-1] - cameraPosition;
}
Fragment Shader:
in vec2 st;
in vec3 frustumRay;

uniform sampler2D colorTexture;
uniform sampler2D normalTexture;
uniform sampler2D depthTexture;

uniform vec3 cameraPosition;
uniform vec3 lightPosition;

out vec3 color;

void main () {
    // Far and near distances; used to linearize the depth value.
    float f = 100.0;
    float n = 0.1;
    float depth = (2 * n) / (f + n - (texture(depthTexture, st).x) * (f - n));

    vec3 position = cameraPosition + (normalize(frustumRay) * depth);
    vec3 normal = texture(normalTexture, st).xyz;

    float k = 0.00001;
    vec3 distanceToLight = lightPosition - position;
    float distanceLength = length(distanceToLight);
    float attenuation = (1.0 / (1.0 + (0.1 * distanceLength) + k * (distanceLength * distanceLength)));
    float diffuseTemp = max(dot(normalize(normal), normalize(distanceToLight)), 0.0);
    vec3 diffuse = vec3(1.0, 1.0, 1.0) * attenuation * diffuseTemp;

    vec3 gamma = vec3(1.0/2.2);
    color = pow(texture(colorTexture, st).xyz + diffuse, gamma);
    //color = texture(colorTexture, st);
    //colour.r = (2 * n) / (f + n - texture( tex, st ).x * (f - n));
    //colour.g = (2 * n) / (f + n - texture( tex, st ).y * (f - n));
    //colour.b = (2 * n) / (f + n - texture( tex, st ).z * (f - n));
}
This is what my scene's lighting looks like under these shaders:
I am pretty sure that this is the result of either my reconstructed position being completely wrong, or it being in the wrong space. What is wrong with my reconstruction, and what can I do to fix it?
What you will first want to do is develop a temporary addition to your G-Buffer setup that stores the initial position of each fragment in world/view space (really, whatever space you are trying to reconstruct here). Then write a shader that does nothing but reconstruct these positions from the depth buffer. Set everything up so that half of your screen displays the original G-Buffer and the other half displays your reconstructed position. You should be able to immediately spot discrepancies this way.
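As a rough sketch of that split-screen check (every name here is hypothetical: storedPositionTex would be the temporary G-Buffer attachment, reconstructed your depth-based result):
uniform sampler2D storedPositionTex; // hypothetical debug attachment
uniform float screenWidth;           // hypothetical uniform

// Left half: ground-truth position from the G-Buffer.
// Right half: position reconstructed from depth.
// Any visible seam or mismatch marks a reconstruction error.
vec3 debugPosition(vec2 st, vec3 reconstructed) {
    vec3 stored = texture(storedPositionTex, st).xyz;
    return (gl_FragCoord.x < screenWidth * 0.5) ? stored : reconstructed;
}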
That said, you might want to take a look at an implementation I have used in the past to reconstruct (object space) position from the depth buffer. It basically gets you into view space first, then uses the inverse modelview matrix to go to object space. You can adjust it for world space trivially. It is probably not the most flexible implementation, what with FOV being hard-coded and all, but you can easily modify it to use uniforms instead...
Trimmed down fragment shader:
flat in mat4 inv_mv_mat;
in vec2 uv;
...

float linearZ (float z)
{
#ifdef INVERT_NEAR_FAR
    const float f = 2.5;
    const float n = 25000.0;
#else
    const float f = 25000.0;
    const float n = 2.5;
#endif

    return n / (f - z * (f - n)) * f;
}

vec4 reconstruct_pos (float depth)
{
    depth = linearZ (depth);

    vec4 pos = vec4 (uv * depth, -depth, 1.0);
    vec4 ret = (inv_mv_mat * pos);

    return ret / ret.w;
}
It takes a little additional setup in the vertex shader stage of the deferred shading lighting pass, which looks like this:
#version 150 core

in vec4 vtx_pos;
in vec2 vtx_st;

uniform mat4 modelview_mat; // Matrix used when the G-Buffer was built
uniform mat4 camera_matrix; // Matrix used to stretch the G-Buffer over the viewport

uniform float buffer_res_x;
uniform float buffer_res_y;

out vec2 tex_st;
flat out mat4 inv_mv_mat;
out vec2 uv;

// Hard-coded 45 degree FOV
//const float fovy = 0.78539818525314331; // NV pukes on the line below!
//const float fovy = radians (45.0);
//const float tan_half_fovy = tan (fovy * 0.5);
const float tan_half_fovy = 0.41421356797218323;

float aspect = buffer_res_x / buffer_res_y;

vec2 inv_focal_len = vec2 (tan_half_fovy * aspect,
                           tan_half_fovy);

const vec2 uv_scale     = vec2 (2.0, 2.0);
const vec2 uv_translate = vec2 (1.0, 1.0);

void main (void)
{
    inv_mv_mat = inverse (modelview_mat);
    tex_st = vtx_st;

    gl_Position = camera_matrix * vtx_pos;
    uv = (vtx_st * uv_scale - uv_translate) * inv_focal_len;
}
Depth range inversion is something you might find useful for deferred shading. Normally a perspective depth buffer gives you more precision than you need at close range and not enough far away for quality reconstruction. If you flip things on their head by inverting the depth range, you can even things out a little while still using the hardware depth buffer. This is discussed in detail here.