How to overcome differences between off-screen and on-screen framebuffer rendering? - c++

I'm trying to use an off-screen framebuffer to replicate a scene that renders wonderfully to the default framebuffer. There seem to be differences in the rendering that I can't sort out.
For context, I am visualizing the Earth with an atmospheric shader. I am using a Qt QOpenGLWidget, but mostly raw GL calls because I'm not a fan of Qt's abstractions. I need to render this scene to an off-screen framebuffer because I would like to implement some post-processing effects in my visualization, for which I need to be able to sample the scene as a texture. I've gotten to the point where I am successfully creating a framebuffer and rendering its color texture to a quad on the screen.
My understanding is that alpha blending behaves differently when rendering to an off-screen framebuffer compared to the default one. I haven't been able to find any resources online that indicate a way to produce identical results without a major refactor. The methodologies I've seen involve either manually rendering objects in order from back to front, or baking the alpha values into the colors that are sent to the framebuffer. I've tried an often-suggested alternative, which is using glBlendFuncSeparate to control things more manually:
glEnable(GL_BLEND);
glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
But that hasn't led to any noticeable improvement in my results (nor would I expect it to, since the math here wouldn't resolve the blending issues that I'm seeing).
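(For reference, the bake-the-alpha-in route would mean premultiplying each shader's RGB output by its alpha and then compositing everything, both into the FBO and onto the screen, with the premultiplied blend function; a sketch of what I understand that refactor to be:)
// Premultiplied-alpha compositing (sketch): the shaders are assumed to output
// colors already multiplied by alpha, so the source blend factor becomes GL_ONE.
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);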
So enough rambling, onto some actual code. My code-base is monstrous so I unfortunately can't share all of it, as there are a number of proprietary drawing routines, but I can start with how I generate my framebuffer:
// Create the framebuffer object
glGenFramebuffers(1, &m_fbo);
// Bind the framebuffer to the current context
glBindFramebuffer(GL_FRAMEBUFFER, m_fbo);
// generate texture to attach as a color attachment to the current frame buffer
m_texColorUnit = 4;
// Set to width and height of window, and leave data uninitialized
glGenTextures(1, &m_texColorBuffer);
glActiveTexture(GL_TEXTURE0 + m_texColorUnit);
glBindTexture(GL_TEXTURE_2D, m_texColorBuffer);
glTexImage2D(GL_TEXTURE_2D,
0,
GL_RGB8_OES,
m_navigation->renderContext()->getWidth(),
m_navigation->renderContext()->getHeight(),
0,
GL_RGB8_OES,
GL_UNSIGNED_BYTE,
NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
// attach texture to currently bound framebuffer object
glFramebufferTexture2D(GL_FRAMEBUFFER,
GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D,
m_texColorBuffer,
0);
glBindTexture(GL_TEXTURE_2D, 0); //unbind the texture
glActiveTexture(GL_TEXTURE0); // Reset active texture to default
// Create renderBuffer object for depth and stencil checking
glGenRenderbuffers(1, &m_rbo);
glBindRenderbuffer(GL_RENDERBUFFER, m_rbo); // bind rbo
glRenderbufferStorage(GL_RENDERBUFFER,
GL_DEPTH24_STENCIL8_OES,
m_navigation->renderContext()->getWidth(),
m_navigation->renderContext()->getHeight()
); // allocate memory
// Attach rbo to the depth and stencil attachment of the fbo
glFramebufferRenderbuffer(GL_FRAMEBUFFER,
GL_DEPTH_STENCIL_OES,
GL_RENDERBUFFER,
m_rbo);
And the shaders for the atmosphere:
// vert
#ifndef GL_ES
precision mediump int;
precision highp float;
#endif
attribute vec3 posAttr;
uniform highp mat4 matrix;
uniform highp mat4 modelMatrix;
uniform vec3 v3CameraPos; // The camera's current position
uniform vec3 v3LightPos; // The direction vector to the light source
uniform vec3 v3InvWavelength; // 1 / pow(wavelength, 4) for the red, green, and blue channels
uniform float fCameraHeight; // The camera's current height
uniform float fCameraHeight2; // fCameraHeight^2
uniform float fOuterRadius; // The outer (atmosphere) radius
uniform float fOuterRadius2; // fOuterRadius^2
uniform float fInnerRadius; // The inner (planetary) radius
uniform float fInnerRadius2; // fInnerRadius^2
uniform float fKrESun; // Kr * ESun
uniform float fKmESun; // Km * ESun
uniform float fKr4PI; // Kr * 4 * PI
uniform float fKm4PI; // Km * 4 * PI
uniform float fScale; // 1 / (fOuterRadius - fInnerRadius)
uniform float fScaleDepth; // The scale depth (i.e. the altitude at which the atmosphere's average density is found)
uniform float fScaleOverScaleDepth; // fScale / fScaleDepth
const int nSamples = 5;
const float fSamples = 5.0;
varying vec3 col;
varying vec3 colatten;
varying vec3 v3Direction;
varying vec3 vertexWorld;
float scale(float fCos)
{
float x = 1.0 - fCos;
return fScaleDepth * exp(-0.00287 + x*(0.459 + x*(3.83 + x*(-6.80 + x*5.25))));
}
void main(void)
{
// Get the ray from the camera to the vertex and its length (which is the far point of the ray passing through the atmosphere)
vec3 v3Pos = posAttr;
vertexWorld = posAttr; // write the varying (no local re-declaration, which would shadow it)
vec3 v3Ray = v3Pos - v3CameraPos;
float fFar = length(v3Ray);
v3Ray /= fFar;
// Calculate the closest intersection of the ray with the outer atmosphere (which is the near point of the ray passing through the atmosphere)
float B = 2.0 * dot(v3CameraPos, v3Ray);
float C = fCameraHeight2 - fOuterRadius2;
float fDet = max(0.0, B*B - 4.0 * C);
float fNear = 0.5 * (-B - sqrt(fDet));
// Calculate the ray's starting position, then calculate its scattering offset
vec3 v3Start = v3CameraPos + v3Ray*fNear;
fFar -= fNear;
float fStartAngle = dot(v3Ray, v3Start) / fOuterRadius;
float fStartDepth = exp(-1.0 / fScaleDepth);
float fStartOffset = fStartDepth*scale(fStartAngle);
// Initialize the scattering loop variables
float fSampleLength = fFar / fSamples;
float fScaledLength = fSampleLength * fScale;
vec3 v3SampleRay = v3Ray * fSampleLength;
vec3 v3SamplePoint = v3Start + v3SampleRay * 0.5;
// Now loop through the sample rays
vec3 v3FrontColor = vec3(0.0, 0.0, 0.0);
for(int i=0; i<nSamples; i++)
{
float fHeight = length(v3SamplePoint);
float fDepth = exp(fScaleOverScaleDepth * (fInnerRadius - fHeight));
float fLightAngle = dot(v3LightPos, v3SamplePoint) / fHeight;
float fCameraAngle = dot(v3Ray, v3SamplePoint) / fHeight;
float fScatter = (fStartOffset + fDepth*(scale(fLightAngle) - scale(fCameraAngle)));
vec3 v3Attenuate = exp(-fScatter * (v3InvWavelength * fKr4PI + fKm4PI));
v3FrontColor += v3Attenuate * (fDepth * fScaledLength);
v3SamplePoint += v3SampleRay;
}
// Finally, scale the Mie and Rayleigh colors and set up the varying variables for the pixel shader
colatten = v3FrontColor * fKmESun;
col = v3FrontColor * (v3InvWavelength*fKrESun);
v3Direction = v3CameraPos - v3Pos;
gl_Position = matrix * modelMatrix * vec4(posAttr,1);
}
// frag
#ifdef GL_ES
precision highp float;
precision mediump int;
#endif
varying vec3 col;
varying vec3 colatten;
varying vec3 v3Direction;
varying vec3 vertexWorld;
uniform vec3 v3LightPos;
uniform float g;
uniform float g2;
uniform float fExposure;
void main (void)
{
//float fCos = dot(normalize(lPos), normalize(v3Direction));
float fCos = dot(v3LightPos, v3Direction) / length(v3Direction);
float fRayleighPhase = 0.75 * (1.0 + fCos*fCos);
float fMiePhase = 1.5 * ((1.0 - g2) / (2.0 + g2)) * (1.0 + fCos*fCos) / pow(1.0 + g2 - 2.0*g*fCos, 1.5);
//vec3 result = clamp(col + fMiePhase * colatten, vec3(0,0,0), vec3(1,1,1));
//gl_FragColor = vec4(result, result.b);
gl_FragColor.rgb = 1.0 - exp(-fExposure * (fRayleighPhase * col + fMiePhase * colatten));
//gl_FragColor.a = 1.0;
gl_FragColor.a = gl_FragColor.b;
}
As I've said, my results are less than stellar. The first image is what I get when rendering to the off-screen framebuffer, and the second is what I get when rendering directly to the screen. Any ideas on how to reconcile the two?

The depth render buffer is not attached to the framebuffer. The 2nd parameter of glFramebufferRenderbuffer has to be the attachment point.
GL_DEPTH_STENCIL_OES is not a valid value for an attachment point. So
glFramebufferRenderbuffer(GL_FRAMEBUFFER,
GL_DEPTH_STENCIL_OES,
GL_RENDERBUFFER,
m_rbo);
will cause a GL_INVALID_ENUM error, which can be retrieved with glGetError.
The enumerator constant that specifies the depth and stencil attachment is GL_DEPTH_STENCIL_ATTACHMENT:
glFramebufferRenderbuffer(GL_FRAMEBUFFER,
GL_DEPTH_STENCIL_ATTACHMENT,
GL_RENDERBUFFER,
m_rbo);
Note that although the depth/stencil buffer is not attached, the framebuffer is still complete; a framebuffer does not need a depth and stencil buffer to be complete.
Alternatively, you can use a depth-buffer-only attachment. Create a depth renderbuffer (GL_DEPTH_COMPONENT) and use the attachment type GL_DEPTH_ATTACHMENT.
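A minimal sketch of that depth-only variant, reusing the names from the question:
// Depth-only alternative (sketch): allocate a pure depth renderbuffer and
// attach it to GL_DEPTH_ATTACHMENT instead of the combined attachment point.
glGenRenderbuffers(1, &m_rbo);
glBindRenderbuffer(GL_RENDERBUFFER, m_rbo);
glRenderbufferStorage(GL_RENDERBUFFER,
GL_DEPTH_COMPONENT16,
m_navigation->renderContext()->getWidth(),
m_navigation->renderContext()->getHeight());
glFramebufferRenderbuffer(GL_FRAMEBUFFER,
GL_DEPTH_ATTACHMENT,
GL_RENDERBUFFER,
m_rbo);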
The issue is caused by the fact that the texture attached to the color plane of the framebuffer has no alpha channel. The format GL_RGB8_OES provides the 3 color channels (RGB) but no alpha channel.
glTexImage2D(GL_TEXTURE_2D,
0,
GL_RGB8_OES,
m_navigation->renderContext()->getWidth(),
m_navigation->renderContext()->getHeight(),
0,
GL_RGB8_OES,
GL_UNSIGNED_BYTE,
NULL);
You have to use GL_RGBA8_OES rather than GL_RGB8_OES for both the format and the internal format; GL_RGBA8_OES is included in OES_required_internalformat, too. See also __gles2_gl2ext_h_:
glTexImage2D(GL_TEXTURE_2D,
0,
GL_RGBA8_OES,
m_navigation->renderContext()->getWidth(),
m_navigation->renderContext()->getHeight(),
0,
GL_RGBA8_OES,
GL_UNSIGNED_BYTE,
NULL);
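Either way, it is worth checking the result once all attachments are in place; a minimal sketch:
// Verify the framebuffer is complete before rendering to it (sketch).
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
{
// handle the error, e.g. log it and fall back to the default framebuffer
}
glBindFramebuffer(GL_FRAMEBUFFER, 0); // rebind the default framebuffer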

Related

Why does VSM Depth Map Blurring produce strange results?

I am trying to implement Variance Shadow Mapping for directional shadows in my rendering engine with OpenGL.
I have read multiple articles such as - https://developer.nvidia.com/gpugems/gpugems3/part-ii-light-and-shadows/chapter-8-summed-area-variance-shadow-maps, https://graphics.stanford.edu/~mdfisher/Shadows.html to develop this.
The basic flow of the algorithm is as follows:
Store the depth, and depth^2 in the depth texture.
Apply a two-pass Gaussian blur with a 5 x 5 kernel and 10 passes.
Sample a depth value, calculate the fragment's distance from the light, and
Put them in the Chebyshev inequality to determine the maximum probability of the fragment being in shadow (written out below the list)
Use the result to make the fragment dark.
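For reference, the Chebyshev step evaluates the one-tailed upper bound
$$\sigma^2 = m_2 - m_1^2, \qquad P(d \geq t) \leq p_{\max} = \frac{\sigma^2}{\sigma^2 + (t - m_1)^2},$$
where $m_1$ and $m_2$ are the two blurred moments sampled from the map and $t$ is the receiving fragment's depth from the light.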
Here's my Depth Shader for the directional light with an orthographic projection matrix:
#version 440 core
uniform float farPlane;
uniform vec3 lightPos;
uniform mat4 directional_light_space_matrix;
in vec4 FragPos;
out vec2 depth;
void main()
{
vec4 FragPosLightSpace = directional_light_space_matrix * FragPos;
float d = FragPosLightSpace.z / FragPosLightSpace.w;
d = d * 0.5 + 0.5;
float m1 = d;
float m2 = d * d;
float dx = dFdx(d);
float dy = dFdy(d);
m2 += 0.25 * (dx * dx + dy * dy); // bias the second moment with the depth's screen-space derivatives
depth.r = m1;
depth.g = m2;
}
Here's the snippet of the fragment shader that checks how much a fragment is lit.
float linstep(float mi, float ma, float v)
{
return clamp ((v - mi)/(ma - mi), 0, 1);
}
float ReduceLightBleeding(float p_max, float Amount)
{
return linstep(Amount, 1, p_max);
}
float chebyshevUpperBound(float dist, vec2 moments)
{
float p_max;
if(dist <= moments.x)
{
return 1.0;
}
float variance = moments.y - (moments.x * moments.x);
variance = max(variance, 0.1);
float d = moments.x - dist;
p_max = variance / (variance + d * d);
return ReduceLightBleeding(p_max, 1.0);
}
float CheckDirectionalShadow(float bias, vec3 lightpos, vec3 FragPos)
{
vec3 projCoords = FragPosLightSpace.xyz / FragPosLightSpace.w;
projCoords = projCoords * 0.5 + 0.5;
vec2 closest_depth = texture(shadow_depth_map_directional, projCoords.xy).rg;
return chebyshevUpperBound(projCoords.z, closest_depth);
}
Here's the Two Pass Gaussian Blur shader.
#version 440 core
layout (location = 0) out vec2 out_1;
in vec2 TexCoords;
uniform sampler2D inputTexture_1;
uniform bool horizontal;
float weights[5] = float[](0.227027, 0.1945946, 0.1216216, 0.054054, 0.016216);
void main()
{
vec2 tex_offset = 1.0 / textureSize(inputTexture_1,0);
vec2 o1 = texture(inputTexture_1, TexCoords).rg * weights[0];
if(horizontal)
{
for(int i=1; i<4; i++)
{
o1 += texture(inputTexture_1, TexCoords + vec2(tex_offset.x * i, 0.0)).rg * weights[i];
o1 += texture(inputTexture_1, TexCoords - vec2(tex_offset.x * i, 0.0)).rg * weights[i];
}
}
else
{
for(int i=1; i<4; i++)
{
o1 += texture(inputTexture_1, TexCoords + vec2(0.0, tex_offset.y * i)).rg * weights[i];
o1 += texture(inputTexture_1, TexCoords - vec2(0.0, tex_offset.y * i)).rg * weights[i];
}
}
out_1 = o1;
}
I am including my framebuffer generation code to show how I store the moments.
// directional ----------------------------------------------------------------------------------------------------------------------------------------------
glGenFramebuffers(1, &directional_shadow_framebuffer);
glGenTextures(1, &directional_shadow_framebuffer_depth_texture);
glBindTexture(GL_TEXTURE_2D, directional_shadow_framebuffer_depth_texture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RG32F, shadow_map_width, shadow_map_height, 0, GL_RG, GL_FLOAT, NULL);
float border_color[] = { 0.0f,0.0f,0.0f,1.0f };
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, border_color);
glBindFramebuffer(GL_FRAMEBUFFER, directional_shadow_framebuffer);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, directional_shadow_framebuffer_depth_texture, 0);
glGenRenderbuffers(1, &directional_shadow_framebuffer_renderbuffer);
glBindRenderbuffer(GL_RENDERBUFFER, directional_shadow_framebuffer_renderbuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, shadow_map_width, shadow_map_height);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, directional_shadow_framebuffer_renderbuffer);
if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE)
LOGGER->log(ERROR, "Renderer : createShadowMapBuffer", "Directional Shadow Framebuffer is incomplete!");
glBindRenderbuffer(GL_RENDERBUFFER, 0);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
// ----------------------------------------------------------------------------------------------------------------------------------------------
The results of the above operations are far from what I expected. Instead of getting soft penumbra shadows, I get blob-like sharp shadows.
Here's what the first moment (depth) looks like; the second moment is pretty much the same but darker.
I have tried experimenting with the minimum variance, shadow kernel size, Gaussian samples, and blur passes, but I haven't come any closer to a solution.
I have a feeling I may be doing something wrong with how I have set the texture filtering parameters in the framebuffer generation code given above.
My final questions are :
Is my implementation of VSMs incorrect?
Why do I not see soft penumbras?
I don't have a good feeling about how my texture is filtered, is there anything wrong in the Framebuffer generation code?
So, I solved the problem.
The implementation is perfectly fine, but the min variance and the amount parameter of ReduceLightBleeding required tuning.
I discovered that reducing the minimum variance parameter would soften the shadows more, but would greatly increase Light Bleeding.
To counter this side effect, we can force p_max to 0 when it is below a certain threshold, and otherwise rescale it between 0 and 1. This is exactly what the ReduceLightBleeding function does, which is also described on the site linked above.
But, increasing the amount parameter in ReduceLightBleeding would make the shadows look blob-like, which can be seen in the screenshots that I posted above.
I managed to tweak the min variance and the light bleeding reduction amounts to find an optimal spot. However, I could never completely get rid of this artifact.
A better alternative to Variance Shadow Mapping is its extension - Exponential Variance Shadow Maps.
I do not understand the math properly, but I still managed to implement it quite easily.
Check this question on gamedev.stackexchange for hints - EVSM.
EVSM did a great job of reducing the bleeding, to the point that it can either not be noticed or simply be ignored.
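In case it helps anyone following the same path, the core change over plain VSM is small. A minimal sketch of the depth-pass output, where d is the [0,1] depth, moments is an assumed vec4 output, and the warp constants are assumptions that need per-scene tuning:
// EVSM depth pass (sketch): warp the depth exponentially and store the
// moments of both the positive and the negative warp in a 4-channel target.
const float c_pos = 40.0; // positive warp constant (assumed; tune per scene)
const float c_neg = 5.0; // negative warp constant (assumed; tune per scene)
float warp_p = exp(c_pos * d);
float warp_n = -exp(-c_neg * d);
moments = vec4(warp_p, warp_p * warp_p, warp_n, warp_n * warp_n);
In the lighting pass, the Chebyshev bound is evaluated once for the positive pair and once for the negative pair, and the minimum of the two results is used as the shadow factor.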

Shadow Mapping OpenGL shadow not always drawing, and drawing where the position of the light is

I have been trying to do basic Shadow Mapping in my custom Engine using LearnOpenGL as the source. The link for the exact tutorial can be found: here.
I have been debugging this for around two weeks, researching the internet, and trying to wrap my head around it, but all I can say is that the shadow almost never appears, and when it does appear it is drawn where the lightPos is in terms of x and z. I tried to do everything exactly like in the tutorial around 10 times; I also checked this website for similar questions, but none of the causes I found matched my case.
findings
In Image (1) you can see that the shadow is not visible when the light is on top of it, but it becomes visible in Image (2) when the lightPos.x variable is around -4.5 or 4.5; the same holds for the lightPos.z variable. When the shadow does appear, it is drawn where the lightPos is, circled in red in the pictures.
I use multiple shaders: one for the light and shadow calculations (ShadowMapping) and one for basic depth mapping (ShadowMapGen).
Here is my ShadowMapping shader:
ShadowMapping Vertex
#version 460
in vec3 vertexIn;
in vec3 normalIn;
in vec2 textureIn;
out vec3 FragPos;
out vec3 normalOut;
out vec2 textureOut;
out vec4 FragPosLightSpace;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
uniform mat4 lightSpaceMatrix;
void main()
{
textureOut = textureIn;
FragPos = vec3(model * vec4(vertexIn, 1.0));
normalOut = mat3(transpose(inverse(model))) * normalIn;
FragPosLightSpace = lightSpaceMatrix * vec4(FragPos, 1.0);
gl_Position = projection * view * model * vec4(vertexIn, 1.0);
}
ShadowMapping Frag
out vec4 FragColor;
in vec3 FragPos;
in vec3 normalOut;
in vec2 textureOut;
in vec4 FragPosLightSpace;
uniform sampler2D diffuseTexture;
uniform sampler2D shadowMap;
uniform vec3 lightPos;
uniform vec3 viewPos;
float ShadowCalculation(vec4 fragPosLightSpace, vec3 lightdir)
{
// perform perspective divide
vec3 projCoords = fragPosLightSpace.xyz / fragPosLightSpace.w;
// transform to [0,1] range
projCoords = projCoords * 0.5 + 0.5;
// get closest depth value from light's perspective (using [0,1] range fragPosLight as coords)
float closestDepth = texture(shadowMap, projCoords.xy).r;
// get depth of current fragment from light's perspective
float currentDepth = projCoords.z;
// check whether current frag pos is in shadow
float bias = max(0.05 * (1.0 - dot(normalOut, lightdir)), 0.005);
// check whether current frag pos is in shadow
// float shadow = currentDepth - bias > closestDepth ? 1.0 : 0.0;
// // PCF
float shadow = 0.0;
vec2 texelSize = 1.0 / textureSize(shadowMap, 0);
for(int x = -1; x <= 1; ++x)
{
for(int y = -1; y <= 1; ++y)
{
float pcfDepth = texture(shadowMap, projCoords.xy + vec2(x, y) * texelSize).r;
shadow += currentDepth - bias > pcfDepth ? 1.0 : 0.0;
}
}
shadow /= 9.0;
// keep the shadow at 0.0 when outside the far_plane region of the light's frustum.
if(projCoords.z > 1.0)
shadow = 0.0;
return shadow;
}
void main()
{
vec3 color = texture(diffuseTexture, textureOut).rgb;
vec3 normal = normalize(normalOut);
vec3 lightColor = vec3(1.0f);
// ambient
vec3 ambient = 0.30 * color;
// diffuse
vec3 lightDir = normalize(lightPos - FragPos);
float diff = max(dot(lightDir, normal), 0.0);
vec3 diffuse = diff * lightColor;
// specular
vec3 viewDir = normalize(viewPos - FragPos);
vec3 reflectDir = reflect(-lightDir, normal);
float spec = 0.0;
vec3 halfwayDir = normalize(lightDir + viewDir);
spec = pow(max(dot(normal, halfwayDir), 0.0), 64.0);
vec3 specular = spec * lightColor;
// calculate shadow
float shadow = ShadowCalculation(FragPosLightSpace, lightDir);
vec3 lighting = (ambient + (1.0 - shadow) * (diffuse + specular)) * color;
FragColor = vec4(lighting, 1.0);
}
ShadowMapGen Vertex
Fragment Shader is empty for this shader
#version 460
in vec3 vertexIn;
uniform mat4 model;
uniform mat4 lightSpaceMatrix;
void main()
{
gl_Position = model * lightSpaceMatrix * vec4(vertexIn, 1.0);
}
Variable initialisation
lightPos = glm::vec3(-2.0f, 4.0f, -1.0f);
near_plane = 1.0f;
far_plane = 7.5f;
//SAMPLE 2D Uniform binding
TheShader::Instance()->SendUniformData("ShadowMapping_diffuseTexture", 0);
TheShader::Instance()->SendUniformData("ShadowMapping_shadowMap", 1);
Depth Map Framebuffer Generation
This is how I generate my depth map/ shadow map texture in the constructor of my scene:
glGenFramebuffers(1, &depthMapFBO);
//Create depth texture
glGenTextures(1, &depthMap);
glBindTexture(GL_TEXTURE_2D, depthMap);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, SHADOW_WIDTH, SHADOW_HEIGHT, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL); // Height and Width = 1024
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
float borderColor[] = { 1.0, 1.0, 1.0, 1.0 };
glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, borderColor);
//Attach depth texture as FBO's depth buffer
glBindFramebuffer(GL_FRAMEBUFFER, depthMapFBO);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthMap, 0);
glDrawBuffer(GL_NONE);
glReadBuffer(GL_NONE);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
Then, in an Update() function that runs in the engine's while loop, I first do the following:
Render Objects from light's perspective
//Light Projection and view Matrix
m_lightProjection = glm::ortho(-10.0f, 10.0f, -10.0f, 10.0f, near_plane, far_plane);
m_lightView = glm::lookAt(lightPos, glm::vec3(0.0f), glm::vec3(0.0f, 1.0f, 0.0f));
//Calculate light matrix and send it.
m_lightSpaceMatrix = m_lightProjection * m_lightView;
TheShader::Instance()->SendUniformData("ShadowMapGen_lightSpaceMatrix", 1, GL_FALSE, m_lightSpaceMatrix);
//Render to Framebuffer depth Map
glViewport(0, 0, SHADOW_WIDTH, SHADOW_HEIGHT);
glBindFramebuffer(GL_FRAMEBUFFER, depthMapFBO);
glClear(GL_DEPTH_BUFFER_BIT);
//Set current Shader to ShadowMapGen
m_floor.SetShader("ShadowMapGen");
m_moon.SetShader("ShadowMapGen");
//Send model Matrix to current Shader
m_floor.Draw();
m_moon.Draw();
//Set current Shader back to ShadowMapping
m_moon.SetShader("ShadowMapping");
m_floor.SetShader("ShadowMapping");
glBindFramebuffer(GL_FRAMEBUFFER, 0);
Render Objects from Camera's perspective
glViewport(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
//Update Camera and Send the view and projection matrices to the ShadowMapping shader
m_freeCamera->Update();
m_freeCamera->Draw();
//Send Light Pos
TheShader::Instance()->SendUniformData("ShadowMapping_lightPos", lightPos);
//Send LightSpaceMatrix
TheShader::Instance()->SendUniformData("ShadowMapping_lightSpaceMatrix", 1, GL_FALSE, m_lightSpaceMatrix);
//Activate Shadow Mapping texture
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, depthMap);
//Send model Matrix to ShadowMapping shaders
m_moon.Draw();
m_floor.Draw();
I hope someone will see this, thank you for your time.
I tried to do everything exactly like in the tutorial around 10 times
Well, you seem to have missed at least one obvious thing:
m_lightSpaceMatrix = m_lightProjection * m_lightView;
So far, so good, but in your "ShadowMapGen" vertex shader, you wrote:
gl_Position = model * lightSpaceMatrix * vec4(vertexIn, 1.0);
So you end up with model * projection * view multiplication order, which does not make sense no matter which conventions you adhere to. Since the tutorial uses default GL conventions, you always need projection * view * model * vertex multiplication order, which the tutorial also correctly uses.
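Since m_lightSpaceMatrix already contains projection * view, the corrected line is simply:
// Corrected order: lightSpaceMatrix (projection * view) first, then model.
gl_Position = lightSpaceMatrix * model * vec4(vertexIn, 1.0);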

OpenGL vertex shader is fast on Linux, but extremely slow on Windows

To draw the power spectral density of a signal (which is very similar to a heatmap), I use this vertex shader program. It receives the power value at each vertex, takes the logarithm to show the result in dB, normalizes it to the range of the colormap array, and assigns a color to the vertex.
#version 130
uniform float max_val;
uniform float min_val;
uniform int height;
attribute float val; // power-spectral-density value assigned to each vertex
// colormap values
const float r[512] = float[]( /* red values come here */ );
const float g[512] = float[]( /* green values come here */ );
const float b[512] = float[]( /* blue values come here */ );
void main() {
// set vertex position based on its ID
int x = gl_VertexID / height;
int y = gl_VertexID - x * height;
gl_Position = gl_ModelViewProjectionMatrix * vec4(x, y, -1.0, 1.0);
float e = log(max_val / min_val);
float d = log(val / min_val);
// set color
int idx = int(d * (512 - 1) / e); // find normalized index that falls in range [0, 512)
gl_FrontColor = vec4(r[idx], g[idx], b[idx], 1.0); // set color
}
Corresponding C++ code is here:
QOpenGLShaderProgram glsl_program;
// initialization code is omitted
glsl_program.bind();
glsl_program.setUniformValue(vshader_max_uniform, max_val);
glsl_program.setUniformValue(vshader_min_uniform, min_val);
glsl_program.setUniformValue(vshader_height_uniform, max_colormap_height);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 1, GL_FLOAT, GL_FALSE, 0, colormap); // colormap is a vector that contains value of power at each vertex
glDrawElements(GL_TRIANGLE_STRIP, vertices_length, GL_UNSIGNED_INT, nullptr); // vertex_length is size of colormap
glDisableVertexAttribArray(0);
glsl_program.release();
This program runs fast enough on Linux, but on Windows it is very slow and takes a lot of CPU time. If I change this line of GLSL:
// int idx = int(d * (512 - 1) / e);
int idx = 0;
then the app runs fast on Windows too. So it has to be a problem with the GLSL code.
How should I fix it?
What you're doing there belongs in the fragment shader, not the vertex shader. And you should submit both the color lookup table and the spectral density data as textures. Although vertex setup is not that expensive, it comes with a certain overhead, and in general you want to cover as many pixels as possible with the least number of vertices.
Also, apply the logarithm rules (e.g. log(a/b) = log(a) - log(b)), and avoid doing calculations in the shader that are uniform over the whole draw call; precalculate them on the host.
/* vertex shader */
#version 130
varying vec2 pos;
void main() {
// set vertex position based on its ID
// To fill the viewport, we need just three vertices
// of a right triangle of width and height 2 (in texcoord space)
pos.x = float(gl_VertexID % 2) * 2.0;
pos.y = float(gl_VertexID / 2) * 2.0;
// screen position is controlled using glViewport/glScissor;
// mapping pos in [0,2] to clip space [-1,3] covers the whole [-1,1] range
gl_Position = vec4(2.0 * pos - 1.0, 0.0, 1.0);
}
-
/* fragment shader */
#version 130
varying vec2 pos;
uniform sampler2D values;
uniform sampler1D colors;
uniform float log_min;
uniform float log_max;
void main() {
float val = texture2D(values, pos).x;
float e = log_max - log_min;
float d = (log(val) - log_min) / e;
gl_FragColor = vec4(texture1D(colors, d).rgb, 1.0); // set color
}
In later versions of GLSL some keywords have changed: varyings are declared using in and out instead of varying, and the texture access functions have been unified into texture(...), covering all sampler types.
glsl_program.bind();
glsl_program.setUniformValue(vshader_log_max_uniform, log(max_val));
glsl_program.setUniformValue(vshader_log_min_uniform, log(min_val));
// specify where to draw in window pixel coordinates.
glEnable(GL_SCISSOR_TEST);
glViewport(x, y, width, height);
glScissor(x, y, width, height);
glBindTexture(GL_TEXTURE_2D, values_texture);
glTexSubImage2D(GL_TEXTURE_2D, ..., spectral_density_data);
glDrawArrays(GL_TRIANGLES, 0, 3);
glsl_program.release();
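For completeness, the colors lookup table only needs to be uploaded once at initialization; a minimal sketch (colors_texture and colormap_rgb, a pointer to the 512 RGB triples, are assumed names):
// One-time setup (sketch): upload the 512-entry colormap as a 1D texture so
// the fragment shader can index it with texture1D(colors, d).
GLuint colors_texture;
glGenTextures(1, &colors_texture);
glBindTexture(GL_TEXTURE_1D, colors_texture);
glTexImage1D(GL_TEXTURE_1D, 0, GL_RGB32F, 512, 0, GL_RGB, GL_FLOAT, colormap_rgb);
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
// remember to point the "colors" sampler uniform at the unit this is bound to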

SSAO sample kernels cause performance drop when camera is close to model?

I have a problem where the performance drops as the camera gets closer to the model.
I figured out that it has to do with the SSAO sample kernels, but I can't seem to figure out why these cause performance issues when close to a mesh.
When I comment out the samples for-loop in the SSAO render code, the performance goes back to how it should be, so this loop is obviously somehow causing the issue. I originally thought it might be a shader problem, but I can't find any problems there either.
Any ideas? Here is all the code that you need...
SSAO Setup Code
// Create two frame buffers, one for ssao colour and another for ssao blur
_fbos.push_back(new Fbo(width, height, { new FboAttachment(width, height, GL_RED, GL_RGB, GL_FLOAT, GL_COLOR_ATTACHMENT0) }, false));
_fbos.push_back(new Fbo(width, height, { new FboAttachment(width, height, GL_RED, GL_RGB, GL_FLOAT, GL_COLOR_ATTACHMENT0) }, false));
//////////////////////////////////////////////////////////////////////////////////////////////////////////
std::uniform_real_distribution<GLfloat> rand_floats(0.0f, 1.0f); // Generate random floats between 0.0 and 1.0
std::default_random_engine rand_generator; // A generator for randomising floats
// Create temp iterator var
for (unsigned int i = 0; i < 64; ++i) // Iterate through each sample...
{
glm::vec3 sample(rand_floats(rand_generator) * 2.0f - 1.0f, rand_floats(rand_generator) * 2.0f - 1.0f, rand_floats(rand_generator)); // the third parameter was wrong on this line
sample = glm::normalize(sample); // Normalise the sample
sample *= rand_floats(rand_generator); // Seed the randomisation
float scale = static_cast<float>(i) / 64.0f; // Get pixel position in NDC about the resolution size
scale = Math::lerpf(0.1f, 1.0f, scale * scale); // Interpolate the scale
sample *= scale; // Scale the s and t values
_ssao_kernals.push_back(sample); // Assign sample to the kernel array
_u_samples.push_back(glGetUniformLocation(shader_programs[0], ("samples[" + std::to_string(i) + "]").c_str())); // Get each sample uniform location
}
for (unsigned int i = 0; i < 16; i++) // For each sample / 4...
{
glm::vec3 noise(rand_floats(rand_generator) * 2.0f - 1.0f, rand_floats(rand_generator) * 2.0f - 1.0f, 0.0f); // Randomly generate a noise pixel
_ssao_noise.push_back(noise); // Assign noise pixel to noise array
}
/*
* Create a noise texture to remove any banding from the ssao
*/
glGenTextures(1, &_noise_texture); // generate the texture
glBindTexture(GL_TEXTURE_2D, _noise_texture); // bind data
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB32F, 4, 4, 0, GL_RGB, GL_FLOAT, &_ssao_noise[0]); // set texture data
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); // texture filtering
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); // texture filtering
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT); // texture filtering
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT); // texture filtering
SSAO Render Function
_fbos[0]->Bind(); // bind ssao texture
glClear(GL_COLOR_BUFFER_BIT); // clear colour data on the screen
glUseProgram(_shader_programs[0]); // Use the first shader pass
for (unsigned int i = 0; i < SSAO_SAMPLE_RESOLUTION; ++i) // For each ssao sample...
glUniform3fv(_u_samples[i], 1, glm::value_ptr(_ssao_kernals[i])); // Assign kernal uniform data
glUniformMatrix4fv(_u_projection, 1, GL_FALSE, glm::value_ptr(Content::_map->GetCamera()->GetProjectionMatrix())); // Assign camera projection uniform data
glActiveTexture(GL_TEXTURE0); // Set active texture to index 0
glBindTexture(GL_TEXTURE_2D, _g_buffer_data->GetAttachments()[0]->_texture); // Bind positions
glActiveTexture(GL_TEXTURE1); // Set active texture to index 1
glBindTexture(GL_TEXTURE_2D, _g_buffer_data->GetAttachments()[1]->_texture); // Bind normals
glActiveTexture(GL_TEXTURE2); // Set active texture to index 2
glBindTexture(GL_TEXTURE_2D, _noise_texture); // Bind the noise texture
_screen_rect->Render(1); // Render to screen rectangle
// Blur ssao texture
_fbos[1]->Bind();
glClear(GL_COLOR_BUFFER_BIT);
glUseProgram(_shader_programs[1]); // Use the second shader pass
glActiveTexture(GL_TEXTURE0); // Bind active texture to index 0
glBindTexture(GL_TEXTURE_2D, _fbos[0]->GetAttachments()[0]->_texture); // Bind the final colour
_screen_rect->Render(1); // Render to screen rectangle
SSAO Fragment Shader
#version 330 core
out float FragColor;
in vec2 _texcoord;
uniform sampler2D gPosition;
uniform sampler2D gNormal;
uniform sampler2D texNoise;
uniform vec3 samples[64];
int kernelSize = 64;
float radius = 0.3;
float bias = 0.025;
const vec2 noiseScale = vec2(1920.0 / 4.0, 1080.0 / 4.0);
uniform mat4 proj;
void main()
{
vec3 fragPos = texture(gPosition, _texcoord).xyz;
vec3 normal = normalize(texture(gNormal, _texcoord).rgb);
vec3 randomVec = normalize(texture(texNoise, _texcoord * noiseScale).xyz);
vec3 tangent = normalize(randomVec - normal * dot(randomVec, normal));
vec3 bitangent = cross(normal, tangent);
mat3 TBN = mat3(tangent, bitangent, normal);
float occlusion = 0.0;
for(int i = 0; i < kernelSize; ++i)
{
// get sample position
vec3 sample = TBN * samples[i]; // from tangent to view-space
sample = fragPos + sample * radius;
// project sample position (to sample texture) (to get position on screen/texture)
vec4 offset = vec4(sample, 1.0);
offset = proj * offset; // from view to clip-space
offset.xyz /= offset.w; // perspective divide
offset.xyz = offset.xyz * 0.5 + 0.5; // transform to range 0.0 - 1.0
// get sample depth
float sampleDepth = texture(gPosition, offset.xy).z; // get depth value of kernel sample
// range check & accumulate
float rangeCheck = smoothstep(0.0, 1.0, radius / abs(fragPos.z - sampleDepth));
occlusion += (sampleDepth >= sample.z + bias ? 1.0 : 0.0) * rangeCheck;
}
occlusion = 1.0 - (occlusion / kernelSize);
FragColor = pow(occlusion, 3.0);
}
This is the expected performance characteristic of SSAO.
The closer the texel you're calculating the AO for is to the camera the farther away the sample points around it will be in screen-space, and the less likely it is that those neighboring texels will be in the GPU's texture cache - which causes a massive performance hit.
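One common mitigation (a sketch of an assumption, not something stated in the answer above) is to clamp each sample's screen-space offset inside the kernel loop, trading some close-range accuracy for bounded cache misses:
// Sketch: bound the screen-space distance between the shaded texel and the
// sample read so close-up geometry cannot scatter reads across the texture.
const float maxScreenRadius = 0.05; // in texture coordinates (assumed; tune)
vec2 delta = offset.xy - _texcoord;
offset.xy = _texcoord + clamp(delta, vec2(-maxScreenRadius), vec2(maxScreenRadius));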

Why can't I access the G-Buffer from my lighting shader?

I implemented a new rendering pipeline in my engine and rendering is broken now. When I directly draw a texture of the G-Buffer to the screen, it shows up correctly, so the G-Buffer is fine. But somehow the lighting pass causes trouble. Even if I don't use its resulting texture but instead try to display the albedo from the G-Buffer after the lighting pass, I get a solid gray color.
I can't explain this behavior and the strange thing is that there are no OpenGL errors at any point.
Vertex Shader to draw a fullscreen quad.
#version 330
in vec4 vertex;
out vec2 coord;
void main()
{
coord = vertex.xy;
gl_Position = vertex * 2.0 - 1.0;
}
Fragment Shader for lighting.
#version 330
in vec2 coord;
out vec3 image;
uniform int type = 0;
uniform sampler2D positions;
uniform sampler2D normals;
uniform vec3 light;
uniform vec3 color;
uniform float radius;
uniform float intensity = 1.0;
void main()
{
if(type == 0) // directional light
{
vec3 normal = texture2D(normals, coord).xyz;
float fraction = max(dot(normalize(light), normal) / 2.0 + 0.5, 0);
image = intensity * color * fraction;
}
else if(type == 1) // point light
{
vec3 pixel = texture2D(positions, coord).xyz;
vec3 normal = texture2D(normals, coord).xyz;
float dist = max(distance(pixel, light), 1);
float magnitude = 1 / pow(dist / radius + 1, 2);
float cutoff = 0.4;
float attenuation = clamp((magnitude - cutoff) / (1 - cutoff), 0, 1);
float fraction = clamp(dot(normalize(light - pixel), normal), -1, 1);
image = intensity * color * attenuation * max(fraction, 0.2);
}
}
Targets and samplers for the lighting pass. Texture IDs are mapped to attachments and shader locations, respectively.
unordered_map<GLenum, GLuint> targets;
targets.insert(make_pair(GL_COLOR_ATTACHMENT2, ...)); // light
targets.insert(make_pair(GL_DEPTH_STENCIL_ATTACHMENT, ...)); // depth and stencil
unordered_map<string, GLuint> samplers;
samplers.insert(make_pair("positions", ...)); // positions from G-Buffer
samplers.insert(make_pair("normals", ...)); // normals from G-Buffer
Draw function for lighting pass.
void DrawLights(unordered_map<string, GLuint> Samplers, GLuint Program)
{
auto lis = Entity->Get<Light>();
glClear(GL_COLOR_BUFFER_BIT);
glEnable(GL_BLEND);
glBlendFunc(GL_ONE, GL_ONE);
glUseProgram(Program);
int n = 0; for(auto i : Samplers)
{
glActiveTexture(GL_TEXTURE0 + n);
glBindTexture(GL_TEXTURE_2D, i.second);
glUniform1i(glGetUniformLocation(Program, i.first.c_str()), n);
n++;
}
mat4 view = Entity->Get<Camera>(*Global->Get<unsigned int>("camera"))->View;
for(auto i : lis)
{
int type = i.second->Type == Light::DIRECTIONAL ? 0 : 1;
vec3 pos = vec3(view * vec4(Entity->Get<Form>(i.first)->Position(), !type ? 0 : 1));
glUniform1i(glGetUniformLocation(Program, "type"), type);
glUniform3f(glGetUniformLocation(Program, "light"), pos.x, pos.y, pos.z);
glUniform3f(glGetUniformLocation(Program, "color"), i.second->Color.x, i.second->Color.y, i.second->Color.z);
glUniform1f(glGetUniformLocation(Program, "radius"), i.second->Radius);
glUniform1f(glGetUniformLocation(Program, "intensity"), i.second->Intensity);
glBegin(GL_QUADS);
glVertex2i(0, 0);
glVertex2i(1, 0);
glVertex2i(1, 1);
glVertex2i(0, 1);
glEnd();
}
glDisable(GL_BLEND);
glActiveTexture(GL_TEXTURE0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindTexture(GL_TEXTURE_2D, 0);
}
I found the error, and it was a stupid one. The old rendering pipeline bound the correct framebuffer before calling the draw function of each pass, but the new one doesn't, so each draw function has to do that itself. I therefore wanted to update all the draw functions, but I missed the one for the lighting pass.
As a result, the G-Buffer's framebuffer was still bound, and the lighting pass rendered over its targets.
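The fix was a couple of lines at the top of DrawLights (lightFbo is an assumed name for the lighting pass's framebuffer handle):
// Fix (sketch): bind the lighting pass's own framebuffer before clearing,
// so the still-bound G-Buffer is no longer the render target.
glBindFramebuffer(GL_FRAMEBUFFER, lightFbo); // lightFbo: assumed handle
glClear(GL_COLOR_BUFFER_BIT);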
Thanks to you guys; you had no chance of finding that error, since I hadn't posted my complete pipeline system.