Shadow Mapping with Deferred Renderer (OpenGL 4.1, GLSL)

I've read several articles here on Stack Overflow and in some books, but I can't find the error in my code. I have a deferred renderer and store albedo, depth and normals (including specular) in the g-buffer. I also created a shadow map from a light source above the scene.
It looks like this:
Albedo (R8G8B8A8):
Normal (R8G8B8A8):
Linear Depth (F32):
Shadow map from the light source (linear depth, 500 meters above the camera):
OK. My shader code for the deferred pass with shadow mapping looks like this:
Note that I upload row-major matrices.
Vertex Shader:
#version 410
layout(row_major) uniform UVSViewMatrix
{
mat4 m_ScreenMatrix;
};
layout(location = 0) in vec3 VertexPosition;
smooth out vec2 PSTexCoord;
void main()
{
vec4 Position = vec4(VertexPosition.xyz, 1.0f);
PSTexCoord = Position.xy;
gl_Position = Position * m_ScreenMatrix;
}
Fragment Shader:
#version 410
// -----------------------------------------------------------------------------
// Input from engine
// -----------------------------------------------------------------------------
layout(row_major) uniform UPSCameraProperties
{
mat4 m_ProjectionMatrix;
mat4 m_CameraView;
vec3 m_CameraPosition;
vec3 m_CameraDirection;
};
layout(row_major) uniform UPSLightProperties
{
mat4 m_LightProjection;
mat4 m_LightView;
vec4 m_LightPosition;
vec4 m_LightAmbientIntensity;
vec4 m_LightDiffuseIntensity;
vec4 m_LightSpecularIntensity;
};
uniform sampler2D PSTextureAlbedo;
uniform sampler2D PSTextureNormalSpecular;
uniform sampler2D PSTextureDepth;
uniform sampler2D PSTextureShadowMap;
// -----------------------------------------------------------------------------
// Input from vertex shader
// -----------------------------------------------------------------------------
smooth in vec2 PSTexCoord;
// -----------------------------------------------------------------------------
// Output to systembuffer
// -----------------------------------------------------------------------------
layout (location = 0) out vec4 PSOutput;
// -----------------------------------------------------------------------------
// Functions
// -----------------------------------------------------------------------------
vec3 GetViewSpacePositionFromDepth(float _Depth, vec2 _ScreenPosition, mat4 _InvertedProjectionMatrix)
{
// -----------------------------------------------------------------------------
// Information from:
// http://mynameismjp.wordpress.com/2009/03/10/reconstructing-position-from-depth/
// -----------------------------------------------------------------------------
vec4 ScreenPosition;
ScreenPosition.x = _ScreenPosition.x * 2.0f - 1.0f;
ScreenPosition.y = _ScreenPosition.y * 2.0f - 1.0f;
ScreenPosition.z = _Depth * 2.0f - 1.0f;
ScreenPosition.w = 1.0f;
// -----------------------------------------------------------------------------
// Transform by the inverse projection matrix
// -----------------------------------------------------------------------------
vec4 VSPosition = ScreenPosition * _InvertedProjectionMatrix;
// -----------------------------------------------------------------------------
// Divide by w to get the view-space position
// -----------------------------------------------------------------------------
return (VSPosition.xyz / VSPosition.w);
}
// -----------------------------------------------------------------------------
float GetShadowAtPosition(vec3 _WSPosition)
{
// -----------------------------------------------------------------------------
// Transform the world-space position into the light's projection space by
// multiplying with the light view and projection matrices
// -----------------------------------------------------------------------------
vec4 LSPosition = vec4(_WSPosition, 1.0f) * m_LightView * m_LightProjection;
// -----------------------------------------------------------------------------
// Divide xyz by w to get the position in light view's clip space.
// -----------------------------------------------------------------------------
LSPosition.xyz /= LSPosition.w;
// -----------------------------------------------------------------------------
// Get uv texcoords for this position
// -----------------------------------------------------------------------------
vec3 ShadowCoord = LSPosition.xyz * 0.5f + 0.5f;
// -----------------------------------------------------------------------------
// Get final depth at this texcoord and compare it with the real
// position z value (do a manual depth test)
// -----------------------------------------------------------------------------
float DepthValue = texture( PSTextureShadowMap, vec2(ShadowCoord.x, ShadowCoord.y) ).r;
float Shadow = 1.0f;
if (ShadowCoord.z > DepthValue)
{
Shadow = 0.3f;
}
return Shadow;
}
// -----------------------------------------------------------------------------
// Main
// -----------------------------------------------------------------------------
void main()
{
// -----------------------------------------------------------------------------
// Get informations from g-buffer
// -----------------------------------------------------------------------------
vec2 TexCoord = vec2(PSTexCoord.s, 1.0f - PSTexCoord.t);
vec4 AlbedoColor = texture(PSTextureAlbedo , TexCoord);
vec4 NormalSpec = texture(PSTextureNormalSpecular, TexCoord);
float Depth = texture(PSTextureDepth , TexCoord).r;
vec3 VSPosition = GetViewSpacePositionFromDepth(Depth, TexCoord, inverse(m_ProjectionMatrix));
vec3 Normal = normalize(NormalSpec.xyz);
float SpecularExponent = NormalSpec.w;
vec4 WSPosition = vec4(VSPosition, 1.0f) * inverse(m_CameraView);
// -----------------------------------------------------------------------------
// Compute lighting (Light Accumulation)
// -----------------------------------------------------------------------------
vec3 CameraPosition = m_CameraPosition.xyz;
vec3 LightPosition = m_LightPosition.xyz;
vec3 EyeDirection = WSPosition.xyz - CameraPosition;
vec3 LightDirection = normalize(LightPosition - WSPosition.xyz);
vec3 LightReflection = normalize(-reflect(LightDirection, Normal));
vec4 AmbientColor = m_LightAmbientIntensity;
vec4 DiffuseColor = clamp(m_LightDiffuseIntensity * max(dot(Normal, LightDirection), 0.0f), 0.0f, 1.0f);
vec4 SpecularColor = clamp(m_LightSpecularIntensity * pow(max(dot(LightReflection, EyeDirection), 0.0f), SpecularExponent), 0.0f, 1.0f);
float Shadow = GetShadowAtPosition(WSPosition.xyz);
// -----------------------------------------------------------------------------
// Final result
// -----------------------------------------------------------------------------
PSOutput = vec4((AlbedoColor * (AmbientColor + DiffuseColor) * Shadow).xyz, 1.0f);
}
In the end my result looks like this:
Can anyone see my mistake?
Some measurement results:
ShadowCoord.xy is always 0.5, 0.5
ShadowCoord.z seems to be 1.0f
Here are some variable values:
LightProjection
(
1.29903805f, 0.0f, 0.0, 0.0f,
0.0f, 1.73205066f, 0.0f, 0.0f,
0.0f, 0.0f, -1.00024426f, -1.0f,
0.0f, 0.0f, -1.00024426f, 0.0f
);
LightView
(
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, -1500.0f, 1.0f
);
CameraProjection
(
1.29903805f, 0.0f, 0.0f, 0.0f,
0.0f, 1.73205066f, 0.0f, 0.0f,
0.0f, 0.0f, -1.00024426f, -1.0f,
0.0f, 0.0f, -1.00024426f, 0.0f
);
CameraView
(
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, -1000.0f, 1.0f
);

One issue I can see is a difference between linear and hyperbolic depth. It looks like your "Linear Depth (F32)" G-buffer depth is actually hyperbolic, which is fine because the code expects this. However, the light's depth really is linear, but it is then compared against the clip-space depth after the perspective divide.
It'd probably be easiest to just make the light buffer's depth hyperbolic (gl_FragCoord.z), but changing the comparison to use eye-space Z should work too:
float ShadowCoordZ = -(vec4(_WSPosition, 1.0f) * m_LightView).z;
The bit about "ShadowCoord.xy is always 0.5, 0.5" is confusing. The G-buffer texture coord seems to work. I can't really tell with the shadow, but do the lighting equations work? If so, maybe something's wrong with the matrices? I'd also pass in the inverses of your matrices as uniforms so time isn't spent computing them in the shader.
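To illustrate, here is a minimal sketch of what GetShadowAtPosition could look like with the eye-space comparison, assuming the shadow map stores the light's eye-space depth in world units (if it is normalized to [0, 1] you would also need to divide by the light's far-plane distance; the bias value is just a placeholder):
float GetShadowAtPosition(vec3 _WSPosition)
{
    // Project into light clip space only to find the shadow map texcoord
    vec4 LSPosition = vec4(_WSPosition, 1.0f) * m_LightView * m_LightProjection;
    vec2 ShadowTexCoord = (LSPosition.xy / LSPosition.w) * 0.5f + 0.5f;
    // Compare depths in the light's eye space instead of clip space
    float ShadowCoordZ = -(vec4(_WSPosition, 1.0f) * m_LightView).z;
    float DepthValue = texture(PSTextureShadowMap, ShadowTexCoord).r;
    float Bias = 0.05f; // placeholder, tune for your scene
    return (ShadowCoordZ - Bias > DepthValue) ? 0.3f : 1.0f;
}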

Related

OpenGL: get window space to world space cursor position

I am trying to get the mouse cursor's world-space position from window space (-1 to 1 across the window width and height) using the view-projection matrix.
This is how I calculate my projection matrix:
static mat4 Perspective4x4(float FOV, float AspectRatio, float FarC, float NearC)
{
// Positive x is right
// Positive y is up
// Positive z is forward into the screen
float Cotangent = 1.0f / tanf((FOV)*0.5f);
float Depth = NearC - FarC;
float A = (-FarC - NearC) / Depth;
float B = 2.0f * FarC * NearC / Depth;
return
{
Cotangent/AspectRatio, 0.0f, 0.0f, 0.0f,
0.0f, Cotangent, 0.0f, 0.0f,
0.0f, 0.0f, -A, -B,
0.0f, 0.0f, 1.0f, 0.0f,
};
}
...
float WidthOverHeight = ...;
mat4 ProjectionMatrix = Perspective4x4(DegreesToRadians(90.0f), WidthOverHeight, 50.0f, 0.1f);
This is how I calculate my view matrix:
static mat4 Translate4x4(vec3 V)
{
return
{
1.0f, 0.0f, 0.0f, V.X,
0.0f, 1.0f, 0.0f, V.Y,
0.0f, 0.0f, 1.0f, V.Z,
0.0f, 0.0f, 0.0f, 1.0f
};
}
...
mat4 ViewMatrix = Translate4x4(-CameraPosition);
This is how I calculate the cursor position in world space:
mat4 ProjectionMatrix = ...;
mat4 ViewMatrix = ...;
vec2 CursorP = GetBilateralCursorPos(Input); // Values between -1 and 1
v4_f32 WorldSpacePNear = Inverse(ProjectionMatrix) * V4F32(CursorP, -1.0f, 1.0f);
WorldSpacePNear /= WorldSpacePNear.W;
WorldSpacePNear = Inverse(ViewMatrix) * WorldSpacePNear;
v4_f32 WorldSpacePFar = Inverse(ProjectionMatrix) * V4F32(CursorP, 1.0f, 1.0f);
WorldSpacePFar /= WorldSpacePFar.W;
WorldSpacePFar = Inverse(ViewMatrix) * WorldSpacePFar;
WorldSpacePFar.Z *= -1.0f;
WorldSpacePNear.Z *= -1.0f;
EDIT: I also tried dividing by W at the end, but it doesn't work properly either.
This is how I send the matrices to OpenGL (legacy):
// I send them transposed because my matrices are row-major
mat4 ProjectionMatrix = Transpose(...);
mat4 ViewMatrix = Transpose(...);
glMatrixMode(GL_PROJECTION);
glLoadMatrixf(ProjectionMatrix.E);
glMatrixMode(GL_MODELVIEW);
glLoadMatrixf(ViewMatrix.E);
The resulting positions do not seem to take the view projection into account, because a change in the camera position offsets them, making them inaccurate.
NOTES:
vec4, vec2, mat4 are custom float-based math types (they are what you would expect).
I know legacy OpenGL is deprecated; in fact I'm going to switch to modern OpenGL very soon, I just want to get this working.
I fixed the issue by calculating the cursor position like this:
mat4 ProjectionMatrix = ...;
mat4 ViewMatrix = ...;
// Z Distance from camera pos
float WorldDistanceFromCameraZ = 1.0f;
vec2 CursorP = GetBilateralCursorPos(Input); // Values between -1 and 1
vec4 ProbeZ = V4F32(World->Camera.P - WorldDistanceFromCameraZ*World->Camera.P.Z, 1.0f);
ProbeZ = (ProjectionMatrix*ViewMatrix) * ProbeZ;
vec4 ClipP = V4F32(CursorP.X*ProbeZ.W, CursorP.Y*ProbeZ.W, ProbeZ.Z, ProbeZ.W);
vec4 WorldP = Inverse(ProjectionMatrix*ViewMatrix) * ClipP;
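For reference, the same idea can be written as a general helper; this is only a sketch, assuming the custom math types and helpers used in the snippets above (V4F32, Inverse, the .W member). The important detail is that the perspective divide happens after the full inverse view-projection has been applied:
// Hypothetical helper: maps an NDC point (x, y and NDCDepth all in [-1, 1]) back to world space.
static vec4 NDCToWorldSpace(vec2 NDCPos, float NDCDepth, mat4 ProjectionMatrix, mat4 ViewMatrix)
{
    vec4 ClipP = V4F32(NDCPos, NDCDepth, 1.0f);
    vec4 WorldP = Inverse(ProjectionMatrix * ViewMatrix) * ClipP;
    WorldP /= WorldP.W; // perspective divide after the full inverse
    return WorldP;
}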

Having trouble when using a 2D orthographic matrix with glm

After I've set up the (orthographic) projection matrix for my simple 2D game, nothing renders on the screen. I am using cglm (glm, but in C) and compared cglm's results with the normal glm ortho projection implementation, which renders fine; the resulting projection matrices match. Here is my render loop:
void RenderSprite(const struct Sprite *sprite) {
struct Shader *shader = GetSpriteShader(sprite);
UseShader(shader);
/* cglm starts here */
mat4 proj;
glm_ortho(0.0f, 800.0f, 600.0f, 0.0f, -1.0f, 1.0f, proj); /* screen width: 800, height: 600 */
mat4 model;
glm_mat4_identity(model); /* an identity model matrix - does nothing */
/* cglm ends here */
SetShaderUniformMat4(shader, "u_Projection", proj); /* set the relevant uniforms */
SetShaderUniformMat4(shader, "u_Model", model);
/* finally, bind the VAO and call the draw call (note that I am not using batch rendering - I am using just a simple plain rendering system) */
glBindVertexArray(ezGetSpriteVAO(sprite));
glDrawElements(GL_TRIANGLES, ezGetSpriteIndexCount(sprite), GL_UNSIGNED_INT, 0);
}
However, this results in a blank screen. I believe I've done everything in order, but nothing renders.
For anyone interested, here is my vertex shader:
#version 330 core
layout (location = 0) in vec3 pos;
layout (location = 1) in vec2 uv;
uniform mat4 u_Model;
uniform mat4 u_Projection;
void main() {
gl_Position = u_Projection * u_Model * vec4(pos, 1.0f);
}
And here is my fragment shader:
#version 330 core
out vec4 color;
void main() {
color = vec4(1.0f);
}
As far as I am aware, cglm matrices are ordered column-major, which OpenGL wants.
Any help would be appreciated.
Thanks in advance.
EDIT
The sprite coordinates (the vertex data, in this case) are:
-0.5f, -0.5f, 0.0f,
0.5f, -0.5f, 0.0f,
0.5f, 0.5f, 0.0f,
-0.5f, 0.5f, 0.0f
EDIT 2
After #BDL's comment, I have adjusted the vertex data as follows:
float vertices[] = {
/* Position UV */
5.0f, 5.0f, 0.0f, 0.0f, 0.0f,// bottom left
10.0f, 5.0f, 0.0f, 1.0f, 0.0f, // bottom right
10.0f, 10.0f, 0.0f, 1.0f, 1.0f, // top right
5.0f, 10.0f, 0.0f, 0.0f, 1.0f // top left
};
But I still can't see anything on the screen; nothing is being rendered at this point.
As #BDL and others suggested, here is my final render loop, which works like a charm now:
(posting as a reference)
void RenderSprite(const struct Sprite *sprite) {
ASSERT(sprite, "[ERROR]: Can't render a sprite which is NULL\n");
struct Shader *shader = GetSpriteShader(sprite);
UseShader(shader);
mat4 proj;
glm_ortho(0.0f, 800.0f, 600.0f, 0.0f, -1.0f, 1.0f, proj);
mat4 model, view;
glm_mat4_identity(model);
glm_mat4_identity(view);
SetShaderUniformMat4(shader, "u_Projection", proj);
SetShaderUniformMat4(shader, "u_Model", model);
SetShaderUniformMat4(shader, "u_View", view);
glBindVertexArray(GetSpriteVAO(sprite));
glDrawElements(GL_TRIANGLES, GetSpriteIndexCount(sprite), GL_UNSIGNED_INT, 0);
}
And here are my coordinates:
float vertices[] = {
/* Position UV */
350.0f, 350.0f, 0.0f, 0.0f, 0.0f,// bottom left
450.0f, 350.0f, 0.0f, 1.0f, 0.0f, // bottom right
450.0f, 250.0f, 0.0f, 1.0f, 1.0f, // top right
350.0f, 250.0f, 0.0f, 0.0f, 1.0f // top left
};
Also here are my shaders:
const char *default_vs = "#version 330 core\n"
"layout (location = 0) in vec3 pos;\n"
"layout (location = 1) in vec2 uv;\n"
"uniform mat4 u_Model;\n"
"uniform mat4 u_Projection;\n"
"uniform mat4 u_View;\n"
"void main() {\n"
" gl_Position = u_Projection * u_View * u_Model * vec4(pos, 1.0f);\n"
"}";
const char *default_fs = "#version 330 core\n"
"out vec4 color;\n"
"void main() {\n"
" color = vec4(1.0f);\n"
"}";
Hope this helps anyone that is having the same problem!
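As a side note, instead of baking pixel coordinates into the vertex buffer, you could keep the original ±0.5 unit quad and place each sprite through the model matrix. A rough cglm sketch inside RenderSprite (glm_translate and glm_scale modify the matrix in place; the values below are an example that reproduces the same 100x100 sprite centred at 400, 300):
mat4 model;
glm_mat4_identity(model);
glm_translate(model, (vec3){400.0f, 300.0f, 0.0f}); /* sprite centre in pixels */
glm_scale(model, (vec3){100.0f, 100.0f, 1.0f});     /* sprite size in pixels */
SetShaderUniformMat4(shader, "u_Model", model);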

How can I draw a tile from a texture tilemap in an OpenGL shader?

So I have an AtlasTexture that contains all the tiles I need to draw a tile map.
Right now I pass the AtlasTexture through a uniform, and the idea is to change the texture coordinate to select just the portion I need from the atlas.
The issue is that I can only make the fragment shader cut the texture starting from the zero origin. Is it possible to specify an offsetX to tell the shader where I want to start drawing?
float vertices[] = {
// aPosition // aTextureCoordinate
0.0f, 0.0f, 0.0f, 0.0f,
100.0f, 0.0f, 1.0f, 0.0f,
0.0f, 100.0f, 0.0f, 1.0f,
100.0f, 100.0f, 1.0f, 1.0f,
};
uint32_t indices[] = {0, 1, 2, 2, 3, 1};
Vertex shader
#version 330 core
layout(location = 0) in vec2 aPosition;
layout(location = 1) in vec2 aTextureCoordinate;
out vec2 textureCoordinate;
void main() {
gl_Position = vec4( aPosition.x, aPosition.y, 1.0f, 1.0f);
textureCoordinate = vec2(
aTextureCoordinate.x / 3.0f, // This selects the first tile in the uAtlasTexture
aTextureCoordinate.y
);
}
Fragment shader
#version 330 core
in vec2 textureCoordinate;
uniform sampler2D uAtlasTexture; // Has 3 tiles
out vec4 color;
void main() {
color = texture(uAtlasTexture, textureCoordinate);
}
Use a uniform variable for the offset. vec2(1.0/3.0 + aTextureCoordinate.x / 3.0f, aTextureCoordinate.y) selects the 2nd tile. Use a uniform instead of the hard-coded 1.0/3.0:
#version 330 core
layout(location = 0) in vec2 aPosition;
layout(location = 1) in vec2 aTextureCoordinate;
out vec2 textureCoordinate;
uniform float textureOffsetX;
void main() {
gl_Position = vec4( aPosition.x, aPosition.y, 1.0f, 1.0f);
textureCoordinate = vec2(
textureOffsetX + aTextureCoordinate.x / 3.0f,
aTextureCoordinate.y
);
}
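On the CPU side you would then set the uniform once per tile before the draw call. A minimal sketch (the helper name and parameters are hypothetical; it assumes a GL loader header is already included and that the atlas holds tileCount tiles in a single row):
/* Hypothetical helper: selects which tile of the atlas the shader samples. */
void SetTileOffset(GLuint program, int tileIndex, int tileCount)
{
    GLint location = glGetUniformLocation(program, "textureOffsetX");
    glUseProgram(program);
    glUniform1f(location, (float)tileIndex / (float)tileCount);
}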

vertex shader uniform distortion

I have a quad, composed by two triangles, defined like so:
glm::vec3 coords[] = {
glm::vec3(-1.0f, -1.0f, -0.1f),
glm::vec3( 1.0f, -1.0f, -0.1f),
glm::vec3( 1.0f, 1.0f, -0.1f),
glm::vec3(-1.0f, 1.0f, -0.1f)
};
glm::vec3 normals[] = {
glm::vec3(0.0f, 0.0f, 1.0f),
glm::vec3(0.0f, 0.0f, 1.0f),
glm::vec3(0.0f, 0.0f, 1.0f),
glm::vec3(0.0f, 0.0f, 1.0f)
};
glm::vec2 texCoords[] = {
glm::vec2(0.0f, 0.0f),
glm::vec2(1.0f, 0.0f),
glm::vec2(1.0f, 1.0f),
glm::vec2(0.0f, 1.0f)
};
unsigned int indices[] = {
0, 1, 2,
2, 3, 0
};
I'm trying to change the quad's 'height' via a black-and-white JPG, so I wrote a vertex shader to do this; however, the transformation is not being applied to all the points of the quad as I expect. Here's the JPG I'm using:
I expect a sudden, constant bump where the image turns white, but this is what I'm getting: https://i.gyazo.com/639a699e7aa12cda2f644201d787c507.gif. It appears that only the top-left corner reaches the maximum height, and that somehow the whole left triangle is being distorted.
My vertex shader:
layout(location = 0) in vec3 vertex_position;
layout(location = 1) in vec3 vertex_normal;
layout(location = 2) in vec2 vertex_texCoord;
layout(location = 3) in vec4 vertex_color;
out vec2 v_TexCoord;
out vec4 v_Color;
out vec3 v_Position;
out vec3 v_Normal;
//model view projection matrix
uniform mat4 u_MVP;
uniform mat4 u_ModelMatrix;
uniform sampler2D u_Texture1_Height;
void main()
{
v_TexCoord = vertex_texCoord;
v_Color = vertex_color;
v_Normal = mat3(u_ModelMatrix) * vertex_normal;
vec4 texHeight = texture(u_Texture1_Height, v_TexCoord);
vec3 offset = vertex_normal * (texHeight.r + texHeight.g + texHeight.b) * 0.33;
v_Position = vec3(u_ModelMatrix * vec4(vertex_position + offset, 1.0f));
gl_Position = u_MVP * vec4(vertex_position + offset, 1.0f);
}
The bump map is just evaluated per vertex rather than per fragment, because you do the computation in the vertex shader. The vertex shader is just executed for each vertex (for each corner of the quad). Compare Vertex Shader and Fragment Shader.
It is not possible to displace the clip-space coordinate per fragment. You have to tessellate the geometry (the quad) into a lot of small quads. Since the vertex shader is executed for each vertex, the geometry is displaced at each corner point in the mesh. This is the common approach. See this simulation.
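For example, here is a heavily simplified sketch of generating such a tessellated grid on the CPU, covering the same -1..1 range (and z = -0.1f) as the quad above; texture coordinates and normals can be generated the same way ((x/N, y/N) and (0, 0, 1) respectively):
#include <vector>
#include <glm/glm.hpp>

// Builds an N x N grid of small quads (two triangles each) in the -1..1 range.
void BuildGrid(int N, std::vector<glm::vec3>& coords, std::vector<unsigned int>& indices)
{
    for (int y = 0; y <= N; ++y)
        for (int x = 0; x <= N; ++x)
            coords.push_back(glm::vec3(-1.0f + 2.0f * x / N, -1.0f + 2.0f * y / N, -0.1f));

    for (int y = 0; y < N; ++y)
    {
        for (int x = 0; x < N; ++x)
        {
            unsigned int i0 = y * (N + 1) + x; // bottom left of this cell
            unsigned int i1 = i0 + 1;          // bottom right
            unsigned int i2 = i1 + (N + 1);    // top right
            unsigned int i3 = i0 + (N + 1);    // top left
            indices.insert(indices.end(), { i0, i1, i2, i2, i3, i0 });
        }
    }
}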
Another possibility is to implement parallax mapping, where a depth effect is achieved by displacing the texture coordinates and distorting the normal vectors in the fragment shader. See Normal, Parallax and Relief mapping, LearnOpenGL - Parallax Mapping, or Bump Mapping with GLSL.

How to position an object in model / view / world space?

I have a cube which is defined with the centre at 0,0,0 and the edges reaching out to -1/+1 (i.e. the cube has width, height, depth of 2).
I then setup the following matrices:
glm::mat4 modelMat4;
modelMat4 = glm::translate(modelMat4, glm::vec3(0.0f, 0.0f, 200.0f));
modelMat4 = glm::scale(modelMat4, glm::vec3(200.0f, 200.0f, 200.0f));
glm::mat4 viewMat4;
viewMat4 = glm::lookAt(glm::vec3(0.0f, 0.0f, zNear),
glm::vec3(0.0f, 0.0f, zFar),
glm::vec3(0.0f, 1.0f, 0.0f));
// initialWidth = window width
// initialHeight = window height
// zNear = 1.0f
// zFar = 1000.0f
glm::mat4 projectionMat4;
projectionMat4 = glm::frustum(-initialWidth / 2.0f, initialWidth / 2.0f, -initialHeight / 2.0f, initialHeight / 2.0f, zNear, zFar);
But the middle of my object appears at the near z-plane (i.e. I can only see the back half of my cube, from the inside).
If I adjust the model transform to be:
glm::translate(modelMat4, glm::vec3(0.0f, 0.0f, 204.0f));
Then I can see the front side of my cube as well.
If I change the model transform to be:
glm::translate(modelMat4, glm::vec3(0.0f, 0.0f, 250.0f));
Then the cube only rasterises at approx 2x2x2 pixels.
What am I misunderstanding about the model, view and projection matrices? I was expecting the transform to be linear, but the cube effectively disappears between z = 200 and z = 250, even though the near and far planes are defined at 1.0f and 1000.0f.
Edit: My shader code is below:
#version 100
layout(location = 0) in vec3 v_Position;
layout(location = 1) in vec4 v_Colour;
layout(location = 2) uniform mat4 v_ModelMatrix;
layout(location = 3) uniform mat4 v_ViewMatrix;
layout(location = 4) uniform mat4 v_ProjectionMatrix;
out vec4 f_inColour;
void main()
{
gl_Position = v_ProjectionMatrix * v_ViewMatrix * v_ModelMatrix * vec4(v_Position, 1.0);
f_inColour = v_Colour;
}
You didn't show how you are multiplying your matrices, but from what you describe it seems it could be done in the wrong order. Be sure to do it in this order:
MVP = projectionMat4 * viewMat4 * modelMat4;
UPDATED
Looking more carefully at your code, it seems that you are lacking a multiplication to concatenate your transformations:
modelMat4 = glm::translate(modelMat4, glm::vec3(0.0f, 0.0f, 200.0f));
modelMat4 *= glm::scale(modelMat4, glm::vec3(200.0f, 200.0f, 200.0f)); // <-- here
so modelMat4 will be the result of a scale and then a translation.
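For completeness, a minimal sketch of how the three matrices could be uploaded to the explicit uniform locations declared in the shader above (the helper name is hypothetical; it assumes a GL loader header is included, and since glm stores matrices column-major no transpose is needed):
#include <glm/glm.hpp>
#include <glm/gtc/type_ptr.hpp>

// Hypothetical helper: upload model, view and projection to locations 2, 3 and 4.
void UploadMatrices(GLuint program, const glm::mat4& model, const glm::mat4& view, const glm::mat4& projection)
{
    glUseProgram(program);
    glUniformMatrix4fv(2, 1, GL_FALSE, glm::value_ptr(model));      // v_ModelMatrix
    glUniformMatrix4fv(3, 1, GL_FALSE, glm::value_ptr(view));       // v_ViewMatrix
    glUniformMatrix4fv(4, 1, GL_FALSE, glm::value_ptr(projection)); // v_ProjectionMatrix
}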