OpenGL 3.3+ incorrect shadows when using shadow mapping

I am trying to implement shadow mapping in my landscape editor with OpenGL 3.3+. Using a few tutorials I have managed to get my code to compile and run, but the whole landscape is in shadow except for the back row of my landscape grid (smallest z).
I am currently using the same projection, view and model matrices for my light as for the camera (negative z is furthest from the camera).
Initialisation of my projection, view and model matrices (from LWJGL matrix tutorial):
modelPos = new Vector3f(0f, 0f, -20f);
modelAngle = new Vector3f(15f, 0f, 0f);
modelScale = new Vector3f(1f, 1f, 1f);
cameraPos = new Vector3f(-50f, 0f, -120f);
projectionMatrix = new Matrix4f();
float fieldOfView = 120f;
float aspectRatio = (float)width / (float)height;
float near_plane = 0.01f;
float far_plane = 100f;
float y_scale = DepthMatrixUtility.coTangent(DepthMatrixUtility.degreesToRadians(fieldOfView / 2f));
float x_scale = y_scale / aspectRatio;
float frustum_length = far_plane - near_plane;
projectionMatrix.m00 = x_scale;
projectionMatrix.m11 = y_scale;
projectionMatrix.m22 = -((far_plane + near_plane) / frustum_length);
projectionMatrix.m23 = -1;
projectionMatrix.m32 = -((2 * near_plane * far_plane) / frustum_length);
Binding my matrices when displaying the scene:
Matrix4f.translate(cameraPos, viewMatrix, viewMatrix);
Matrix4f.scale(modelScale, modelMatrix, modelMatrix);
Matrix4f.translate(modelPos, modelMatrix, modelMatrix);
Matrix4f.rotate(DepthMatrixUtility.degreesToRadians(modelAngle.z), new Vector3f(0, 0, 1), modelMatrix, modelMatrix);
Matrix4f.rotate(DepthMatrixUtility.degreesToRadians(modelAngle.y), new Vector3f(0, 1, 0), modelMatrix, modelMatrix);
Matrix4f.rotate(DepthMatrixUtility.degreesToRadians(modelAngle.x), new Vector3f(1, 0, 0), modelMatrix, modelMatrix);
matrix = new Matrix4f();
Matrix4f.mul(matrix, projectionMatrix, matrix);
Matrix4f.mul(matrix, viewMatrix, matrix);
Matrix4f.mul(matrix, modelMatrix, matrix);
matrix.store(matrix44Buffer);
matrix44Buffer.flip();
matrixLocation = GL20.glGetUniformLocation(pId, "matrix");
GL20.glUniformMatrix4(matrixLocation, false, matrix44Buffer);
I have tested my FBO by storing colour in the fragment shader: the height map displays correctly (I drew the FBO texture to a small quad in the corner of my screen) and updates as I alter the height map.
I then modified my FBO to store the depth in a texture on the first pass:
depthTexture = GL11.glGenTextures();
GL11.glBindTexture(GL11.GL_TEXTURE_2D, depthTexture);
GL11.glTexImage2D(GL11.GL_TEXTURE_2D, 0, GL11.GL_DEPTH_COMPONENT, Window.getScreenWidth(), Window.getScreenHeight(), 0, GL11.GL_DEPTH_COMPONENT, GL11.GL_UNSIGNED_BYTE, (ByteBuffer)null);
GL11.glTexParameteri(GL11.GL_TEXTURE_2D, GL11.GL_TEXTURE_WRAP_S, GL12.GL_CLAMP_TO_EDGE);
GL11.glTexParameteri(GL11.GL_TEXTURE_2D, GL11.GL_TEXTURE_WRAP_T, GL12.GL_CLAMP_TO_EDGE);
GL11.glTexParameteri(GL11.GL_TEXTURE_2D, GL11.GL_TEXTURE_MAG_FILTER, GL11.GL_LINEAR);
GL11.glTexParameteri(GL11.GL_TEXTURE_2D, GL11.GL_TEXTURE_MIN_FILTER, GL11.GL_LINEAR);
GL11.glBindTexture(GL11.GL_TEXTURE_2D, 0);
fboId = GL30.glGenFramebuffers();
GL30.glBindFramebuffer(GL30.GL_FRAMEBUFFER, fboId);
GL11.glDrawBuffer(GL11.GL_NONE);
GL11.glReadBuffer(GL11.GL_NONE);
GL32.glFramebufferTexture(GL30.GL_FRAMEBUFFER, GL30.GL_DEPTH_ATTACHMENT, depthTexture, 0);
verifyFBO();
My vertex shader for the first pass (Creating the shadow map):
#version 330 core
uniform mat4 matrix;
in vec4 in_Position;
void main(void)
{
gl_Position = matrix * in_Position;
}
My fragment shader for the first pass:
#version 330 core
layout(location = 0) out float fragmentdepth;
void main(void)
{
fragmentdepth = gl_FragCoord.z;
}
My bias matrix:
[0.5f, 0.0f, 0.0f, 0.0f]
[0.0f, 0.5f, 0.0f, 0.0f]
[0.0f, 0.0f, 0.5f, 0.0f]
[0.5f, 0.5f, 0.5f, 1.0f]
My vertex shader for the second pass (rendering the scene using the shadow map):
void main(void)
{
gl_Position = matrix * in_Position;
ShadowCoord = biasMatrix * lightMatrix * in_Position;
}
My fragment shader for the second pass:
if (texture(shadowMap, ShadowCoord.xy).z < ShadowCoord.z)
{
vec4 colour = 0.5 * out_Colour;
out_Colour = vec4(colour[0], colour[1], colour[2], 1.0f);
}

After transforming in_Position with lightMatrix, the result is not projected onto the screen yet.
The actual perspective projection is applied by dividing by the w component.
That perspective division gives you texture coordinates and depth in the [-1, 1] range, and only at that point should the biasMatrix transform them to the [0, 1] range.
So you shouldn't multiply by biasMatrix in the vertex shader. Instead, in your fragment shader, before the line
if (texture(shadowMap, ShadowCoord.xy).z < ShadowCoord.z)
add
ShadowCoord.xyz /= ShadowCoord.w;
ShadowCoord = biasMatrix * ShadowCoord;
The biasMatrix content you're showing should be stored transposed in memory. If in doubt, replace the matrix product with
ShadowCoord.xyz = ShadowCoord.xyz * 0.5 + vec3(0.5);
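For reference, here is the same chain written out on the CPU side with GLM (a minimal C++ sketch, not from the original thread; lightMatrix stands for the light's projection * view * model, and the question's own code is Java/LWJGL):
#include <glm/glm.hpp>
// Project a world-space position into shadow-map space: clip space first,
// then the perspective division to NDC, then the [0, 1] bias remap.
glm::vec3 toShadowMapSpace(const glm::mat4 &lightMatrix, const glm::vec4 &position)
{
    glm::vec4 clip = lightMatrix * position;   // clip space, not yet projected
    glm::vec3 ndc  = glm::vec3(clip) / clip.w; // perspective division -> [-1, 1]
    return ndc * 0.5f + 0.5f;                  // bias -> [0, 1]: xy = texcoord, z = depth
}
The .xy of the result is what you would sample the shadow map with, and the .z is the depth to compare against.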

Related

How to Rotate a Quad

I'm trying to make a quad rotate around its center. I am using glm::rotate() and setting the quad to rotate on the z axis. However, when I do this it gives a weird effect: the quad stretches and warps. It almost looks 3D, but since I am rotating it around the z axis that shouldn't happen, right?
Here's relevant code for context:
float rotation = 0.0f;
double prevTime = glfwGetTime();
while (!glfwWindowShouldClose(window))
{
GLCall(glClearColor(0.0f, 0.0f, 0.0f, 1.0f));
GLCall(glClear(GL_COLOR_BUFFER_BIT));
updateInput(window);
shader.Use();
glUniform1f(xMov, x);
glUniform1f(yMov, y);
test.Bind();
double crntTime = glfwGetTime();
if (crntTime - prevTime >= 1.0 / 60.0)
{
rotation += 0.5f;
prevTime = crntTime;
}
glm::mat4 model = glm::mat4(1.0f);
model = glm::rotate(model, glm::radians(rotation), glm::vec3(0.0f, 0.0f, 1.0f));
int modelLoc = glGetUniformLocation(shader.id, "model");
glUniformMatrix4fv(modelLoc, 1, GL_FALSE, glm::value_ptr(model));
vao.Bind();
vBuffer1.Bind();
iBuffer1.Bind();
GLCall(glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0));
glfwSwapBuffers(window);
glfwPollEvents();
}
Shader:
#version 440 core
layout(location = 0) in vec3 aPos;
layout(location = 1) in vec3 aColor;
layout(location = 2) in vec2 aTex;
out vec3 color;
out vec2 texCoord;
uniform float xMove;
uniform float yMove;
uniform mat4 model;
void main()
{
gl_Position = model * vec4(aPos.x + xMove, aPos.y + yMove, aPos.z, 1.0);
color = aColor;
texCoord = aTex;
}
Without seeing the graphical output it is hard to say.
Your first issue is that you are not rotating around the center. To rotate about the center you must offset the quad so that its center is at (0, 0), rotate, and then offset it back to its original position. But you have this line:
gl_Position = model * vec4(aPos.x + xMove, aPos.y + yMove, aPos.z, 1.0);
Under the assumption that the quad was at the origin to begin with, you are rotating it around the point (-xMove, -yMove).
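Assuming the quad's vertices are centered on the origin, a minimal GLM sketch of the fix is to fold the movement into the model matrix after the rotation, so the quad spins about its own center:
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
// Hypothetical replacement for the loop body: translate after rotating.
// Since model is applied right-to-left to each vertex, the rotation happens
// first, about the quad's own center at the origin, and the offset comes last.
glm::mat4 model = glm::mat4(1.0f);
model = glm::translate(model, glm::vec3(x, y, 0.0f)); // move the whole quad
model = glm::rotate(model, glm::radians(rotation), glm::vec3(0.0f, 0.0f, 1.0f));
The vertex shader would then drop the xMove/yMove offsets and use gl_Position = model * vec4(aPos, 1.0); instead.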

Opengl vertex coordinates for perspective projection

I need to use a perspective transformation, but I can't understand how to define the model coordinates of a sprite. With an orthogonal projection I can define the coordinates of each vertex as a number of pixels on the screen, but with a perspective projection I can't.
Orthogonal projection:
glm::ortho<GLfloat>(0.0f, screen_width, screen_height, 0.0f, 1.0f, -1.0f);
Perspective:
glm::perspective(glm::radians(45.f), (float)screen_width / (float)screen_height, 0.1f, 100.f);
Vertex shader:
#version 330 core
layout (std140) uniform Matrices
{
mat4 ProjectionMatrix;
mat4 ViewMatrix;
mat4 ModelMatrix;
};
layout (location = 0) in vec2 position;
layout (location = 1) in vec2 inTexCoords;
out vec2 TextureCoords;
void main()
{
TextureCoords = inTexCoords;
gl_Position = ProjectionMatrix * ViewMatrix * ModelMatrix * vec4(position, 1.f, 1.0);
}
For example
vertices[1] = 0.f;
vertices[8] = 0.f;
vertices[12] = 0.f;
vertices[13] = 0.f;
for (GLuint i = 0; i < m_totSprites; ++i) {
// Vertex pos
vertices[0] = m_clips[i].w;
vertices[4] = vertices[0];
vertices[5] = m_clips[i].h;
vertices[9] = vertices[5];
// Texture pos
vertices[2] = (m_clips[i].x + m_clips[i].w) / tw;
vertices[3] = (m_clips[i].y + m_clips[i].h) / th;
vertices[6] = (m_clips[i].x + m_clips[i].w) / tw;
vertices[7] = m_clips[i].y / th;
vertices[10] = m_clips[i].x / tw;
vertices[11] = m_clips[i].y / th;
vertices[14] = m_clips[i].x / tw;
vertices[15] = (m_clips[i].y + m_clips[i].h) / th;
It works well with an orthogonal projection. How can I define vertex coordinates for perspective?
What is the difference between model coordinates in an orthogonal projection and in a perspective one? Why is it easy to set vertex coordinates in pixel sizes in the first case, while in all the perspective examples they are normalized between -0.5 and 0.5? Is that necessary?
Initially I misunderstood the difference between orthogonal and perspective projections. As I understand it now, with a perspective projection all vertices are initially mapped in NDC and are then moved, scaled, etc. with the model matrix. Pixel-perfect rendering can be achieved only at some constant depth or with an orthogonal projection; it isn't useful for 3D with a perspective projection.
If you have a projection matrix, you need a view matrix too; there's glm::lookAt(), for example. I usually use this combo:
glm::lookAt(glm::vec3(-1.2484, 0.483, 1.84384), glm::vec3(-0.3801, -0.4183, -3.15), glm::vec3(0., 0.2, 0.))
glm::perspective(45., 1., 1.2, 300.)
glm::mat4(1.)
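A minimal sketch of how those three matrices might be combined and uploaded (the uniform name MVP, the program handle and the eye/target values here are assumptions, not from the answer):
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
glm::mat4 projection = glm::perspective(glm::radians(45.0f),
                                        (float)screen_width / (float)screen_height,
                                        0.1f, 100.0f);
glm::mat4 view = glm::lookAt(glm::vec3(0.0f, 0.0f, 3.0f),  // eye position
                             glm::vec3(0.0f, 0.0f, 0.0f),  // look-at target
                             glm::vec3(0.0f, 1.0f, 0.0f)); // up direction
glm::mat4 model = glm::mat4(1.0f);
// Applied right to left: model first, then view, then projection.
glm::mat4 mvp = projection * view * model;
glUniformMatrix4fv(glGetUniformLocation(program, "MVP"), 1, GL_FALSE, glm::value_ptr(mvp));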

How do you calculate a perspective projection matrix?

Right now I have the ability to scale, rotate, and translate points by using a matrix.
// I use a left to right multiplying style (scale, rotate, then translate)
Matrix model = Matrix::Scale(0.4f) * Matrix::Rotation(45.0f, Vector3(0.0f, 0.0f, 1.0f)) * Matrix::Translation(Vector3(0.0f, 0.5f)).Transposed();
// vertex shader code
#version 460 core
layout (location = 0) in vec3 vertexPosition;
uniform mat4 model;
void main() {
gl_Position = model * vec4(vertexPosition, 1.0);
}
The main problem I'm having is creating a perspective projection matrix.
static Matrix Projection(float verticalFoV, float aspectRatio, float zNear, float zFar) {
// is this even correct?
float yScale = (1.0f / tan(verticalFoV / 2.0f)) * aspectRatio;
float xScale = yScale / aspectRatio;
float frustumLength = zFar - zNear;
return Matrix({
xScale, 0, 0, 0,
0, yScale, 0, 0,
0, 0, -((zFar + zNear) / frustumLength), -((2.0f * zNear * zFar) / frustumLength),
0, 0, -1.0f, 0
});
}
Which would then be used like this.
Matrix projection = Matrix::Projection(70.0f * DegreesToRadians, screenWidth / screenHeight, 0.1f, 100.0f);
I send over the matrices without transposing them.
glUniformMatrix4fv(glGetUniformLocation(shaderProgram, "model"), 1, false, &model[0][0]);
glUniformMatrix4fv(glGetUniformLocation(shaderProgram, "projection"), 1, false, &projection[0][0]);
And I want to be able to multiply them left to right in the vertex shader.
#version 460 core
layout (location = 0) in vec3 vertexPosition;
uniform mat4 model;
uniform mat4 projection;
void main() {
// I'm eventually gonna add view so it'd look like this
// gl_Position = model * view * projection * vec4(vertexPosition, 1.0);
gl_Position = model * projection * vec4(vertexPosition, 1.0);
}
P.S.: I want to use a left-handed coordinate system (Right = +X, Up = +Y, Forward = +Z).
OpenGL matrices are stored in column-major order, but your matrices are stored in row-major order. Hence, you have to multiply the vector by the matrices from the left. Instead of
gl_Position = model * projection * vec4(vertexPosition, 1.0);
it has to be
gl_Position = vec4(vertexPosition, 1.0) * model * projection;
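An alternative (a sketch, not part of the original answer) is to keep the conventional matrix-times-vector order in the shader and let OpenGL transpose the row-major data on upload; glUniformMatrix4fv takes a transpose flag for exactly this purpose:
// GL_TRUE asks OpenGL to transpose the row-major matrices on upload, so the
// shader can keep the usual column-major order: projection * model * vertex.
glUniformMatrix4fv(glGetUniformLocation(shaderProgram, "model"),
                   1, GL_TRUE, &model[0][0]);
glUniformMatrix4fv(glGetUniformLocation(shaderProgram, "projection"),
                   1, GL_TRUE, &projection[0][0]);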

OpenGL Projection Matrix showing Orthographic

I got an orthographic camera working; however, I wanted to try to implement a perspective camera so I can do some parallax effects later down the line. I am having some issues when trying to implement it: it seems like the depth is not working correctly. I am rotating a 2D image along the x-axis to simulate it lying somewhat flat, so I can see the projection matrix working. It is still showing as orthographic, though.
Here is some of my code:
CameraPersp::CameraPersp() :
_camPos(0.0f,0.0f,0.0f), _modelMatrix(1.0f), _viewMatrix(1.0f), _projectionMatrix(1.0f)
The init function, called to set up the matrix variables:
void CameraPersp::init(int screenWidth, int screenHeight)
{
_screenHeight = screenHeight;
_screenWidth = screenWidth;
_modelMatrix = glm::translate(_modelMatrix, glm::vec3(0.0f, 0.0f, 0.0f));
_modelMatrix = glm::rotate(_modelMatrix, glm::radians(-55.0f), glm::vec3(1.0f, 0.0f, 0.0f));
_viewMatrix = glm::translate(_viewMatrix, glm::vec3(0.0f, 0.0f, -3.0f));
_projectionMatrix = glm::perspective(glm::radians(45.0f), static_cast<float>(_screenWidth) / _screenHeight, 0.1f, 100.0f);
}
Initializing a texture to be loaded in with (x, y, z, width, height, src):
_sprites.back()->init(-0.5f, -0.5f, 0.0f, 1.0f, 1.0f, "src/content/sprites/DungeonCrawlStoneSoupFull/monster/deep_elf_death_mage.png");
Sending the matrices to the vertex shader:
GLint mLocation = _colorProgram.getUniformLocation("M");
glm::mat4 mMatrix = _camera.getMMatrix();
//glUniformMatrix4fv(mLocation, 1, GL_FALSE, &(mMatrix[0][0]));
glUniformMatrix4fv(mLocation, 1, GL_FALSE, glm::value_ptr(mMatrix));
GLint vLocation = _colorProgram.getUniformLocation("V");
glm::mat4 vMatrix = _camera.getVMatrix();
//glUniformMatrix4fv(vLocation, 1, GL_FALSE, &(vMatrix[0][0]));
glUniformMatrix4fv(vLocation, 1, GL_FALSE, glm::value_ptr(vMatrix));
GLint pLocation = _colorProgram.getUniformLocation("P");
glm::mat4 pMatrix = _camera.getPMatrix();
//glUniformMatrix4fv(pLocation, 1, GL_FALSE, &(pMatrix[0][0]));
glUniformMatrix4fv(pLocation, 1, GL_FALSE, glm::value_ptr(pMatrix));
Here is my vertex shader:
#version 460
//The vertex shader operates on each vertex
//input data from VBO. Each vertex is 2 floats
in vec3 vertexPosition;
in vec4 vertexColor;
in vec2 vertexUV;
out vec3 fragPosition;
out vec4 fragColor;
out vec2 fragUV;
//uniform mat4 MVP;
uniform mat4 M;
uniform mat4 V;
uniform mat4 P;
void main() {
//Set the x,y position on the screen
//gl_Position.xy = vertexPosition;
gl_Position = M * V * P * vec4(vertexPosition, 1.0);
//the z position is zero since we are 2d
//gl_Position.z = 0.0;
//indicate that the coordinates are normalized
gl_Position.w = 1.0;
fragPosition = vertexPosition;
fragColor = vertexColor;
// opengl needs to flip the coordinates
fragUV = vec2(vertexUV.x, 1.0 - vertexUV.y);
}
I can see the image "squish" a little because it is still rendering the perspective as orthographic. If I remove the rotation on the x-axis, it is no longer squished because it isn't lying down at all. Any thoughts on what I am doing wrong? I can supply more info on request, but I think I've included the meat of things.
You shouldn't modify gl_Position.w:
gl_Position = M * V * P * vec4(vertexPosition, 1.0); // gl_Position is good
// "indicate that the coordinates are normalized" <- not true
gl_Position.w = 1.0; // now the perspective divisor is lost and the projection isn't correct
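To see what that divisor does (a short GLM sketch, not from the answer): after the vertex shader runs, the GPU divides gl_Position.xyz by gl_Position.w, and that division is what produces the perspective foreshortening. Overwriting w with 1.0 turns it into a no-op:
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
glm::mat4 P = glm::perspective(glm::radians(45.0f), 800.0f / 600.0f, 0.1f, 100.0f);
glm::vec4 clip = P * glm::vec4(1.0f, 1.0f, -10.0f, 1.0f); // a view-space point
// The perspective divide the rasterizer performs; distant points get a larger
// clip.w, so they shrink on screen. With w forced to 1.0 nothing shrinks.
glm::vec3 ndc = glm::vec3(clip) / clip.w;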

OpenGL Orthographic Projections

I'm attempting to set up an orthographic projection in OpenGL, but I can't seem to find why this triangle is not rendering correctly (it isn't visible). I have used a perspective projection with the same code (apart from my vertex coordinates and projection matrix, of course) and it works fine. I construct the triangle vertices as:
Vertex vertices[] = { Vertex(glm::vec3(0, 600, 0.0), glm::vec2(0.0, 0.0)),
Vertex(glm::vec3(300, 0, 0.0), glm::vec2(0.5, 1.0)),
Vertex(glm::vec3(800 , 600, 0.0), glm::vec2(1.0, 0.0)) };
My camera constructor is:
Camera::Camera(const glm::vec3& pos, int width, int height) {
ortho = glm::ortho(0, width, height, 0, 0, 1000);
this->position = pos;
this->up = glm::vec3(0.0f, 1.0f, 0.0f);
this->forward = glm::vec3(0.0f, 0.0f, 1.0f);
}
I call this as:
camera = Camera(glm::vec3(0, 0, 2), window->getSize().x, window->getSize().y);
Where the window is 800 by 600 pixels. I am uploading a transform to the shader via the function:
void Shader::update(const Transform& transform, const Camera& camera) {
glm::mat4 model = camera.getProjection() * transform.getModel();
glUniformMatrix4fv(uniforms[TRANSFORM_U], 1, GL_FALSE, &model[0][0]);
}
In which camera.getProjection() is:
glm::mat4 Camera::getProjection() const {
return ortho * glm::lookAt(position, glm::vec3(0, 0, 0), up);
}
And transform.getModel() is:
glm::mat4 Transform::getModel() const {
glm::mat4 posMat = glm::translate(pos);
glm::quat rotQuat = glm::quat(glm::radians(rot));
glm::mat4 rotMat = glm::toMat4(rotQuat);
glm::mat4 scaleMat = glm::scale(scl);
return posMat * rotMat * scaleMat;
}
Though I suspect the problem lies in my setup of the orthographic projection rather than my transforms, as this worked fine for a perspective projection. Can anyone see why the triangle rendered with these coordinates is not visible? I am binding my shader and uploading the projection matrix to it before rendering the mesh. If it helps, my vertex shader is:
#version 120
attribute vec3 position;
attribute vec2 texCoord;
varying vec2 texCoord0;
uniform mat4 transform;
void main()
{
gl_Position = transform * vec4(position, 1.0);
texCoord0 = texCoord;
}
For anyone interested, the issue was with:
ortho = glm::ortho(0, width, height, 0, 0, 1000);
where the arguments are supplied as integers, not floats. glm::ortho is templated on its argument type, so the integer division applied within it created an incorrect orthographic projection matrix.
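The fix is to pass floats explicitly (or otherwise force the float overload), for example:
ortho = glm::ortho(0.0f, (float)width, (float)height, 0.0f, 0.0f, 1000.0f);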