This is my CAMERA struct, which builds the viewMat uniform variable for the vertex shader.
struct CAMERA {
    glm::vec3 EYE{ 0.0f,0.0f,150.0f };
    glm::vec3 AT{ 0.0f,0.0f,0.0f };
    glm::vec3 UP{ 0.0f,1.0f,0.0f };

    glm::vec3 Dir() { return glm::normalize(this->EYE - this->AT); }
    glm::vec3 Right() { return glm::normalize(glm::cross(this->UP, this->Dir())); }
    glm::vec3 Up() { return glm::normalize(glm::cross(this->Dir(), this->Right())); }

    glm::mat4 view_M() {
        glm::vec3 cameraDirection = glm::normalize(this->EYE - this->AT);
        glm::vec3 cameraRight = glm::normalize(glm::cross(this->UP, cameraDirection));
        glm::vec3 cameraUp = glm::normalize(glm::cross(cameraDirection, cameraRight));
        return glm::lookAt(this->EYE, cameraDirection, cameraUp);
    }
} camera;
I want to add camera movement: up, down, left, and right.
I thought the following would work, but it doesn't behave the way I expect:
// go up
glm::vec3 t{ camera.Up() };
camera.EYE = glm::translate(glm::mat4(1.0f), t) * glm::vec4(camera.EYE,1.0f);
camera.AT = glm::translate(glm::mat4(1.0f), t) * glm::vec4(camera.AT, 1.0f);
This is what I expected: the camera moves up, so the objects appear to move down (the red dot is not a real object).
How can I implement camera movement with this CAMERA struct?
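For reference, translating a point by t is just adding t to it, so the matrix-based move above should be equivalent to this direct form (a minimal sketch; it assumes view_M() is called every frame so the view matrix is rebuilt from the updated EYE and AT):

// go up: shift both the eye and the look-at target along the camera's up vector
glm::vec3 t = camera.Up();
camera.EYE += t;
camera.AT += t;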
This is my satellite movement, orbiting around (0, 0, 0); this code works correctly:

if (up) {
    glm::mat4 R = glm::rotate(glm::mat4(1.0f), glm::radians(-degree), camera.Right());
    camera.EYE = glm::vec3(R * glm::vec4(camera.EYE, 1.0f));
    camera.UP = glm::vec3(R * glm::vec4(camera.UP, 1.0f));
}
if (down) {
    glm::mat4 R = glm::rotate(glm::mat4(1.0f), glm::radians(degree), camera.Right());
    camera.EYE = glm::vec3(R * glm::vec4(camera.EYE, 1.0f));
    camera.UP = glm::vec3(R * glm::vec4(camera.UP, 1.0f));
}
if (right) {
    glm::mat4 R = glm::rotate(glm::mat4(1.0f), glm::radians(degree), camera.Up());
    camera.EYE = glm::vec3(R * glm::vec4(camera.EYE, 1.0f));
    camera.UP = glm::vec3(R * glm::vec4(camera.UP, 1.0f));
}
if (left) {
    glm::mat4 R = glm::rotate(glm::mat4(1.0f), glm::radians(-degree), camera.Up());
    camera.EYE = glm::vec3(R * glm::vec4(camera.EYE, 1.0f));
    camera.UP = glm::vec3(R * glm::vec4(camera.UP, 1.0f));
}
I'm trying to calculate a tight ortho projection around the camera for better shadow mapping. I first calculate the camera frustum's 8 corner points in world space using basic trigonometry, from the fov, position, right, forward, near, and far parameters of the camera, as follows:
PerspectiveFrustum::PerspectiveFrustum(const Camera* camera)
{
    float height = tanf(camera->GetFov() / 2.0f) * camera->GetNear();
    float width = height * Screen::GetWidth() / Screen::GetHeight();

    glm::vec3 nearTop = camera->GetUp() * camera->GetNear() * height;
    glm::vec3 nearRight = camera->GetRight() * camera->GetNear() * width;
    glm::vec3 nearCenter = camera->GetEye() + camera->GetForward() * camera->GetNear();
    glm::vec3 farTop = camera->GetUp() * camera->GetFar() * height;
    glm::vec3 farRight = camera->GetRight() * camera->GetFar() * width;
    glm::vec3 farCenter = camera->GetEye() + camera->GetForward() * camera->GetFar();

    m_RightNearBottom = nearCenter + nearRight - nearTop;
    m_RightNearTop = nearCenter + nearRight + nearTop;
    m_LeftNearBottom = nearCenter - nearRight - nearTop;
    m_LeftNearTop = nearCenter - nearRight + nearTop;
    m_RightFarBottom = farCenter + nearRight - nearTop;
    m_RightFarTop = farCenter + nearRight + nearTop;
    m_LeftFarBottom = farCenter - nearRight - nearTop;
    m_LeftFarTop = farCenter - nearRight + nearTop;
}
Then I calculate the frustum in light view and calculating the min and max point in each axis to calculate the bounding box of the ortho projection as follows:
inline glm::mat4 GetView() const
{
return glm::lookAt(m_Position, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f));
}
glm::mat4 DirectionalLight::GetProjection(const Camera& camera) const
{
PerspectiveFrustum frustum = camera.GetFrustum();
glm::mat4 lightView = GetView();
std::array<glm::vec3, 8> frustumToLightView
{
lightView * glm::vec4(frustum.m_RightNearBottom, 1.0f),
lightView * glm::vec4(frustum.m_RightNearTop, 1.0f),
lightView * glm::vec4(frustum.m_LeftNearBottom, 1.0f),
lightView * glm::vec4(frustum.m_LeftNearTop, 1.0f),
lightView * glm::vec4(frustum.m_RightFarBottom, 1.0f),
lightView * glm::vec4(frustum.m_RightFarTop, 1.0f),
lightView * glm::vec4(frustum.m_LeftFarBottom, 1.0f),
lightView * glm::vec4(frustum.m_LeftFarTop, 1.0f)
};
glm::vec3 min{ INFINITY, INFINITY, INFINITY };
glm::vec3 max{ -INFINITY, -INFINITY, -INFINITY };
for (unsigned int i = 0; i < frustumToLightView.size(); i++)
{
if (frustumToLightView[i].x < min.x)
min.x = frustumToLightView[i].x;
if (frustumToLightView[i].y < min.y)
min.y = frustumToLightView[i].y;
if (frustumToLightView[i].z < min.z)
min.z = frustumToLightView[i].z;
if (frustumToLightView[i].x > max.x)
max.x = frustumToLightView[i].x;
if (frustumToLightView[i].y > max.y)
max.y = frustumToLightView[i].y;
if (frustumToLightView[i].z > max.z)
max.z = frustumToLightView[i].z;
}
return glm::ortho(min.x, max.x, min.y, max.y, min.z, max.z);
}
Doing this gives me an empty shadow map, so something is clearly wrong and I haven't been doing this right. Can someone tell me what I'm doing wrong, and why?
EDIT:
As said, my frustum calculations were wrong, and I've changed them to the following:
PerspectiveFrustum::PerspectiveFrustum(const Camera* camera)
{
    float nearHalfHeight = tanf(camera->GetFov() / 2.0f) * camera->GetNear();
    float nearHalfWidth = nearHalfHeight * Screen::GetWidth() / Screen::GetHeight();
    float farHalfHeight = tanf(camera->GetFov() / 2.0f) * camera->GetFar();
    float farHalfWidth = farHalfHeight * Screen::GetWidth() / Screen::GetHeight();

    glm::vec3 nearCenter = camera->GetEye() + camera->GetForward() * camera->GetNear();
    glm::vec3 nearTop = camera->GetUp() * nearHalfHeight;
    glm::vec3 nearRight = camera->GetRight() * nearHalfWidth;
    glm::vec3 farCenter = camera->GetEye() + camera->GetForward() * camera->GetFar();
    glm::vec3 farTop = camera->GetUp() * farHalfHeight;
    glm::vec3 farRight = camera->GetRight() * farHalfWidth;

    m_RightNearBottom = nearCenter + nearRight - nearTop;
    m_RightNearTop = nearCenter + nearRight + nearTop;
    m_LeftNearBottom = nearCenter - nearRight - nearTop;
    m_LeftNearTop = nearCenter - nearRight + nearTop;
    m_RightFarBottom = farCenter + farRight - farTop;
    m_RightFarTop = farCenter + farRight + farTop;
    m_LeftFarBottom = farCenter - farRight - farTop;
    m_LeftFarTop = farCenter - farRight + farTop;
}
I also flipped the z coordinates when creating the ortho projection, as follows:
return glm::ortho(min.x, max.x, min.y, max.y, -min.z, -max.z);
Yet still nothing renders to the depth map. Any ideas?
Here are the captured results. As you can see, the top-left corner quad shows the shadow map, which is completely wrong; as a result, shadows are even drawn on the objects themselves:
https://gfycat.com/brightwealthybass
(The smearing of the shadow-map values is just an artifact of the gif compressor I used; it doesn't really happen, so this isn't a case of me failing to clear the z-buffer of the FBO.)
EDIT2::
OK, a few things: GetFov() returned degrees and not radians; I've changed it.
I also tried the transformation from NDC to world space with the following code:
glm::mat4 inverseProjectViewMatrix = glm::inverse(camera.GetProjection() * camera.GetView());
std::array<glm::vec4, 8> NDC =
{
glm::vec4{-1.0f, -1.0f, -1.0f, 1.0f},
glm::vec4{1.0f, -1.0f, -1.0f, 1.0f},
glm::vec4{-1.0f, 1.0f, -1.0f, 1.0f},
glm::vec4{1.0f, 1.0f, -1.0f, 1.0f},
glm::vec4{-1.0f, -1.0f, 1.0f, 1.0f},
glm::vec4{1.0f, -1.0f, 1.0f, 1.0f},
glm::vec4{-1.0f, 1.0f, 1.0f, 1.0f},
glm::vec4{1.0f, 1.0f, 1.0f, 1.0f},
};
for (size_t i = 0; i < NDC.size(); i++)
{
NDC[i] = inverseProjectViewMatrix * NDC[i];
NDC[i] /= NDC[i].w;
}
The far corners of the frustum are equal to my calculation, but the near corners are off, as if my calculated near corners were halved (for x and y only).
For example:
RIGHT TOP NEAR CORNER:
my calculation yields - {0.055, 0.041, 2.9}
inverse NDC yields - {0.11, 0.082, 2.8}
So I'm not sure where my calculation went wrong; maybe you could point it out?
Even with the inverse NDC coordinates, I tried using them as follows:
glm::mat4 DirectionalLight::GetProjection(const Camera& camera) const
{
glm::mat4 lightView = GetView();
glm::mat4 inverseProjectViewMatrix = glm::inverse(camera.GetProjection() * camera.GetView());
std::array<glm::vec4, 8> NDC =
{
glm::vec4{-1.0f, -1.0f, 0.0f, 1.0f},
glm::vec4{1.0f, -1.0f, 0.0f, 1.0f},
glm::vec4{-1.0f, 1.0f, 0.0f, 1.0f},
glm::vec4{1.0f, 1.0f, 0.0f, 1.0f},
glm::vec4{-1.0f, -1.0f, 1.0f, 1.0f},
glm::vec4{1.0f, -1.0f, 1.0f, 1.0f},
glm::vec4{-1.0f, 1.0f, 1.0f, 1.0f},
glm::vec4{1.0f, 1.0f, 1.0f, 1.0f},
};
for (size_t i = 0; i < NDC.size(); i++)
{
NDC[i] = lightView * inverseProjectViewMatrix * NDC[i];
NDC[i] /= NDC[i].w;
}
glm::vec3 min{ INFINITY, INFINITY, INFINITY };
glm::vec3 max{ -INFINITY, -INFINITY, -INFINITY };
for (unsigned int i = 0; i < NDC.size(); i++)
{
if (NDC[i].x < min.x)
min.x = NDC[i].x;
if (NDC[i].y < min.y)
min.y = NDC[i].y;
if (NDC[i].z < min.z)
min.z = NDC[i].z;
if (NDC[i].x > max.x)
max.x = NDC[i].x;
if (NDC[i].y > max.y)
max.y = NDC[i].y;
if (NDC[i].z > max.z)
max.z = NDC[i].z;
}
return glm::ortho(min.x, max.x, min.y, max.y, min.z, max.z);
}
And still got a bad result:
https://gfycat.com/negativemalealtiplanochinchillamouse
Let's start with your frustum calculation here:
float height = tanf(camera->GetFov() / 2.0f) * camera->GetNear();
[...]
glm::vec3 nearTop = camera->GetUp() * camera->GetNear() * height;
[...]
glm::vec3 farTop = camera->GetUp() * camera->GetFar() * height;
That's one too many GetNear factors in your multiplications. Conceptually, you could let height represent half of the frustum height at unit distance (I would still name it differently) without projecting it onto the near plane; then the rest of your formulas make more sense.
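With that fix, the quoted lines might read as follows (a sketch using the question's own names; it assumes GetFov() now returns radians):

float height = tanf(camera->GetFov() / 2.0f);   // half of the frustum height at unit distance
float width = height * Screen::GetWidth() / Screen::GetHeight();
glm::vec3 nearTop = camera->GetUp() * camera->GetNear() * height;
glm::vec3 farTop = camera->GetUp() * camera->GetFar() * height;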
However, the whole approach is doubtful to begin with. To get the frustum corners in world space, you can simply unproject all 8 vertices of the [-1,1]^3 NDC cube. Since you want to transform them into your light space, you can even combine everything into a single matrix m = lightView * inverse(projection * view); just don't forget the perspective divide after multiplying the NDC cube vertices.
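Concretely, that unprojection might look like this (a sketch; it reuses the camera and lightView objects from the question and assumes the GLM and <array> headers are included):

glm::mat4 m = lightView * glm::inverse(camera.GetProjection() * camera.GetView());
std::array<glm::vec4, 8> corners;
int i = 0;
for (int x = -1; x <= 1; x += 2)          // enumerate the 8 corners of the [-1,1]^3 NDC cube
    for (int y = -1; y <= 1; y += 2)
        for (int z = -1; z <= 1; z += 2)
            corners[i++] = glm::vec4((float)x, (float)y, (float)z, 1.0f);
for (auto& c : corners)
{
    c = m * c;    // NDC corner -> light view space (still homogeneous)
    c /= c.w;     // perspective divide
}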
return glm::ortho(min.x, max.x, min.y, max.y, min.z, max.z);
Standard GL conventions use a view space where the camera looks in the negative z direction, but the zNear and zFar parameters are interpreted as distances along the viewing direction, so the actual viewing volume ranges from -zFar to -zNear in view space. You'll have to flip the signs of your z dimension to get the actual bounding box you're looking for.
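Applied to the min/max box computed above, that sign flip amounts to (a sketch; near = -max.z and far = -min.z because points in front of the light have negative z in its view space):

return glm::ortho(min.x, max.x, min.y, max.y, -max.z, -min.z);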
I'm attempting to rotate a cube around an axis and it's definitely behaving incorrectly. I'm assuming the problem lies in my matrix rotation code, as everything else seems to be working. I can translate the model correctly along the x, y, or z axis, as well as scale it. My camera view matrix is working as expected, and so is my projection matrix. If I remove the view matrix and/or the projection matrix implementations, the problem remains.
If you wish to see what result I'm getting, it's the exact same output as the gif shown on this stackoverflow post: Rotating a cube in modern opengl... looks strange
The cube appears to fold in on itself while rotating, then returns to normal after a full rotation, rotates fine for about 20 degrees, and then folds in on itself again, repeating. My issue is the same as in the linked post, but my matrix class is not the same, so my problem, though it looks identical, seemingly has a different solution.
Here's my stripped-down matrix declaration with the possibly relevant operators:
math.h
typedef struct matrix4x4
{
//Elements stored in ROW MAJOR ORDER
GLfloat matrix[16];
void translate(Vector3f translation);
void rotateX(GLfloat angle);
void rotateY(GLfloat angle);
void rotateZ(GLfloat angle);
void rotate(Vector3f angles);
void scale(Vector3f scales);
void scale(GLfloat scale);
inline matrix4x4& operator*=(const matrix4x4& rhs)
{
this->matrix[0] = this->matrix[0] * rhs.matrix[0] + this->matrix[1] * rhs.matrix[4] + this->matrix[2] * rhs.matrix[8] + this->matrix[3] * rhs.matrix[12];
this->matrix[1] = this->matrix[0] * rhs.matrix[1] + this->matrix[1] * rhs.matrix[5] + this->matrix[2] * rhs.matrix[9] + this->matrix[3] * rhs.matrix[13];
this->matrix[2] = this->matrix[0] * rhs.matrix[2] + this->matrix[1] * rhs.matrix[6] + this->matrix[2] * rhs.matrix[10] + this->matrix[3] * rhs.matrix[14];
this->matrix[3] = this->matrix[0] * rhs.matrix[3] + this->matrix[1] * rhs.matrix[7] + this->matrix[2] * rhs.matrix[11] + this->matrix[3] * rhs.matrix[15];
this->matrix[4] = this->matrix[4] * rhs.matrix[0] + this->matrix[5] * rhs.matrix[4] + this->matrix[6] * rhs.matrix[8] + this->matrix[7] * rhs.matrix[12];
this->matrix[5] = this->matrix[4] * rhs.matrix[1] + this->matrix[5] * rhs.matrix[5] + this->matrix[6] * rhs.matrix[9] + this->matrix[7] * rhs.matrix[13];
this->matrix[6] = this->matrix[4] * rhs.matrix[2] + this->matrix[5] * rhs.matrix[6] + this->matrix[6] * rhs.matrix[10] + this->matrix[7] * rhs.matrix[14];
this->matrix[7] = this->matrix[4] * rhs.matrix[3] + this->matrix[5] * rhs.matrix[7] + this->matrix[6] * rhs.matrix[11] + this->matrix[7] * rhs.matrix[15];
this->matrix[8] = this->matrix[8] * rhs.matrix[0] + this->matrix[9] * rhs.matrix[4] + this->matrix[10] * rhs.matrix[8] + this->matrix[11] * rhs.matrix[12];
this->matrix[9] = this->matrix[8] * rhs.matrix[1] + this->matrix[9] * rhs.matrix[5] + this->matrix[10] * rhs.matrix[9] + this->matrix[11] * rhs.matrix[13];
this->matrix[10] = this->matrix[8] * rhs.matrix[2] + this->matrix[9] * rhs.matrix[6] + this->matrix[10] * rhs.matrix[10] + this->matrix[11] * rhs.matrix[14];
this->matrix[11] = this->matrix[8] * rhs.matrix[3] + this->matrix[9] * rhs.matrix[7] + this->matrix[10] * rhs.matrix[11] + this->matrix[11] * rhs.matrix[15];
this->matrix[12] = this->matrix[12] * rhs.matrix[0] + this->matrix[13] * rhs.matrix[4] + this->matrix[14] * rhs.matrix[8] + this->matrix[15] * rhs.matrix[12];
this->matrix[13] = this->matrix[12] * rhs.matrix[1] + this->matrix[13] * rhs.matrix[5] + this->matrix[14] * rhs.matrix[9] + this->matrix[15] * rhs.matrix[13];
this->matrix[14] = this->matrix[12] * rhs.matrix[2] + this->matrix[13] * rhs.matrix[6] + this->matrix[14] * rhs.matrix[10] + this->matrix[15] * rhs.matrix[14];
this->matrix[15] = this->matrix[12] * rhs.matrix[3] + this->matrix[13] * rhs.matrix[7] + this->matrix[14] * rhs.matrix[11] + this->matrix[15] * rhs.matrix[15];
return *this;
}
}matrix4x4;
matrix4x4 createTransformationMatrix(Vector3f translation, Vector3f rotation, Vector3f scale);
matrix4x4 createPerspectiveProjectionMatrix(GLfloat width, GLfloat height, GLfloat fov, GLfloat nearPlane, GLfloat farPlane);
matrix4x4 createViewMatrix(Vector3f cameraPosition, GLfloat cameraPitch, GLfloat cameraYaw, GLfloat cameraRoll);
and its relevant implementations:
math.cpp
matrix4x4::matrix4x4(GLfloat elements[])
{
//Elements stored in ROW MAJOR ORDER
for (unsigned int i = 0; i <= elementCount; i++)
{
matrix[i] = elements[i];
}
}
void matrix4x4::setIdentity()
{
std::fill(matrix, matrix + sizeof(matrix) / sizeof(GLfloat), 0.0f);
matrix[0] = 1;
matrix[5] = 1;
matrix[10] = 1;
matrix[15] = 1;
}
/*/////////////////////////////////////////////////////
math
/////////////////////////////////////////////////////*/
void matrix4x4::translate(Vector3f translation)
{
GLfloat transformElements[16] =
{
1.0f, 0.0f, 0.0f, translation.x,
0.0f, 1.0f, 0.0f, translation.y,
0.0f, 0.0f, 1.0f, translation.z,
0.0f, 0.0f, 0.0f, 1.0f
};
matrix4x4 transform = matrix4x4(transformElements);
*this *= transform;
}
void matrix4x4::rotateX(GLfloat angle)
{
angle = degreesToRadians(angle);
GLfloat transformElements[16] =
{
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, std::cos(-angle), -std::sin(-angle), 0.0f,
0.0f, std::sin(-angle), std::cos(-angle), 0.0f,
0.0f, 0.0f, 0.0f, 1.0f
};
matrix4x4 transform = matrix4x4(transformElements);
*this *= transform;
}
void matrix4x4::rotateY(GLfloat angle)
{
angle = degreesToRadians(angle);
GLfloat transformElements[16] =
{
std::cos(-angle), 0.0f, std::sin(-angle), 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
-std::sin(-angle), 0.0f, std::cos(-angle), 0.0f,
0.0f, 0.0f, 0.0f, 1.0f
};
matrix4x4 transform = matrix4x4(transformElements);
*this *= transform;
}
void matrix4x4::rotateZ(GLfloat angle)
{
angle = degreesToRadians(angle);
GLfloat transformElements[16] =
{
std::cos(-angle), -std::sin(-angle), 0.0f, 0.0f,
std::sin(-angle), std::cos(-angle), 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f
};
matrix4x4 transform = matrix4x4(transformElements);
*this *= transform;
}
void matrix4x4::rotate(Vector3f angles)
{
matrix4x4 transform = matrix4x4();
transform.setIdentity();
transform.rotateX(angles.x);
transform.rotateY(angles.y);
transform.rotateZ(angles.z);
*this *= transform;
}
void matrix4x4::scale(Vector3f scales)
{
GLfloat transformElements[16] =
{
scales.x, 0.0f, 0.0f, 0.0f,
0.0f, scales.y, 0.0f, 0.0f,
0.0f, 0.0f, scales.z, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f
};
matrix4x4 transform = matrix4x4(transformElements);
*this *= transform;
}
matrix4x4 createTransformationMatrix(Vector3f translation, Vector3f rotation, Vector3f scale)
{
matrix4x4 transformationMatrix;
transformationMatrix.setIdentity();
//I've tried changing the order of these around, as well as only
//doing one operation (skipping translate and scale, or everything but a single axis rotation)
transformationMatrix.translate(translation);
transformationMatrix.rotate(rotation);
transformationMatrix.scale(scale);
return transformationMatrix;
}
matrix4x4 createPerspectiveProjectionMatrix(GLfloat width, GLfloat height, GLfloat fov, GLfloat nearPlane, GLfloat farPlane)
{
matrix4x4 projectionMatrix;
projectionMatrix.setIdentity();
GLfloat aspectRatio = width / height;
projectionMatrix.matrix[0] = (1.0f / std::tan((degreesToRadians(fov)) / 2.0f) / aspectRatio);
projectionMatrix.matrix[5] = 1.0f / std::tan((degreesToRadians(fov)) / 2.0f);
projectionMatrix.matrix[10] = (farPlane + nearPlane) / (nearPlane - farPlane);
projectionMatrix.matrix[11] = (2.0f * farPlane * nearPlane) / (nearPlane - farPlane);
projectionMatrix.matrix[14] = -1.0f;
return projectionMatrix;
}
I know my matrix/vector implementations are quick and dirty, but I'm just trying to get something set up. I've got plans to make the math methods (scale, translate, etc) static methods that don't affect the contents of the matrix, but instead accept a matrix as input and return a new one... but that's not the issue right now.
Here's my vertex shader
#version 330 core
//declare inputs
in vec3 position;
in vec2 textureCoords;
//declare output
out vec2 pass_textureCoords;
//uniforms
uniform mat4 transformationMatrix;
uniform mat4 projectionMatrix;
uniform mat4 viewMatrix;
void main(void)
{
//tell OpenGL where to render the vertex on screen
gl_Position = projectionMatrix * viewMatrix * transformationMatrix * vec4(position.x, position.y, position.z, 1.0);
pass_textureCoords = textureCoords;
}
My render method...
void Renderer::render(Entity entity, Shader* shader)
{
...
RawModel* rawModel = texturedModel->getRawModel();
glBindVertexArray(rawModel->getVaoID());
...
matrix4x4 transformationMatrix = createTransformationMatrix(entity.getPosition(), entity.getRotation(), entity.getScale());
shader->loadTransformationMatrix(transformationMatrix);
...
glDrawElements(GL_TRIANGLES, rawModel->getVertexCount(), GL_UNSIGNED_INT, 0);
...
}
And finally, the relevant pieces from my main: the cube definitions and so on.
//This is a simple cube
std::vector<GLfloat> vertices =
{
-0.5f,0.5f,-0.5f,
-0.5f,-0.5f,-0.5f,
0.5f,-0.5f,-0.5f,
0.5f,0.5f,-0.5f,
-0.5f,0.5f,0.5f,
-0.5f,-0.5f,0.5f,
0.5f,-0.5f,0.5f,
0.5f,0.5f,0.5f,
0.5f,0.5f,-0.5f,
0.5f,-0.5f,-0.5f,
0.5f,-0.5f,0.5f,
0.5f,0.5f,0.5f,
-0.5f,0.5f,-0.5f,
-0.5f,-0.5f,-0.5f,
-0.5f,-0.5f,0.5f,
-0.5f,0.5f,0.5f,
-0.5f,0.5f,0.5f,
-0.5f,0.5f,-0.5f,
0.5f,0.5f,-0.5f,
0.5f,0.5f,0.5f,
-0.5f,-0.5f,0.5f,
-0.5f,-0.5f,-0.5f,
0.5f,-0.5f,-0.5f,
0.5f,-0.5f,0.5f
};
std::vector<GLfloat> textureCoords =
{
...
};
std::vector<GLuint> indices =
{
0,1,3,
3,1,2,
4,5,7,
7,5,6,
8,9,11,
11,9,10,
12,13,15,
15,13,14,
16,17,19,
19,17,18,
20,21,23,
23,21,22
};
//parameters are (model, pos, rotation, scale)
Entity entity = Entity(&texturedModel, Vector3f(0.0f, 0.0f, -2.0f), Vector3f(0.0f, 0.0f, 0.0f), 1.0f);
//SHADER STUFF
Shader textureShader = Shader("uniformVarTextureShader");
textureShader.loadProjectionMatrix(display.getProjectionMatrix());
Camera cam;
//draw in wireframe mode
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
//glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
while (display.checkForClose() == 0)
{
glfwPollEvents();
//TO DO: update logic here
//entity.varyPosition(+0.005f, 0.0f, -0.002f); //this works, as does scaling and camera movement
//entity.varyRotation(0.25f, 0.18f, 0.0f);
entity.setYRotation(entity.getYRotation() + 0.25f); //any sort of rotation operation ends up with the strange behavior
//rendering commands here
display.prepare();
textureShader.bind();
textureShader.loadViewMatrix(cam);
display.render(entity, &textureShader);
textureShader.stop();
display.swapBuffers();
}
So, to recap: I'm not having any issues with translating, scaling, or "camera movement", and the projection matrix appears to work as well. Any time I attempt to rotate, however, I get the exact same behavior as in the linked post above.
Final notes: I have depth testing enabled and clear the depth buffer each frame. I also pass GL_TRUE to transpose any matrix data I give to glUniformMatrix4fv. I've checked the locations of each of the uniforms and they are passing correctly; 0, 1 and 2 respectively. No -1.
I'm stumped, any help would be appreciated. I can post more code if need be, but I'm pretty sure this covers the entirety of where the problem most likely lies. Thanks again
The major issue is the matrix multiplication operation.
Since you read from the matrix and write to it in place, some elements have already been modified before you read them.
e.g. in the first line this->matrix[0] is written to:
this->matrix[0] = this->matrix[0] * rhs.matrix[0] + this->matrix[1] * rhs.matrix[4] + this->matrix[2] * rhs.matrix[8] + this->matrix[3] * rhs.matrix[12];
and in the second line this->matrix[0] is read again:
this->matrix[1] = this->matrix[0] * rhs.matrix[1] + this->matrix[1] * rhs.matrix[5] + this->matrix[2] * rhs.matrix[9] + this->matrix[3] * rhs.matrix[13];
Copy the matrix array to a local variable, to solve the issue:
matrix4x4& operator*=(const matrix4x4& rhs)
{
matrix4x4 act( this->matrix );
this->matrix[0] = act.matrix[0] * rhs.matrix[0] + act.matrix[1] * rhs.matrix[4] + act.matrix[2] * rhs.matrix[8] + act.matrix[3] * rhs.matrix[12];
this->matrix[1] = act.matrix[0] * rhs.matrix[1] + act.matrix[1] * rhs.matrix[5] + act.matrix[2] * rhs.matrix[9] + act.matrix[3] * rhs.matrix[13];
....
return *this;
}
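For completeness, the same fix can also be written with loops instead of sixteen explicit statements (a sketch; it keeps the row-major matrix[row * 4 + col] layout used above):

inline matrix4x4& operator*=(const matrix4x4& rhs)
{
    matrix4x4 act(this->matrix);                    // snapshot of the left operand
    for (int row = 0; row < 4; ++row)
        for (int col = 0; col < 4; ++col)
        {
            GLfloat sum = 0.0f;
            for (int k = 0; k < 4; ++k)
                sum += act.matrix[row * 4 + k] * rhs.matrix[k * 4 + col];
            this->matrix[row * 4 + col] = sum;
        }
    return *this;
}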
By the way, since the vector is multiplied onto the matrix from the right in the shader
gl_Position = projectionMatrix * viewMatrix * transformationMatrix * vec4(position.x, position.y, position.z, 1.0);
the matrix has to be initialized in column-major order:
mat4 m44 = mat4(
    vec4( Xx, Xy, Xz, 0.0),
    vec4( Yx, Yy, Yz, 0.0),
    vec4( Zx, Zy, Zz, 0.0),
    vec4( Tx, Ty, Tz, 1.0) );
Note that your matrices are initialized in row-major order, e.g. matrix4x4::translate:
GLfloat transformElements[16] =
{
1.0f, 0.0f, 0.0f, translation.x,
0.0f, 1.0f, 0.0f, translation.y,
0.0f, 0.0f, 1.0f, translation.z,
0.0f, 0.0f, 0.0f, 1.0f
};
So you have to transpose the matrix when you set the uniform with glUniformMatrix4fv:
glUniformMatrix4fv( ..., ..., GL_TRUE, ... );
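Filled in with the names used in the question (the location variable here is illustrative), that call could look like:

glUniformMatrix4fv(transformationMatrixLocation, 1, GL_TRUE, transformationMatrix.matrix);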
I'm doing cascaded shadow maps, and I believe I have a problem concerning the way I do the split comparison to select the proper shadow map. As it stands, the shadow mapping works overall but in a few cases at certain angles it does not work.
Currently the lighting shader stage looks like this:
"#version 420
const float DEPTH_BIAS = 0.00005;
layout(std140) uniform UnifDirLight
{
mat4 mVPMatrix[4];
mat4 mCamViewMatrix;
vec4 mSplitDistance;
vec4 mLightColor;
vec4 mLightDir;
vec4 mGamma;
vec2 mScreenSize;
} UnifDirLightPass;
layout (binding = 2) uniform sampler2D unifPositionTexture;
layout (binding = 3) uniform sampler2D unifNormalTexture;
layout (binding = 4) uniform sampler2D unifDiffuseTexture;
layout (binding = 6) uniform sampler2DArrayShadow unifShadowTexture;
out vec4 fragColor;
void main()
{
vec2 texcoord = gl_FragCoord.xy / UnifDirLightPass.mScreenSize;
vec3 worldPos = texture(unifPositionTexture, texcoord).xyz;
vec3 normal = normalize(texture(unifNormalTexture, texcoord).xyz);
vec3 diffuse = texture(unifDiffuseTexture, texcoord).xyz;
vec4 camPos = UnifDirLightPass.mCamViewMatrix * vec4(worldPos, 1.0); // legit way of determining the split?
int index = 3;
if (camPos.z > UnifDirLightPass.mSplitDistance.x)
index = 0;
else if (camPos.z > UnifDirLightPass.mSplitDistance.y)
index = 1;
else if (camPos.z > UnifDirLightPass.mSplitDistance.z)
index = 2;
vec4 projCoords = UnifDirLightPass.mVPMatrix[index] * vec4(worldPos, 1.0);
projCoords.w = projCoords.z - DEPTH_BIAS;
projCoords.z = float(index);
float visibility = texture(unifShadowTexture, projCoords);
float angleNormal = clamp(dot(normal, UnifDirLightPass.mLightDir.xyz), 0, 1);
fragColor = vec4(diffuse, 1.0) * visibility * angleNormal * UnifDirLightPass.mLightColor;
}
And the "mSplitDistance", each component is the center fardistance of the frustrum for that split, multiplied by the main cameras view matrix
Vec4 camFarDistCenter;
CameraFrustrum cameraFrustrum = CalculateCameraFrustrum(nearDistArr[cascadeIndex], farDistArr[cascadeIndex], lighting.mCameraPosition, lighting.mCameraDirection, camFarDistCenter);
.....
camFarDistCenter = lighting.mCameraViewMatrix * camFarDistCenter;
splitDistances[cascadeIndex] = camFarDistCenter.z;
Here's how I create the camera frustum for each split, if it's of interest; I believe this is a pretty common algorithm:
CameraFrustrum CalculateCameraFrustrum(const float minDist, const float maxDist, const Vec3& cameraPosition, const Vec3& cameraDirection, Vec4& camFarZ)
{
CameraFrustrum ret = { Vec4(-1.0f, -1.0f, 1.0f, 1.0f), Vec4(-1.0f, -1.0f, -1.0f, 1.0f), Vec4(-1.0f, 1.0f, 1.0f, 1.0f), Vec4(-1.0f, 1.0f, -1.0f, 1.0f),
Vec4(1.0f, -1.0f, 1.0f, 1.0f), Vec4(1.0f, -1.0f, -1.0f, 1.0f), Vec4(1.0f, 1.0f, 1.0f, 1.0f), Vec4(1.0f, 1.0f, -1.0f, 1.0f) };
const Vec3 forwardVec = glm::normalize(cameraDirection);
const Vec3 rightVec = glm::normalize(glm::cross(forwardVec, Vec3(0.0f, 0.0f, 1.0f)));
const Vec3 upVec = glm::normalize(glm::cross(rightVec, forwardVec));
const Vec3 nearCenter = cameraPosition + forwardVec * minDist;
const Vec3 farCenter = cameraPosition + forwardVec * maxDist;
camFarZ = Vec4(farCenter, 1.0);
const float nearHeight = tan(glm::radians(70.0f) / 2.0f) * minDist;
const float nearWidth = nearHeight * 1920.0f / 1080.0f;
const float farHeight = tan(glm::radians(70.0f) / 2.0f) * maxDist;
const float farWidth = farHeight * 1920.0f / 1080.0f;
ret[0] = Vec4(nearCenter - (upVec * nearHeight) - (rightVec * nearWidth), 1.0);
ret[1] = Vec4(nearCenter + (upVec * nearHeight) - (rightVec * nearWidth), 1.0);
ret[2] = Vec4(nearCenter + (upVec * nearHeight) + (rightVec * nearWidth), 1.0);
ret[3] = Vec4(nearCenter - (upVec * nearHeight) + (rightVec * nearWidth), 1.0);
ret[4] = Vec4(farCenter - upVec * farHeight - rightVec * farWidth, 1.0);
ret[5] = Vec4(farCenter + upVec * farHeight - rightVec * farWidth, 1.0);
ret[6] = Vec4(farCenter + upVec * farHeight + rightVec * farWidth, 1.0);
ret[7] = Vec4(farCenter - upVec * farHeight + rightVec * farWidth, 1.0);
return ret;
}
Is it sound to do the split comparison in camera space like I do? Is that a potential problem?
void CalculateCameraFrustrum(glm::mat4& projectionMatrix,
                             glm::mat4 viewMatrix,   // viewMatrix = light POV
                             glm::vec3 camera,       // camera = eye position + eye direction
                             float zNear,
                             float zFar,
                             glm::vec4 point[4])     // point[4] = shadow map boundaries
{
    glm::mat4 shadMvp = projectionMatrix * (viewMatrix * glm::translate(camera));
    glm::vec4 transf;
    float maxX = zNear, minX = zFar,
          maxY = zNear, minY = zFar;
    int i = -1;
    while (++i < 4)
    {
        transf = shadMvp * point[i];
        transf.x /= transf.w;
        transf.y /= transf.w;
        if (transf.x > maxX)
            maxX = transf.x;
        if (transf.x < minX)
            minX = transf.x;
        if (transf.y > maxY)
            maxY = transf.y;
        if (transf.y < minY)
            minY = transf.y;
    }
    float scaleX = 2.0f / (maxX - minX),
          scaleY = 2.0f / (maxY - minY),
          offsetX = -0.5f * (maxX + minX) * scaleX,
          offsetY = -0.5f * (maxY + minY) * scaleY;
    shadMvp = glm::mat4(1); // Identity matrix
    shadMvp[0][0] = scaleX;
    shadMvp[1][1] = scaleY;
    shadMvp[0][3] = offsetX;
    shadMvp[1][3] = offsetY;
    projectionMatrix *= shadMvp;
} // No need to calculate view frustum splitting,
// only the boundaries of the shadow maps levels are needed (glm::ortho(...)).
// :)