Related
I am working on a custom 3D renderer engine based on Javidx9 3D engine tutorial. I'm trying to get some collision detection with rays and segments and it's not properly working.
I've tried implementing different methods trying to make it work the way I want it to, but either the function is not working or it's the way I'm using the function after it has been implemented. The current algorithm I am using is based on "Real-Time Collision Detection", by Christer Ericson, Pg 191. However, when the program is run I am getting no intercepted triangles (the returned vector is always empty). Is there something wrong with my code?
Note: I have some predefined functions that are self-explanatory based on the name.
#include <algorithm>
#include <cmath>
#include <fstream>
#include <iostream>
#include <string>
#include <strstream>
#include <vector>
#define SMALL_NUM 0.00000001 // anything that avoids division overflow
using namespace std;
// Created a 2D structure to hold texture coordinates
struct vec2d
{
float u = 0;
float v = 0;
float w = 1;
};
struct vec3d
{
float x = 0;
float y = 0;
float z = 0;
float w = 1; // Need a 4th term to perform sensible matrix vector multiplication
bool operator==(vec3d a) const
{
if (a.x == x && a.y == y && a.z == z && a.w == w)
return true;
else
return false;
}
vec3d operator+(const vec3d& a) const
{
return vec3d{ a.x + x, a.y + y, a.z + z, w };
}
vec3d operator-(const vec3d& a) const
{
return vec3d{ a.x - x, a.y - y, a.z - z, w };
}
vec3d operator*(const vec3d& a) const
{
return vec3d{ a.x * x, a.y * y, a.z * z, w };
}
vec3d operator/(const vec3d& a) const
{
return vec3d{ a.x / x, a.y / y, a.z / z, w };
}
vec3d operator+(const float& a) const
{
return vec3d{ a + x, a + y, a + z, w };
}
vec3d operator-(const float& a) const
{
return vec3d{ a - x, a - y, a - z, w };
}
vec3d operator*(const float& a) const
{
return vec3d{ a * x, a * y, a * z, w };
}
vec3d operator/(const float& a) const
{
return vec3d{ a / x, a / y, a / z, w };
}
};
struct triangle
{
vec3d p[3];
vec2d t[3]; // added a texture coord per vertex
int triid = 0;
wchar_t sym;
short col;
bool calculated = false;
};
struct mesh
{
vector<triangle> tris;
};
struct collision
{
bool plane = false;
triangle tris;
vec3d points;
};
float Vector_DotProduct(vec3d v1, vec3d v2)
{
return v1.x * v2.x + v1.y * v2.y + v1.z * v2.z;
}
float Vector_Length(vec3d v)
{
return sqrtf(Vector_DotProduct(v, v));
}
vec3d Vector_Normalise(vec3d v)
{
float l = Vector_Length(v);
return { v.x / l, v.y / l, v.z / l };
}
vec3d Vector_CrossProduct(vec3d v1, vec3d v2)
{
vec3d v;
v.x = v1.y * v2.z - v1.z * v2.y;
v.y = v1.z * v2.x - v1.x * v2.z;
v.z = v1.x * v2.y - v1.y * v2.x;
return v;
}
vector<collision> CollisionSegment(vec3d ray, vec3d rayend, mesh* m)
{
vector<collision> ret; // The return varaible
vec3d raydir = Vector_Normalise(rayend - ray); // The ray direction
for (auto tri : m->tris)
{
vec3d ab = tri.p[1] - tri.p[0];
vec3d ac = tri.p[2] - tri.p[0];
vec3d qp = rayend - ray;
vec3d n = Vector_CrossProduct(ab, ac);
float d = Vector_DotProduct(qp, n);
if (d <= SMALL_NUM) continue;
vec3d ap = ray - tri.p[0];
float t = Vector_DotProduct(ap, n);
if (t < SMALL_NUM) continue;
if (t < d) continue;
vec3d e = Vector_CrossProduct(qp, ap);
float v = Vector_DotProduct(ac, e);
if (v < SMALL_NUM || v > d) continue;
float w = -Vector_DotProduct(ab, e);
if (w < SMALL_NUM || v + w > d) continue;
float ood = 1.0f / d;
t *= ood;
v *= ood;
w *= ood;
float u = 1.0f - v - w;
ret.push_back(collision{ false, tri, vec3d() });
}
return ret;
}
int main()
{
mesh meshCube;
meshCube.tris = {
// SOUTH
{ 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f,},
{ 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f,},
// EAST
{ 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f,},
{ 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f,},
// NORTH
{ 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f,},
{ 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f,},
// WEST
{ 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f,},
{ 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f,},
// TOP
{ 0.0f, 1.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f,},
{ 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f,},
// BOTTOM
{ 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f,},
{ 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 1.0f, 1.0f,},
};
vec3d p1 = { -1.5f, -1.5f, -1.5f };
vec3d p2 = { 1.5f, 1.5f, 1.5f };
vector<collision> hits = CollisionSegment(p1, p2, &meshCube);
return 0;
}
What I want is whenever I look at a mesh I want a vector of the triangles that come between me and my camera segment.
I am porting my OpenGL 1.1 application to OpenGL ES 2.0 and am writing a wrapper to implement the OpenGL 1.1 functions. My code seems to work fine until I start calling glPushMatrix() and glPopMatrix(). I think my understanding of how these should be implemented is incorrect.
Do I compute the final rotate/translate/scale before pushing it back on the stack? Should I keep only one modelview matrix (instead of separating it into three)? Are the transforms applied in the correct order?
Here is the code for my transformation matrices:
static std::vector<GLfloat> vertices;
static std::vector<std::vector<GLfloat>> rotationMatrixStack;
static std::vector<std::vector<GLfloat>> scalingMatrixStack;
static std::vector<GLfloat> rotationMatrix =
{
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f
};
static std::vector<GLfloat> scalingMatrix =
{
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f
};
static std::vector<GLfloat> translationMatrix =
{
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f
};
static std::vector<GLfloat> orthographicMatrix =
{
.0025f, 0.0f, 0.0f, -1.0f,
0.0f, .0025f, 0.0f, -1.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f
};
void glTranslatef (GLfloat x, GLfloat y, GLfloat z)
{
float translation[] =
{
1.0f, 0.0f, 0.0f, x,
0.0f, 1.0f, 0.0f, y,
0.0f, 0.0f, 1.0f, z,
0.0f, 0.0f, 0.0f, 1.0f
};
multiplyMatrix(translation , &translationMatrix[0], &translationMatrix[0]);
}
void glScalef (GLfloat x, GLfloat y, GLfloat z)
{
float scaling[] =
{
x, 0.0f, 0.0f, 0.0f,
0.0f, y, 0.0f, 0.0f,
0.0f, 0.0f, z, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f
};
multiplyMatrix(scaling , &scalingMatrix[0], &scalingMatrix[0]);
}
void glRotatef (GLfloat angle, GLfloat x, GLfloat y, GLfloat z)
{
glTranslatef(-x, -y, -z);
GLfloat radians = angle * M_PI/180;
float zRotation[] =
{
cos(radians), -sin(radians), 0.0f, 0.0f,
sin(radians), cos(radians), 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f
};
multiplyMatrix(zRotation , &rotationMatrix[0], &rotationMatrix[0]);
glTranslatef(x,y,z);
}
void glLoadIdentity (void)
{
rotationMatrix, scalingMatrix, translationMatrix =
{
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f
};
}
void multiplyMatrix(float* a, float* b, float* product)
{
int a_heigth = 4;
int a_width = 4;
int b_heigth = 4;
int b_width = 4;
int product_heigth = a_heigth;
int product_width = b_width;
float intermediateMatrix[product_heigth * product_width] = {0};
for (int product_row = 0; product_row < product_heigth; product_row++)
{
for (int product_column = 0; product_column < product_width; product_column++)
{
float value = 0;
//std::cout << "r[" << (product_row*product_width) + product_column << "] = ";
for (int multiplication_index = 0; multiplication_index < a_width ; multiplication_index++)
{
value += a[(product_row * a_width) + multiplication_index] * b[product_column + (b_heigth * multiplication_index)];
//std::cout << "( a[" << (product_row * a_width) + multiplication_index << "] * b[" << product_column + (b_heigth * multiplication_index) << "] ) + ";
}
//std::cout << std::endl;
intermediateMatrix[(product_row*product_width) + product_column] = value;
}
}
for (int i = 0; i < product_heigth * product_width; i++)
{
product[i] = intermediateMatrix[i];
}
}
Here is the code for the matrix stack
static std::vector<std::vector<GLfloat>> translationMatrixStack;
void glPushMatrix()
{
rotationMatrixStack.push_back(rotationMatrix);
scalingMatrixStack.push_back(scalingMatrix);
translationMatrixStack.push_back(translationMatrix);
}
void glPopMatrix()
{
rotationMatrix = rotationMatrixStack.back();
scalingMatrix = scalingMatrixStack.back();
translationMatrix = translationMatrixStack.back();
rotationMatrixStack.pop_back();
scalingMatrixStack.pop_back();
translationMatrix.pop_back();
}
And here is the vertex shader code
attribute highp vec4 myVertex;
uniform mediump mat4 orthographicMatrix;
uniform mediump mat4 translationMatrix;
uniform mediump mat4 scalingMatrix;
uniform mediump mat4 rotationMatrix;
void main(void)
{
gl_Position = orthographicMatrix * translationMatrix * scalingMatrix * rotationMatrix * ( myVertex) ;
}";
You do not have a separate matrix stack for rotation, translation and scaling. In OpenGL there is one matrix stack for each matrix mode (See glMatrixMode). The matrix modes are GL_MODELVIEW, GL_PROJECTION, and GL_TEXTURE.
See the documentation of glTranslate:
glTranslate produces a translation by x y z . The current matrix (see glMatrixMode) is multiplied by this translation matrix, with the product replacing the current matrix.
the documentation of glRotate:
glRotate produces a rotation of angle degrees around the vector x y z . The current matrix (see glMatrixMode) is multiplied by a rotation matrix with the product replacing the current matrix.
and the documentation of glScale:
glScaleproduces a nonuniform scaling along the x, y, and z axes. The three parameters indicate the desired scale factor along each of the three axes.
The current matrix (see glMatrixMode) is multiplied by this scale matrix.
This means you need one matrix stack, and all operations operate on the same matrix stack.
Note, a matrix multiplication C = A * B works like this:
Matrix4x4 A, B, C;
// C = A * B
for ( int k = 0; k < 4; ++ k )
for ( int j = 0; j < 4; ++ j )
C[k][j] = A[0][j] * B[k][0] + A[1][j] * B[k][1] + A[2][j] * B[k][2] + A[3][j] * B[k][3];
A 4*4 matrix looks like this:
c0 c1 c2 c3 c0 c1 c2 c3
[ Xx Yx Zx Tx ] [ 0 4 8 12 ]
[ Xy Yy Zy Ty ] [ 1 5 9 13 ]
[ Xz Yz Zz Tz ] [ 2 6 10 14 ]
[ 0 0 0 1 ] [ 3 7 11 15 ]
And the memory image of a 4*4 matrix looks like this:
[ Xx, Xy, Xz, 0, Yx, Yy, Yz, 0, Zx, Zy, Zz, 0, Tx, Ty, Tz, 1 ]
This means you have to adapt your matrix operations:
static std::vector<std::vector<GLfloat>> modelViewMatrixStack;
static std::vector<GLfloat> modelViewMatrix{
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f };
void multiplyMatrix( float A[], float B[], float P[] )
{
float C[16];
for ( int k = 0; k < 4; ++ k ) {
for ( int l = 0; l < 4; ++ l ) {
C[k*4+j] =
A[0*4+j] * B[k*4+0] +
A[1*4+j] * B[k*4+1] +
A[2*4+j] * B[k*4+2] +
A[3*4+j] * B[k*4+3];
}
}
std::copy(C, C+16, P);
}
void glTranslatef( GLfloat x, GLfloat y, GLfloat z )
{
float translation[]{
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
x, y, z, 1.0f };
multiplyMatrix(&modelViewMatrix[0], translation, &modelViewMatrix[0]);
}
void glScalef( GLfloat x, GLfloat y, GLfloat z )
{
float scaling[]{
x, 0.0f, 0.0f, 0.0f,
0.0f, y, 0.0f, 0.0f,
0.0f, 0.0f, z, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f };
multiplyMatrix(&modelViewMatrix[0], scaling, &modelViewMatrix[0]);
}
void glRotatef( GLfloat angle, GLfloat x, GLfloat y, GLfloat z )
{
float radians = angle * M_PI/180;
float c = cos(radians);
float s = sin(radians);
float rotation[16]{
x*x*(1.0f-c)+c, x*y*(1.0f-c)-z*s, x*z*(1.0f-c)+y*s, 0.0f,
y*x*(1.0f-c)+z*s, y*y*(1.0f-c)+c, y*z*(1.0f-c)-x*s, 0.0f,
z*x*(1.0f-c)-y*s z*y*(1.0f-c)+x*s, z*z*(1.0f-c)+c, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f };
multiplyMatrix(&rotationMatrix[0], rotation, &rotationMatrix[0]);
}
See further:
GLSL 4×4 Matrix Fields
GLSL Programming/Vector and Matrix Operations
Data Type (GLSL)
The following two methods are taken from the ios GLKit framework:
GLK_INLINE GLKMatrix4 GLKMatrix4MakeOrtho(float left, float right,
float bottom, float top,
float nearZ, float farZ)
{
float ral = right + left;
float rsl = right - left;
float tab = top + bottom;
float tsb = top - bottom;
float fan = farZ + nearZ;
float fsn = farZ - nearZ;
GLKMatrix4 m = { 2.0f / rsl, 0.0f, 0.0f, 0.0f,
0.0f, 2.0f / tsb, 0.0f, 0.0f,
0.0f, 0.0f, -2.0f / fsn, 0.0f,
-ral / rsl, -tab / tsb, -fan / fsn, 1.0f };
return m;
}
GLK_INLINE GLKMatrix4 GLKMatrix4MakePerspective(float fovyRadians, float aspect, float nearZ, float farZ)
{
float cotan = 1.0f / tanf(fovyRadians / 2.0f);
GLKMatrix4 m = { cotan / aspect, 0.0f, 0.0f, 0.0f,
0.0f, cotan, 0.0f, 0.0f,
0.0f, 0.0f, (farZ + nearZ) / (nearZ - farZ), -1.0f,
0.0f, 0.0f, (2.0f * farZ * nearZ) / (nearZ - farZ), 0.0f };
return m;
}
I would like to smoothly move from perspective view to ortho view and vice versa. How should I calculate the correct parameters for the ortho matrix, given the perspective matrix and its parameters?
Recently I've been struggling just to set up a good perspective projection matrix and to apply it to a simple triangle. Before I show any code, I have a small question about matrix order: Do I have to multiply my view matrix with my projection matrix or multiply my projection matrix with my view matrix?
Ok now the code. I have tried many different ways to do a perspective matrix without any good result.
1
static Matrix4x4<T> Perspective_S(const T &fovy, const T &aspectRatio, const T &zNear, const T &zFar)
{
T range = tanf(fovy / 2.0f) * zNear;
return Matrix4x4<T>((2.0f * zNear) / (range * aspectRatio + range * aspectRatio), 0.0f, 0.0f, 0.0f,
0.0f, zNear / range, 0.0f, 0.0f,
0.0f, 0.0f, -(zFar + zNear) / (zFar - zNear), -1.0f,
0.0f, 0.0f, (-(2.0f * zFar * zNear) / (zFar - zNear)), 0.0f);
}
2
static Matrix4x4<T> Perspective_S(const T &fovy, const T &aspectRatio, const T &zNear, const T &zFar)
{
T f = 1.0f / tan(fovy / 2.0f);
return Matrix4x4<T>(f / aspectRatio, 0.0f, 0.0f, 0.0f,
0.0f, f, 0.0f, 0.0f,
0.0f, 0.0f, (zFar + zNear) / (zNear - zFar), (2.0f * zFar * zNear) / (zNear - zFar),
0.0f, 0.0f, -1.0f, 0.0f);;
}
3
static Matrix4x4<T> Frustum_S(const T &left, const T &right, const T &bottom, const T &top,
const T &zNear, const T &zFar)
{
return Matrix4x4<T>(2.0f * zNear / (right - left), 0.0f, 0.0f, 0.0f,
0.0f, 2.0f * zNear / (top - bottom), 0.0f, 0.0f,
(right + left) / (right - left), (top + bottom) / (top - bottom), -(zFar + zNear) / (zFar - zNear), -1.0f,
0.0f, 0.0f, -2.0f * zFar * zNear / (zFar - zNear), 0.0f);
}
static Matrix4x4<T> Perspective_S(const T &fovy, const T &aspectRation, const T &zNear, const T &zFar)
{
T scale = tan(fovy) * zNear;
T r = aspectRation * scale, l = -r;
T t = scale, b = -t;
return Frustum_S(l, r, b, t, zNear, zFar);
}
4
static void Perspective_S(Matrix4x4<T> &matrix, T fovyInDegrees, T aspectRatio, T znear, T zfar)
{
T ymax = znear * tanf(fovyInDegrees * 3.14159265358979323846 / 360.0); //c'est pas 180?
//ymin = -ymax;
//xmin = -ymax * aspectRatio;
T xmax = ymax * aspectRatio;
Frustum(matrix, -xmax, xmax, -ymax, ymax, znear, zfar);
}
static void Frustum_S(Matrix4x4<T> &matrix, T left, T right, T bottom, T top,
T znear, T zfar)
{
T temp = 2.0f * znear;
T temp2 = right - left;
T temp3 = top - bottom;
T temp4 = zfar - znear;
matrix = Matrix4x4<T>(temp / temp2, 0.0f, 0.0f, 0.0f,
0.0f, temp / temp3, 0.0f, 0.0f,
(right + left) / temp2, (top + bottom) / temp3, (-zfar - znear) / temp4, -1.0f,
0.0f, 0.0f, (-temp * zfar) / temp4, 0.0f);
}
Some of the functions look like the transposed resulting matrix of some of my other tries. All of those functions were taken from tutorials. One even came from my previous post, and it's still not working...
Just in case you might think it's my LookAt code, here it is:
What I do in main.cpp
matptr = (Matrix4x4f::LookAt_S(eye, center, up) *
Matrix4x4f::Perspective_S(M_PI / 3.0f, (float)window->getSize().x / (float)window->getSize().y, 0.001f, 1000.0f)).ToArray();
glUniformMatrix4fv(glGetUniformLocation(shaderProgram, "myMatrix"), 1, GL_FALSE, &matptr[0]);
My LookAt code:
static Matrix4x4<T> LookAt_S(Vector3<T> &eye, Vector3<T> ¢er, Vector3<T> &up)
{
Vector3<T> forward(center - eye);
forward.Normalize();
Vector3<T> side(forward.CrossProduct(up));
side.Normalize();
up = side.CrossProduct(forward);
return Matrix4x4<T> (side.x, up.x, -forward.x, 0.0f,
side.y, up.y, -forward.y, 0.0f,
side.z, up.z, -forward.z, 0.0f);
}
I wrote a shadow map shader for my graphics engine. I followed these tutorials:
Part 1 and the following part.
Unfortunately, the results I get are quite a bit off. Here are some screenshots. They show what my scene normally looks like, the scene with shadows enabled, and the content of the shadow map (please ignore the white stuff in the center, that's just the duck's geometry).
This is how I compute the coordinates to sample the shadow map with in my fragment shader:
float calcShadowFactor(vec4 lightSpacePosition) {
vec3 projCoords = lightSpacePosition.xyz / lightSpacePosition.w;
vec2 uvCoords;
uvCoords.x = 0.5 * projCoords.x + 0.5;
uvCoords.y = 0.5 * projCoords.y + 0.5;
float z = 0.5 * projCoords.z + 0.5;
float depth = texture2D(shadowMapSampler, uvCoords).x;
if (depth < (z + 0.00001f))
return 0.0f;
else
return 1.0f;
}
The lightSpacePosition vector is computed by:
projectionMatrix * inverseLightTransformationMatrix
* modelTransformationMatrix * vertexPosition
The projection matrix is:
[1.0f / (tan(fieldOfView / 2) * (width / height)), 0.0f, 0.0f, 0.0f]
[0.0f, 1.0f / tan(fieldOfView / 2), 0.0f, 0.0f]
[0.0f, 0.0f, (-zNear - zFar) / (zNear - zFar), 2.0f * zFar * zNear / (zNear - zFar)]
[0.0f, 0.0f, 1.0f, 0.0f]
My shadow map seems to be okay and I made sure the rendering pass uses the same lightSpacePosition vector as my shadow map pass. But I can't figure out what is wrong.
Although I do not understand this entirely, I think I found the bug:
I needed to transform the coordinates to NDC space and THEN multiply the matrices. My shadow coordinate computation now looks like this:
mat4 biasMatrix = mat4(
0.5f, 0.0f, 0.0f, 0.0f,
0.0f, 0.5f, 0.0f, 0.0f,
0.0f, 0.0f, 0.5f, 0.0f,
0.5f, 0.5f, 0.5f, 1.0f
);
vec4 shadowCoord0 = biasMatrix * light * vec4(vertexPosition, 1.0f);
shadowCoord = shadowCoord0.xyz / shadowCoord0.w;
where
light = projectionMatrix * inverseLightTransformationMatrix
* modelTransformationMatrix
Now the fragment shader's shadow factor computation is rather simple:
float shadowFactor = 1.0f;
if (texture(shadowMapSampler, shadowCoord.xy).z < shadowCoord.z - 0.0001f)
shadowFactor = 0.0f;