OpenGL GLSL - Projection matrix not working - C++

I've got a basic OpenGL application and I want to use my projection matrix.
This is my matrix:
WorldCoordinates.m[0][0] = 2.0f / Width - 1.0f; WorldCoordinates.m[0][1] = 0; WorldCoordinates.m[0][2] = 0; WorldCoordinates.m[0][3] = 0;
WorldCoordinates.m[1][0] = 0; WorldCoordinates.m[1][1] = 2.0f / Height - 1.0f; WorldCoordinates.m[1][2] = 0; WorldCoordinates.m[1][3] = 0;
WorldCoordinates.m[2][0] = 0; WorldCoordinates.m[2][1] = 0; WorldCoordinates.m[2][2] = 0; WorldCoordinates.m[2][3] = 0;
WorldCoordinates.m[3][0] = 0; WorldCoordinates.m[3][1] = 0; WorldCoordinates.m[3][2] = 0; WorldCoordinates.m[3][3] = 0;
(WorldCoordinates is a Matrix4 struct containing a single member m, a float[4][4]; Width and Height are ints.)
I then upload this matrix to my vertex shader like so:
shader.Bind();
glUniformMatrix4fv(glGetUniformLocation(shader.GetProgramID(), "worldCoordinates"), 1, GL_TRUE, &WorldCoordinates.m[0][0]);
(Shader is a class whose Bind() method just calls glUseProgram.)
This is my vertex shader:
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 color;
layout (location = 2) in vec2 texCoord;
out vec3 Color;
out vec2 TexCoord;
uniform mat4 worldCoordinates;
void main()
{
    gl_Position = worldCoordinates * vec4(position, 1.0f);
    Color = color;
    TexCoord = texCoord;
}
Using this, it doesn't work. But changing the gl_Position line to this:
gl_Position = vec4(vec3(position.x * 1/400 -1, position.y * 1/300 -1, 1.0f), 1.0f);
it renders as expected. Why is that?

This is how you build an orthographic projection matrix:
static void
mat4_ortho(mat4_t m, float left, float right, float bottom, float top, float near, float far)
{
    float rl = right - left;
    float tb = top - bottom;
    float fn = far - near;
    mat4_zero(m);
    /* column-major layout: m[12..14] is the translation column */
    m[ 0] =  2.0f / rl;
    m[ 5] =  2.0f / tb;
    m[10] = -2.0f / fn;
    m[12] = -(left + right) / rl;
    m[13] = -(top + bottom) / tb;
    m[14] = -(far + near) / fn;
    m[15] = 1.0f;
}
For your case, you'd set left=0, right=width, bottom=0, top=height; near and far don't matter, just set -1.0 and 1.0 for instance.
With such a matrix, the vertex coordinates you use for drawing will map 1:1 with the pixels on screen.
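Note that in the question's code, 2.0f / Width - 1.0f parses as (2.0f / Width) - 1.0f, so the -1 offset gets baked into the scale, and m[3][3] = 0 leaves every vertex with w = 0. A minimal sketch of the same ortho values written into the question's Matrix4 struct (assuming it is a plain aggregate; storage here is row-major, so the translation goes in the last column and the GL_TRUE transpose flag from the original upload stays):
Matrix4 WorldCoordinates = {};              // zero all 16 floats first
WorldCoordinates.m[0][0] = 2.0f / Width;    // scale: x in [0, Width]  -> [0, 2]
WorldCoordinates.m[1][1] = 2.0f / Height;   // scale: y in [0, Height] -> [0, 2]
WorldCoordinates.m[2][2] = -1.0f;           // -2 / (far - near) with near = -1, far = 1
WorldCoordinates.m[0][3] = -1.0f;           // translate: [0, 2] -> [-1, 1]
WorldCoordinates.m[1][3] = -1.0f;
WorldCoordinates.m[3][3] = 1.0f;            // keep w = 1

shader.Bind();
glUniformMatrix4fv(glGetUniformLocation(shader.GetProgramID(), "worldCoordinates"),
                   1, GL_TRUE, &WorldCoordinates.m[0][0]); // GL_TRUE: row-major upload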

Related

Compute Normals After Vertex Deformation?

I am writing a vertex and a fragment shader, trying to distort the surface of some water and then compute Blinn-Phong lighting on the surface. I am able to successfully deform the vertex positions with a simple noise function, but how can I find the distorted normals? Since it isn't a linear transformation, I am stuck. Could anyone help?
Here are the relevant files:
vertex shader:
#version 150
uniform mat4 u_Model;
uniform mat4 u_ModelInvTr;
uniform mat4 u_ViewProj;
uniform vec4 u_Color;
uniform int u_Time;
in vec4 vs_Pos; // The array of vertex positions passed to the shader
in vec4 vs_Nor; // The array of vertex normals passed to the shader
in vec4 vs_Col; // The array of vertex colors passed to the shader.
in vec2 vs_UV; // UV coords for texture to pass thru to fragment shader
in float vs_Anim; // 0.f or 1.f To pass thru to fragment shader
in float vs_T2O;
out vec4 fs_Pos;
out vec4 fs_Nor;
out vec4 fs_LightVec;
out vec4 fs_Col;
out vec2 fs_UVs;
out float fs_Anim;
out float fs_dimVal;
out float fs_T2O;
uniform vec4 u_CamPos;
out vec4 fs_CamPos;
const vec4 lightDir = normalize(vec4(0.0, 1.f, 0.0, 0));
mat4 rotationMatrix(vec3 axis, float angle) {
    axis = normalize(axis);
    float s = sin(angle);
    float c = cos(angle);
    float oc = 1.0 - c;
    return mat4(oc * axis.x * axis.x + c,          oc * axis.x * axis.y - axis.z * s, oc * axis.z * axis.x + axis.y * s, 0.0,
                oc * axis.x * axis.y + axis.z * s, oc * axis.y * axis.y + c,          oc * axis.y * axis.z - axis.x * s, 0.0,
                oc * axis.z * axis.x - axis.y * s, oc * axis.y * axis.z + axis.x * s, oc * axis.z * axis.z + c,          0.0,
                0.0,                               0.0,                               0.0,                               1.0);
}
vec4 rotateLightVec(float deg, vec4 LV) {
    mat4 R = rotationMatrix(vec3(0, 0, 1), deg);
    return R * LV;
}
float random1(vec3 p) {
    return fract(sin(dot(p, vec3(127.1, 311.7, 191.999))) * 43758.5453);
}
vec3 random2(vec3 p) {
    return fract(sin(vec3(dot(p, vec3(127.1, 311.7, 58.24)),
                          dot(p, vec3(269.5, 183.3, 657.3)),
                          dot(p, vec3(420.69, 69.420, 469.20)))) * 43758.5453);
}
void main()
{
    fs_Col = vs_Col;
    fs_UVs = vs_UV;
    fs_Anim = vs_Anim;
    fs_T2O = vs_T2O;
    mat3 invTranspose = mat3(u_ModelInvTr);
    fs_Nor = vec4(invTranspose * vec3(vs_Nor), 0);
    vec4 modelposition = u_Model * vs_Pos;
    if (vs_Anim != 0) { // if we want to animate this surface
        // check region in texture to decide which animatable type is drawn
        bool lava  = fs_UVs.x >= 13.f/16.f && fs_UVs.y <  2.f/16.f;
        bool water = !lava && fs_UVs.x >= 13.f/16.f && fs_UVs.y <= 4.f/16.f;
        if (water) {
            // define an oscillating time so that the model can transition back and forth
            float t = (cos(u_Time * 0.05) + 1) / 2; // u_Time increments by 1 every frame; t has domain [0, 1]
            vec3 temp = random2(vec3(modelposition.x, modelposition.y, modelposition.z)); // range [0, 1]
            temp = (temp - 0.5) / 25; // [0, 1/scalar]
            modelposition.x = mix(modelposition.x - temp.x, modelposition.x + temp.x, t);
            modelposition.y = mix(modelposition.y - temp.y, modelposition.y + 3 * temp.y, t);
            modelposition.z = mix(modelposition.z - temp.z, modelposition.z + temp.z, t);
        } else if (lava) {
            // define an oscillating time so that the model can transition back and forth
            float t = (cos(u_Time * 0.01) + 1) / 2; // u_Time increments by 1 every frame; t has domain [0, 1]
            vec3 temp = random2(vec3(modelposition.x, modelposition.y, modelposition.z)); // range [0, 1]
            temp = (temp - 0.5) / 25; // [0, 1/scalar]
            modelposition.x = mix(modelposition.x - temp.x, modelposition.x + temp.x, t);
            modelposition.y = mix(modelposition.y - temp.y, modelposition.y + 3 * temp.y, t);
            modelposition.z = mix(modelposition.z - temp.z, modelposition.z + temp.z, t);
        }
    }
    fs_dimVal = random1(modelposition.xyz / 100.f);
    fs_LightVec = rotateLightVec(0.001 * u_Time, lightDir); // compute the direction in which the light source lies
    fs_CamPos = u_CamPos; // uniform handle for the camera position instead of the inverse
    fs_Pos = modelposition;
    gl_Position = u_ViewProj * modelposition; // final clip-space position of the vertex
}
fragment shader:
#version 330
uniform vec4 u_Color; // The color with which to render this instance of geometry.
uniform sampler2D textureSampler;
uniform int u_Time;
uniform mat4 u_ViewProj;
uniform mat4 u_Model;
in vec4 fs_Pos;
in vec4 fs_Nor;
in vec4 fs_LightVec;
in vec4 fs_Col;
in vec2 fs_UVs;
in float fs_Anim;
in float fs_T2O;
in float fs_dimVal;
out vec4 out_Col;
in vec4 fs_CamPos;
float random1(vec3 p) {
    return fract(sin(dot(p, vec3(127.1, 311.7, 191.999))) * 43758.5453);
}
float random1b(vec3 p) {
    return fract(sin(dot(p, vec3(169.1, 355.7, 195.999))) * 95751.5453);
}
float mySmoothStep(float a, float b, float t) {
    t = smoothstep(0, 1, t);
    return mix(a, b, t);
}
float cubicTriMix(vec3 p) {
    vec3 pFract = fract(p);
    float llb = random1(floor(p) + vec3(0, 0, 0));
    float lrb = random1(floor(p) + vec3(1, 0, 0));
    float ulb = random1(floor(p) + vec3(0, 1, 0));
    float urb = random1(floor(p) + vec3(1, 1, 0));
    float llf = random1(floor(p) + vec3(0, 0, 1));
    float lrf = random1(floor(p) + vec3(1, 0, 1));
    float ulf = random1(floor(p) + vec3(0, 1, 1));
    float urf = random1(floor(p) + vec3(1, 1, 1));
    float mixLoBack  = mySmoothStep(llb, lrb, pFract.x);
    float mixHiBack  = mySmoothStep(ulb, urb, pFract.x);
    float mixLoFront = mySmoothStep(llf, lrf, pFract.x);
    float mixHiFront = mySmoothStep(ulf, urf, pFract.x);
    float mixLo = mySmoothStep(mixLoBack, mixLoFront, pFract.z);
    float mixHi = mySmoothStep(mixHiBack, mixHiFront, pFract.z);
    return mySmoothStep(mixLo, mixHi, pFract.y);
}
float fbm(vec3 p) {
    float amp = 0.5;
    float freq = 4.0;
    float sum = 0.0;
    for (int i = 0; i < 8; i++) {
        sum += cubicTriMix(p * freq) * amp;
        amp *= 0.5;
        freq *= 2.0;
    }
    return sum;
}
void main()
{
    vec4 diffuseColor = texture(textureSampler, fs_UVs);
    bool apply_lambert = true;
    float specularIntensity = 0;
    if (fs_Anim != 0) {
        // check region in texture to decide which animatable type is drawn
        bool lava  = fs_UVs.x >= 13.f/16.f && fs_UVs.y < 2.f/16.f;
        bool water = !lava && fs_UVs.x >= 13.f/16.f && fs_UVs.y < 4.f/16.f;
        if (lava) {
            // slowly gyrate texture and lighten and darken with random dimVal from the vertex shader
            vec2 movingUVs = vec2(fs_UVs.x + fs_Anim * 0.065/16 * sin(0.01 * u_Time),
                                  fs_UVs.y - fs_Anim * 0.065/16 * sin(0.01 * u_Time + 3.14159/2));
            diffuseColor = texture(textureSampler, movingUVs);
            vec4 warmerColor = diffuseColor + vec4(0.3, 0.3, 0, 0);
            vec4 coolerColor = diffuseColor - vec4(0.1, 0.1, 0, 0);
            diffuseColor = mix(warmerColor, coolerColor, 0.5 + fs_dimVal * 0.65 * sin(0.02 * u_Time));
            apply_lambert = false;
        } else if (water) {
            // blend between 3 different points in the texture to create a wavy subtle change over time
            vec2 offsetUVs = vec2(fs_UVs.x - 0.5f/16.f, fs_UVs.y - 0.5f/16.f);
            diffuseColor = texture(textureSampler, fs_UVs);
            vec4 altColor = texture(textureSampler, offsetUVs);
            altColor.x += fs_dimVal * pow(altColor.x + .15, 5);
            altColor.y += fs_dimVal * pow(altColor.y + .15, 5);
            altColor.z += 0.5 * fs_dimVal * pow(altColor.z + .15, 5);
            diffuseColor = mix(diffuseColor, altColor, 0.5 + 0.35 * sin(0.05 * u_Time));
            offsetUVs -= 0.25f/16.f;
            vec4 newColor = texture(textureSampler, offsetUVs);
            diffuseColor = mix(diffuseColor, newColor, 0.5 + 0.5 * sin(0.025 * u_Time)) + fs_dimVal * vec4(0.025);
            diffuseColor.a = 0.7;
            // ----------------------------------------------------
            // Blinn-Phong Shading
            // ----------------------------------------------------
            vec4 lightDir = normalize(fs_LightVec - fs_Pos);
            vec4 viewDir = normalize(fs_CamPos - fs_Pos);
            vec4 halfVec = normalize(lightDir + viewDir);
            float shininess = 400.f;
            // assign to the specularIntensity declared above; redeclaring it here
            // with `float` would shadow the outer variable and the highlight would never apply
            specularIntensity = max(pow(dot(halfVec, normalize(fs_Nor)), shininess), 0);
        }
    }
    // Calculate the diffuse term for Lambert shading
    float diffuseTerm = dot(normalize(fs_Nor), normalize(fs_LightVec));
    // Avoid negative lighting values
    diffuseTerm = clamp(diffuseTerm, 0, 1);
    float ambientTerm = 0.3;
    // Add a small ambient value so that faces not lit by the point light are not completely black
    float lightIntensity = diffuseTerm + ambientTerm;
    vec3 col = diffuseColor.rgb;
    // Compute final shaded color
    if (apply_lambert) {
        col = col * lightIntensity + col * specularIntensity;
    }
    // Check the rare, special case where we draw a face between two different transparent blocks as opaque
    if (fs_T2O != 0) {
        out_Col = vec4(col, 1.f);
    } else {
        out_Col = vec4(col, diffuseColor.a);
    }
    // distance fog
    vec4 fogColor = vec4(0.6, 0.75, 0.9, 1.0);
    float FC = gl_FragCoord.z / gl_FragCoord.w / 124.f;
    float falloff = clamp(1.05 - exp(-1.05f * (FC - 0.9f)), 0.f, 1.f);
    out_Col = mix(out_Col, fogColor, falloff);
}
I tried implementing Blinn-Phong in the fragment shader, but I think it is wrong simply because of the wrong normals. I think this can be done with some sort of tangent and cross product solution, but how can I know the tangent of the surface given that we only know the vertex positions?
I am not using Unity, just bare C++, and most of the answers I am finding online are for Java or Unity, which I do not understand.
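One way to realize the tangent/cross-product idea mentioned above, shown here only as a hedged sketch (displace() is a hypothetical stand-in for the noise-based offset that main() applies to modelposition): displace the point and two neighbors a small step along two tangents of the undistorted surface, then rebuild the normal from the cross product of the resulting edges.
// Hedged sketch: estimate the deformed normal by finite differences.
// displace() is hypothetical; it stands for the same offset applied to modelposition.
vec3 displacedNormal(vec3 p, vec3 n)
{
    // pick any vector not parallel to n to build a tangent basis
    vec3 helper = abs(n.y) < 0.99 ? vec3(0, 1, 0) : vec3(1, 0, 0);
    vec3 t = normalize(cross(n, helper));
    vec3 b = cross(n, t);
    float eps = 0.01; // step size; tune to the noise frequency
    vec3 p0 = displace(p);
    vec3 p1 = displace(p + eps * t);
    vec3 p2 = displace(p + eps * b);
    return normalize(cross(p1 - p0, p2 - p0));
}
Since random2() is a hash rather than a smooth noise, the finite differences will be noisy; a smooth noise such as the fbm() already defined in the fragment shader would give more stable normals.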

Problem with normals when drawing instanced mesh

I'm rendering a sphere with instanced drawing, while rotating the model-view matrix around the Y axis.
It looks OK at some angles, but at others things get worse (screenshots omitted).
It looks to me like a problem with normals. Currently, I'm calculating the normal matrix from my model-view matrix and then passing it to the shader, which does Phong-like lighting:
attribute vec4 a_position;
attribute vec3 a_normal;
attribute vec4 a_color;
attribute vec2 a_coord;
attribute mat4 a_matrix;
uniform mat4 u_mv_matrix;
uniform mat4 u_projection_matrix;
uniform mat3 u_normal_matrix;
varying vec4 v_position;
varying vec3 v_normal;
varying vec4 v_color;
varying vec2 v_coord;
void main() {
    vec4 transformedPosition = u_mv_matrix * a_matrix * a_position;
    v_position = transformedPosition;
    v_normal = u_normal_matrix * a_normal;
    v_color = a_color;
    v_coord = a_coord;
    gl_Position = u_projection_matrix * transformedPosition;
}
And the fragment shader:
uniform sampler2D u_sampler;
varying vec4 v_position;
varying vec3 v_normal;
varying vec4 v_color;
varying vec2 v_coord;
void main() {
    vec3 lightPosition = vec3(0.0); // XXX
    // set diffuse and specular colors
    vec3 cDiffuse = (v_color * texture2D(u_sampler, v_coord)).rgb;
    vec3 cSpecular = vec3(0.3);
    // lighting calculations
    vec3 N = normalize(v_normal);
    vec3 L = normalize(lightPosition - v_position.xyz);
    vec3 E = normalize(-v_position.xyz);
    vec3 H = normalize(L + E);
    // calculate coefficients
    float phong = max(dot(N, L), 0.0);
    const float kMaterialShininess = 20.0;
    const float kNormalization = (kMaterialShininess + 8.0) / (3.14159265 * 8.0);
    float blinn = pow(max(dot(N, H), 0.0), kMaterialShininess) * kNormalization;
    // diffuse coefficient
    vec3 diffuse = phong * cDiffuse;
    // specular coefficient
    vec3 specular = blinn * cSpecular;
    gl_FragColor = vec4(diffuse + specular, 1);
}
Final note: I'm working on desktop OpenGL 2.1 as well as WebGL on the browser.
Edit: Per request, I'm adding some information.
The mesh is built as follows, by passing an identity matrix:
void Sphere::append(IndexedVertexBatch<XYZ.N.UV> &batch, const Matrix &matrix) const {
    float sectorStep = TWO_PI / sectorCount;
    float stackStep = PI / stackCount;
    for (int i = 0; i <= stackCount; ++i) {
        float stackAngle = HALF_PI - i * stackStep;
        float xy = radius * cosf(stackAngle);
        float z = radius * sinf(stackAngle);
        for (int j = 0; j <= sectorCount; ++j) {
            float sectorAngle = j * sectorStep;
            float x = xy * cosf(sectorAngle);
            float y = xy * sinf(sectorAngle);
            float nx = x / radius;
            float ny = y / radius;
            float nz = z / radius;
            float s = (float)j / sectorCount;
            float t = (float)i / stackCount;
            batch.addVertex(matrix.transformPoint(x, y, z), matrix.transformNormal(nx, ny, nz), glm::vec2(s, t));
        }
    }
    for (int i = 0; i < stackCount; ++i) {
        int k1 = i * (sectorCount + 1);
        int k2 = k1 + sectorCount + 1;
        for (int j = 0; j < sectorCount; ++j, ++k1, ++k2) {
            if (i != 0) {
                if (frontFace == CCW) {
                    batch.addIndices(k1, k1 + 1, k2);
                } else {
                    batch.addIndices(k1, k2, k1 + 1);
                }
            }
            if (i != (stackCount - 1)) {
                if (frontFace == CCW) {
                    batch.addIndices(k1 + 1, k2 + 1, k2);
                } else {
                    batch.addIndices(k1 + 1, k2, k2 + 1);
                }
            }
        }
    }
}
Regarding the transformation matrices, it works as follows:
camera.getMVMatrix()
    .setIdentity()
    .translate(0, -150, -600)
    .rotateY(clock()->getTime() * 0.5f);
State()
    .setShader(shader)
    .setShaderMatrix<MV>(camera.getMVMatrix())
    .setShaderMatrix<PROJECTION>(camera.getProjectionMatrix())
    .setShaderMatrix<NORMAL>(camera.getNormalMatrix())
    .apply();
Finally, the light position is defined as vec3(0) in the fragment shader.
Note: As you can see, I'm using my own framework, which provides among other things high-level methods for building meshes and handling transformations. It's all straightforward stuff, proven to work as intended, but let me know if you need pointers to the source code.
Update: The lighting part of the shader I used ended up being wrong, so I switched to another method. But in essence, the solution I proposed in my answer is still valid (or at least it does the job of solving the "normal problem" when instancing is used and non-uniform scaling is avoided).
Here is a gist with the source code. There is also an online WebGL demo.
The solution was relatively simple: there is no point in passing a normal matrix to the shader.
Instead, the normal needs to be computed in the vertex shader:
v_normal = vec3(u_mv_matrix * a_matrix * vec4(a_normal, 0.0));
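This works because w = 0 makes the matrix treat a_normal as a direction, and with uniform scaling (as noted above) no inverse-transpose is needed. If non-uniform scaling were ever introduced, the usual fix, sketched here as an assumption rather than part of the original answer, would be the inverse-transpose of the combined matrix (inverse() needs GLSL 1.40+, so on OpenGL 2.1 / WebGL 1 it would have to be computed on the CPU per instance):
// Hedged alternative for non-uniform scaling; not part of the original answer.
mat3 normalMatrix = mat3(transpose(inverse(u_mv_matrix * a_matrix)));
v_normal = normalMatrix * a_normal;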

Shader Flipping Faces

I'm trying to construct a render engine using OpenGL and C++, but I can't seem to get past this problem. The same model is rendered 5 different times using different shaders; in 4 out of the 5 shaders the backface culling works properly. With the tessellation shader, however, it does not: any outward faces are invisible, so you can see straight through to the rear ones. Does anyone know why this shader flips the faces?
Vertex Shader
void main()
{
    worldVertexPosition_cs = (transformationMatrix * vec4(position_vs, 1.0)).xyz;
    worldTextureCoords_cs = textureCoords_vs;
    worldNormal_cs = mat3(transpose(inverse(transformationMatrix))) * normal_vs;
}
Control Shader
float getTessLevel(float distance0, float distance1)
{
    float avgDistance = (distance0 + distance1) / 2.0;
    avgDistance = (100 - avgDistance) / 20;
    if (avgDistance < 1) {
        avgDistance = 1;
    }
    return avgDistance;
}
void main()
{
    worldTextureCoords_es[gl_InvocationID] = worldTextureCoords_cs[gl_InvocationID];
    worldNormal_es[gl_InvocationID] = worldNormal_cs[gl_InvocationID];
    worldVertexPosition_es[gl_InvocationID] = worldVertexPosition_cs[gl_InvocationID];
    float eyeToVertexDistance0 = distance(eyePos, worldVertexPosition_es[0]);
    float eyeToVertexDistance1 = distance(eyePos, worldVertexPosition_es[1]);
    float eyeToVertexDistance2 = distance(eyePos, worldVertexPosition_es[2]);
    gl_TessLevelOuter[0] = getTessLevel(eyeToVertexDistance1, eyeToVertexDistance2);
    gl_TessLevelOuter[1] = getTessLevel(eyeToVertexDistance2, eyeToVertexDistance0);
    gl_TessLevelOuter[2] = getTessLevel(eyeToVertexDistance0, eyeToVertexDistance1);
    gl_TessLevelInner[0] = gl_TessLevelOuter[2];
}
Evaluation Shader
vec2 interpolate2D(vec2 v0, vec2 v1, vec2 v2)
{
    return vec2(gl_TessCoord.x) * v0 + vec2(gl_TessCoord.y) * v1 + vec2(gl_TessCoord.z) * v2;
}
vec3 interpolate3D(vec3 v0, vec3 v1, vec3 v2)
{
    return vec3(gl_TessCoord.x) * v0 + vec3(gl_TessCoord.y) * v1 + vec3(gl_TessCoord.z) * v2;
}
void main()
{
    worldTextureCoords_fs = interpolate2D(worldTextureCoords_es[0], worldTextureCoords_es[1], worldTextureCoords_es[2]);
    worldNormal_fs = interpolate3D(worldNormal_es[0], worldNormal_es[1], worldNormal_es[2]);
    worldNormal_fs = normalize(worldNormal_fs);
    worldVertexPosition_fs = interpolate3D(worldVertexPosition_es[0], worldVertexPosition_es[1], worldVertexPosition_es[2]);
    float displacement = texture(texture_displacement0, worldTextureCoords_fs.xy).x;
    worldVertexPosition_fs += worldNormal_fs * (displacement / 1.0f);
    gl_Position = projectionMatrix * viewMatrix * vec4(worldVertexPosition_fs.xyz, 1.0);
}
Fragment Shader
void main()
{
    vec3 unitNormal = normalize(worldNormal_fs);
    vec3 unitLightVector = normalize(lightPosition - worldVertexPosition_fs);
    float dotResult = dot(unitNormal, unitLightVector);
    float brightness = max(dotResult, blackPoint);
    vec3 diffuse = brightness * lightColor;
    FragColor = vec4(diffuse, 1.0) * texture(texture_diffuse0, worldTextureCoords_fs);
    FragColor.rgb = pow(FragColor.rgb, vec3(1.0 / gamma));
}
In the Tessellation Evaluation Shader you have to define the winding order of the generated triangles.
This is done via the cw and ccw input layout qualifiers; the default is ccw.
Either generate clockwise primitives:
layout(triangles, cw) in;
Or generate counterclockwise primitives:
layout(triangles, ccw) in;
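The winding qualifier sits alongside the evaluation shader's other input layout qualifiers, so a complete declaration might look like this (equal_spacing is chosen here purely for illustration):
layout(triangles, equal_spacing, cw) in;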

How to implement Screen Space Reflection with DDA

I am trying to implement screen space reflection with DDA.
http://casual-effects.blogspot.jp/2014/08/screen-space-ray-tracing.html
But it is not working well.
Below are my shaders.
This is the vertex shader:
layout(location = 0) in vec4 position;
layout(location = 1) in vec4 color_0;
layout(location = 2) in vec3 normal;
uniform mat4 mtxL2W; // Local to World space.
uniform mat4 mtxW2C; // World to Clip space.
out vec4 varColor;
out vec3 varNormal;
void main()
{
    gl_Position = mtxW2C * mtxL2W * position;
    varColor = color_0;
    varNormal = normalize(mtxL2W * vec4(normal, 0)).xyz;
}
This is the fragment shader:
in vec4 varColor;
in vec3 varNormal;
layout(location = 0) out vec4 outColor;
uniform sampler2D s0; // color
uniform sampler2D s1; // linear depth.
uniform mat4 mtxW2V; // World to View(Camera) space.
uniform mat4 mtxV2C; // View(Camera) to Clip space.
uniform mat4 mtxC2V; // Clip to View(Camera) space.
uniform mat4 mtxV2W; // View(Camera) to World space.
uniform vec4 camPos; // Camera position (World space).
uniform float nearPlaneZ;
uniform float maxDistance;
uniform float zThickness;
uniform int maxSteps;
uniform float stride;
float squaredLength(vec2 a, vec2 b)
{
    a -= b;
    return dot(a, a);
}
bool intersectsDepthBuffer(float z, float minZ, float maxZ)
{
    z += zThickness;
    return (maxZ >= z) && (minZ - zThickness <= z);
}
bool traceScreenSpaceRay(
    vec3 csOrig,
    vec3 csDir,
    out vec2 hitPixel,
    out vec3 hitPoint)
{
    // Clip to the near plane.
    float rayLength = (csOrig.z + csDir.z * maxDistance) < nearPlaneZ
        ? (nearPlaneZ - csOrig.z) / csDir.z
        : maxDistance;
    vec3 csEndPoint = csOrig + csDir * rayLength;
    // Project into homogeneous clip space.
    vec4 H0 = mtxV2C * vec4(csOrig, 1);
    vec4 H1 = mtxV2C * vec4(csEndPoint, 1);
    float k0 = 1.0 / H0.w;
    float k1 = 1.0 / H1.w;
    // The interpolated homogeneous version of the camera-space points.
    vec3 Q0 = csOrig * k0;
    vec3 Q1 = csEndPoint * k1;
    // Screen space points.
    vec2 P0 = H0.xy * k0;
    vec2 P1 = H1.xy * k1;
    // [-1, 1] -> [0, 1]
    P0 = P0 * 0.5 + 0.5;
    P1 = P1 * 0.5 + 0.5;
    ivec2 texsize = textureSize(s0, 0);
    P0 *= vec2(texsize.xy);
    P1 *= vec2(texsize.xy);
    P1.x = min(max(P1.x, 0), texsize.x);
    P1.y = min(max(P1.y, 0), texsize.y);
    // If the line is degenerate, make it cover at least one pixel
    // to avoid handling zero-pixel extent as a special case later.
    P1 += squaredLength(P0, P1) < 0.0001
        ? vec2(0.01, 0.01)
        : vec2(0.0);
    vec2 delta = P1 - P0;
    // Permute so that the primary iteration is in x
    // to collapse all quadrant-specific DDA cases later.
    bool permute = false;
    if (abs(delta.x) < abs(delta.y))
    {
        permute = true;
        delta = delta.yx;
        P0 = P0.yx;
        P1 = P1.yx;
    }
    float stepDir = sign(delta.x);
    float invdx = stepDir / delta.x;
    // Track the derivatives of Q and k.
    vec3 dQ = (Q1 - Q0) / invdx;
    float dk = (k1 - k0) / invdx;
    // y is slope: slope = (y1 - y0) / (x1 - x0)
    vec2 dP = vec2(stepDir, delta.y / invdx);
    // Adjust end condition for iteration direction.
    float end = P1.x * stepDir;
    int stepCount = 0;
    float prevZMaxEstimate = csOrig.z;
    float rayZMin = prevZMaxEstimate;
    float rayZMax = prevZMaxEstimate;
    float sceneZMax = rayZMax + 100.0f;
    dP *= stride;
    dQ *= stride;
    dk *= stride;
    vec4 PQk = vec4(P0, Q0.z, k0);
    vec4 dPQk = vec4(dP, dQ.z, dk);
    vec3 Q = Q0;
    for (;
         ((PQk.x * stepDir) <= end)
         && (stepCount < maxSteps)
         && !intersectsDepthBuffer(sceneZMax, rayZMin, rayZMax)
         && (sceneZMax != 0.0);
         ++stepCount)
    {
        rayZMin = prevZMaxEstimate;
        rayZMax = (PQk.z + dPQk.z * 0.5) / (PQk.w + dPQk.w * 0.5);
        prevZMaxEstimate = rayZMax;
        if (rayZMin > rayZMax) {
            float tmp = rayZMin;
            rayZMin = rayZMax;
            rayZMax = tmp;
        }
        hitPixel = permute ? PQk.yx : PQk.xy;
        //hitPixel.y = texsize.y - hitPixel.y;
        sceneZMax = texelFetch(s1, ivec2(hitPixel), 0).r;
        PQk += dPQk;
    }
    // Advance Q based on the number of steps.
    Q.xy += dQ.xy * stepCount;
    hitPoint = Q * (1.0f / PQk.w);
    hitPoint = vec3(sceneZMax, rayZMin, rayZMax); // debug output: overwrites the hit point just computed
    return intersectsDepthBuffer(sceneZMax, rayZMin, rayZMax);
}
void main()
{
    vec3 normal = normalize(varNormal);
    float linearDepth = texelFetch(s1, ivec2(gl_FragCoord.xy), 0).r;
    ivec2 texsize = textureSize(s0, 0);
    // Ray origin is the camera origin.
    vec3 rayOrg = camPos.xyz;
    // Screen coordinate.
    vec4 pos = vec4(gl_FragCoord.xy / texsize, 0, 1);
    // [0, 1] -> [-1, 1]
    pos.xy = pos.xy * 2.0 - 1.0;
    // Screen space -> Clip space
    pos.xy *= linearDepth;
    // Clip space -> View space
    pos = mtxC2V * pos;
    pos.z = linearDepth;
    // View space -> World space
    vec3 worldPos = (mtxV2W * vec4(pos.xyz, 1)).xyz;
    // Compute the ray direction: from the ray origin to the world position.
    vec3 rayDir = normalize(worldPos - rayOrg);
    // Compute the reflection vector.
    vec3 refDir = reflect(rayDir, normal);
    // The reflection vector's origin is the world position.
    vec3 refOrg = worldPos;
    // Transform to view space.
    refOrg = (mtxW2V * vec4(refOrg, 1)).xyz;
    refDir = (mtxW2V * vec4(refDir, 0)).xyz;
    vec2 hitPixel = vec2(0, 0);
    vec3 hitPoint = vec3(0, 0, 0);
    // Trace the screen space ray.
    bool isIntersect = traceScreenSpaceRay(refOrg, refDir, hitPixel, hitPoint);
    vec2 uv = hitPixel / texsize.xy;
    if (uv.x > 1.0 || uv.x < 0.0 || uv.y > 1.0 || uv.y < 0.0) {
        isIntersect = false;
    }
    if (isIntersect) {
        outColor = varColor * texture(s0, uv);
    } else {
        outColor = vec4(1, 1, 1, 1);
    }
}
I think Q0.z and Q1.z are always 1.0.
So, I think dQ.z is also always 0.0.
And dk is always negative.
(The constant Q.z does follow from the algebra: with a standard perspective matrix the clip-space w equals the view-space depth up to sign, so Q0.z = csOrig.z * k0 = csOrig.z / H0.w comes out to the same constant, ±1 depending on convention, for every point.)
What is wrong?
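One concrete thing to check against the reference implementation in the linked article: as best I recall, it multiplies the derivatives by invdx rather than dividing by it (invdx is already the reciprocal of the x span), along these lines:
// From the linked article's reference code, as best I recall it
// (note * invdx, where the shader above divides by invdx):
float stepDir = sign(delta.x);
float invdx = stepDir / delta.x;
vec3  dQ = (Q1 - Q0) * invdx;
float dk = (k1 - k0) * invdx;
vec2  dP = vec2(stepDir, delta.y * invdx);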

Determining cascaded shadow map split ranges

I'm doing cascaded shadow maps, and I believe I have a problem concerning the way I do the split comparison to select the proper shadow map. As it stands, the shadow mapping works overall, but in a few cases, at certain angles, it does not.
Currently the lighting shader stage looks like this:
"#version 420
const float DEPTH_BIAS = 0.00005;
layout(std140) uniform UnifDirLight
{
mat4 mVPMatrix[4];
mat4 mCamViewMatrix;
vec4 mSplitDistance;
vec4 mLightColor;
vec4 mLightDir;
vec4 mGamma;
vec2 mScreenSize;
} UnifDirLightPass;
layout (binding = 2) uniform sampler2D unifPositionTexture;
layout (binding = 3) uniform sampler2D unifNormalTexture;
layout (binding = 4) uniform sampler2D unifDiffuseTexture;
layout (binding = 6) uniform sampler2DArrayShadow unifShadowTexture;
out vec4 fragColor;
void main()
{
vec2 texcoord = gl_FragCoord.xy / UnifDirLightPass.mScreenSize;
vec3 worldPos = texture(unifPositionTexture, texcoord).xyz;
vec3 normal = normalize(texture(unifNormalTexture, texcoord).xyz);
vec3 diffuse = texture(unifDiffuseTexture, texcoord).xyz;
vec4 camPos = UnifDirLightPass.mCamViewMatrix * vec4(worldPos, 1.0); // legit way of determining the split?
int index = 3;
if (camPos .z > UnifDirLightPass.mSplitDistance.x)
index = 0;
else if (camPos .z > UnifDirLightPass.mSplitDistance.y)
index = 1;
else if (camPos .z > UnifDirLightPass.mSplitDistance.z)
index = 2;
vec4 projCoords = UnifDirLightPass.mVPMatrix[index] * vec4(worldPos, 1.0);
projCoords.w = projCoords.z - DEPTH_BIAS;
projCoords.z = float(index);
float visibilty = texture(unifShadowTexture, projCoords);
float angleNormal = clamp(dot(normal, UnifDirLightPass.mLightDir.xyz), 0, 1);
fragColor = vec4(diffuse, 1.0) * visibilty * angleNormal * UnifDirLightPass.mLightColor;
}
And the "mSplitDistance", each component is the center fardistance of the frustrum for that split, multiplied by the main cameras view matrix
Vec4 camFarDistCenter;
CameraFrustrum cameraFrustrum = CalculateCameraFrustrum(nearDistArr[cascadeIndex], farDistArr[cascadeIndex], lighting.mCameraPosition, lighting.mCameraDirection, camFarDistCenter);
.....
camFarDistCenter = lighting.mCameraViewMatrix * camFarDistCenter;
splitDistances[cascadeIndex] = camFarDistCenter.z;
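For choosing the nearDistArr/farDistArr values themselves, a common scheme, sketched here as an assumption rather than the poster's code, blends uniform and logarithmic splits (the "practical split" from Parallel-Split Shadow Maps; lambda = 0 gives uniform splits, lambda = 1 logarithmic):
#include <cmath>
#include <vector>

// Hedged sketch: far distance for each cascade; nearDistArr[i] would then be
// zNear for i == 0 and splits[i - 1] for the rest.
std::vector<float> computeSplitDistances(int cascadeCount, float zNear, float zFar,
                                         float lambda = 0.75f)
{
    std::vector<float> splits(cascadeCount);
    for (int i = 1; i <= cascadeCount; ++i) {
        float p = static_cast<float>(i) / cascadeCount;
        float logSplit = zNear * std::pow(zFar / zNear, p); // logarithmic distribution
        float uniSplit = zNear + (zFar - zNear) * p;        // uniform distribution
        splits[i - 1] = lambda * logSplit + (1.0f - lambda) * uniSplit;
    }
    return splits;
}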
Here's how I create the camera frustum for each split, if it's of interest; I believe this is a pretty common algorithm:
CameraFrustrum CalculateCameraFrustrum(const float minDist, const float maxDist, const Vec3& cameraPosition, const Vec3& cameraDirection, Vec4& camFarZ)
{
    CameraFrustrum ret = { Vec4(-1.0f, -1.0f, 1.0f, 1.0f), Vec4(-1.0f, -1.0f, -1.0f, 1.0f), Vec4(-1.0f, 1.0f, 1.0f, 1.0f), Vec4(-1.0f, 1.0f, -1.0f, 1.0f),
                           Vec4(1.0f, -1.0f, 1.0f, 1.0f), Vec4(1.0f, -1.0f, -1.0f, 1.0f), Vec4(1.0f, 1.0f, 1.0f, 1.0f), Vec4(1.0f, 1.0f, -1.0f, 1.0f) };
    const Vec3 forwardVec = glm::normalize(cameraDirection);
    const Vec3 rightVec = glm::normalize(glm::cross(forwardVec, Vec3(0.0f, 0.0f, 1.0f)));
    const Vec3 upVec = glm::normalize(glm::cross(rightVec, forwardVec));
    const Vec3 nearCenter = cameraPosition + forwardVec * minDist;
    const Vec3 farCenter = cameraPosition + forwardVec * maxDist;
    camFarZ = Vec4(farCenter, 1.0);
    const float nearHeight = tan(glm::radians(70.0f) / 2.0f) * minDist;
    const float nearWidth = nearHeight * 1920.0f / 1080.0f;
    const float farHeight = tan(glm::radians(70.0f) / 2.0f) * maxDist;
    const float farWidth = farHeight * 1920.0f / 1080.0f;
    ret[0] = Vec4(nearCenter - (upVec * nearHeight) - (rightVec * nearWidth), 1.0);
    ret[1] = Vec4(nearCenter + (upVec * nearHeight) - (rightVec * nearWidth), 1.0);
    ret[2] = Vec4(nearCenter + (upVec * nearHeight) + (rightVec * nearWidth), 1.0);
    ret[3] = Vec4(nearCenter - (upVec * nearHeight) + (rightVec * nearWidth), 1.0);
    ret[4] = Vec4(farCenter - upVec * farHeight - rightVec * farWidth, 1.0);
    ret[5] = Vec4(farCenter + upVec * farHeight - rightVec * farWidth, 1.0);
    ret[6] = Vec4(farCenter + upVec * farHeight + rightVec * farWidth, 1.0);
    ret[7] = Vec4(farCenter - upVec * farHeight + rightVec * farWidth, 1.0);
    return ret;
}
Is it sound to do the split comparison in camera space like I do? Is that a potential problem?
void CalculateCameraFrustrum(glm::mat4& projectionMatrix,
                             glm::mat4 viewMatrix,   // viewMatrix = light POV
                             glm::vec3 camera,       // camera = eye position + eye direction
                             float zNear,
                             float zFar,
                             glm::vec4 point[4])     // point[4] = shadow map boundaries
{
    glm::mat4 shadMvp = projectionMatrix * (viewMatrix * glm::translate(camera));
    glm::vec4 transf; // vec4: the w component is needed for the perspective divide below
    float maxX = zNear, minX = zFar,
          maxY = zNear, minY = zFar;
    int i = -1;
    while (++i < 4)
    {
        transf = shadMvp * point[i];
        transf.x /= transf.w;
        transf.y /= transf.w;
        if (transf.x > maxX)
            maxX = transf.x;
        if (transf.x < minX)
            minX = transf.x;
        if (transf.y > maxY)
            maxY = transf.y;
        if (transf.y < minY)
            minY = transf.y;
    }
    float scaleX = 2.0f / (maxX - minX),
          scaleY = 2.0f / (maxY - minY),
          offsetX = -0.5f * (maxX + minX) * scaleX,
          offsetY = -0.5f * (maxY + minY) * scaleY;
    shadMvp = glm::mat4(1); // identity matrix
    shadMvp[0][0] = scaleX;
    shadMvp[1][1] = scaleY;
    shadMvp[3][0] = offsetX; // GLM is column-major: m[3] is the translation column
    shadMvp[3][1] = offsetY;
    projectionMatrix = shadMvp * projectionMatrix; // apply the crop in NDC, after the projection
} // No need to calculate view frustum splitting,
  // only the boundaries of the shadow map levels are needed (glm::ortho(...)).
  // :)