I am following along with the LearnOpenGL guide and am trying to implement Steep Parallax Mapping.
Everything seems to be working fine, except that my brick wall shows distinct visible layers, whereas the photos in the guide don't show any. I was also trying to use this code to parallax-map the topography of the world, and the same weird layers show up there too, so I was hoping to find a fix for this.
Layered wall photo
Photo of how it should look
Here is my modified vertex shader
#version 300 es
in vec4 vPosition; // aPos
in vec2 texCoord; // aTexCoords
in vec4 vNormal; // aNormal
in vec4 vTangent; // aTangent
uniform mat4 model_view;
uniform mat4 projection;
uniform vec4 light_position;
out vec2 ftexCoord;
out vec3 vT;
out vec3 vN;
out vec4 position;
out vec3 FragPos;
out vec3 TangentLightPos;
out vec3 TangentViewPos;
out vec3 TangentFragPos;
void
main()
{
// Normal variables
vN = normalize(model_view * vNormal).xyz;
vT = normalize(model_view * vTangent).xyz;
vec4 veyepos = model_view*vPosition;
position = veyepos;
ftexCoord = texCoord;
// Displacement variables
vec3 bi = cross(vT, vN);
FragPos = vec3(model_view * vPosition).xyz;
vec3 T = normalize(mat3(model_view) * vTangent.xyz);
vec3 B = normalize(mat3(model_view) * bi);
vec3 N = normalize(mat3(model_view) * vNormal.xyz);
mat3 TBN = transpose(mat3(T, B, N));
TangentLightPos = TBN * light_position.xyz;
TangentViewPos = TBN * vPosition.xyz;
TangentFragPos = TBN * FragPos;
gl_Position = projection * model_view * vPosition;
}
and my modified fragment shader is here
#version 300 es
precision highp float;
in vec2 ftexCoord;
in vec3 vT; //parallel to surface in eye space
in vec3 vN; //perpendicular to surface in eye space
in vec4 position;
in vec3 FragPos;
in vec3 TangentLightPos;
in vec3 TangentViewPos;
in vec3 TangentFragPos;
uniform int mode;
uniform vec4 light_position;
uniform vec4 light_color;
uniform vec4 ambient_light;
uniform sampler2D colorMap;
uniform sampler2D normalMap;
uniform sampler2D depthMap;
out vec4 fColor;
// STEEP PARALLAX MAPPING
vec2 ParallaxMapping(vec2 texCoords, vec3 viewDir)
{
// number of depth layers
const float minLayers = 8.0;
const float maxLayers = 32.0;
float numLayers = mix(maxLayers, minLayers, abs(dot(vec3(0.0, 0.0, 1.0), viewDir)));
// calculate the size of each layer
float layerDepth = 1.0 / numLayers;
// depth of current layer
float currentLayerDepth = 0.0;
// the amount to shift the texture coordinates per layer (from vector P)
vec2 P = viewDir.xy / viewDir.z * 0.1;
vec2 deltaTexCoords = P / numLayers;
// get initial values
vec2 currentTexCoords = texCoords;
float currentDepthMapValue = texture(depthMap, currentTexCoords).r;
while(currentLayerDepth < currentDepthMapValue)
{
// shift texture coordinates along direction of P
currentTexCoords -= deltaTexCoords;
// get depthmap value at current texture coordinates
currentDepthMapValue = texture(depthMap, currentTexCoords).r;
// get depth of next layer
currentLayerDepth += layerDepth;
}
return currentTexCoords;
}
void main()
{
// DO NORMAL MAPPING
if (mode == 0) {
vec3 T = normalize(vT);
vec3 N = normalize(vN);
vec3 bi = cross(T, N);
mat4 changeOfCoord = mat4(vec4(T, 0), vec4(bi, 0), vec4(N, 0), vec4(0, 0, 0, 1));
vec3 L = normalize(light_position - position).xyz;
vec3 E = normalize(-position).xyz;
vec4 text = vec4(texture(normalMap, ftexCoord) * 2.0 - 1.0);
vec4 eye = changeOfCoord * text;
vec4 amb = texture(colorMap, ftexCoord) * ambient_light;
vec4 diff = max(0.0, dot(L, eye.xyz)) * light_color * texture(colorMap, ftexCoord);
fColor = amb + diff;
} else if (mode == 1) { // DO PARALLAX MAPPING
// offset texture coordinates with Parallax Mapping
vec3 viewDir = normalize(TangentViewPos - TangentFragPos);
vec2 texCoords = ftexCoord;
texCoords = ParallaxMapping(ftexCoord, viewDir);
// discard samples outside of the default texture coordinate space
if(texCoords.x > 1.0 || texCoords.y > 1.0 || texCoords.x < 0.0 || texCoords.y < 0.0)
discard;
// obtain normal from normal map
vec3 normal = texture(normalMap, texCoords).rgb;
//values stored in normal texture is [0,1] range, we need [-1, 1] range
normal = normalize(normal * 2.0 - 1.0);
// get diffuse color
vec3 color = texture(colorMap, texCoords).rgb;
// ambient
vec3 ambient = 0.1 * color;
// diffuse
vec3 lightDir = normalize(TangentLightPos - TangentFragPos);
float diff = max(dot(lightDir, normal), 0.0);
vec3 diffuse = diff * color;
// specular
vec3 reflectDir = reflect(lightDir, normal);
vec3 halfwayDir = normalize(lightDir + viewDir);
float spec = pow(max(dot(normal, halfwayDir), 0.0), 32.0);
vec3 specular = vec3(0.2) * spec;
fColor = vec4(ambient + diffuse + 0.0, 1.0);
}
}
The layers at acute viewing angles are a common artifact of steep parallax mapping. To improve the result you have to increase the number of samples or implement Parallax Occlusion Mapping (as described in the bottom part of the tutorial):
// PARALLAX OCCLUSION MAPPING
vec2 ParallaxMapping(vec2 texCoords, vec3 viewDir)
{
// number of depth layers
const float minLayers = 8.0;
const float maxLayers = 32.0;
float numLayers = mix(maxLayers, minLayers, abs(dot(vec3(0.0, 0.0, 1.0), viewDir)));
// calculate the size of each layer
float layerDepth = 1.0 / numLayers;
// depth of current layer
float currentLayerDepth = 0.0;
// the amount to shift the texture coordinates per layer (from vector P)
vec2 P = viewDir.xy / viewDir.z * 0.1;
vec2 deltaTexCoords = P / numLayers;
// get initial values
vec2 currentTexCoords = texCoords;
float currentDepthMapValue = texture(depthMap, currentTexCoords).r;
while(currentLayerDepth < currentDepthMapValue)
{
// shift texture coordinates along direction of P
currentTexCoords -= deltaTexCoords;
// get depthmap value at current texture coordinates
currentDepthMapValue = texture(depthMap, currentTexCoords).r;
// get depth of next layer
currentLayerDepth += layerDepth;
}
// get texture coordinates before collision (reverse operations)
vec2 prevTexCoords = currentTexCoords + deltaTexCoords;
// get depth after and before collision for linear interpolation
float afterDepth = currentDepthMapValue - currentLayerDepth;
float beforeDepth = texture(depthMap, prevTexCoords).r - currentLayerDepth + layerDepth;
// interpolation of texture coordinates
float weight = afterDepth / (afterDepth - beforeDepth);
vec2 finalTexCoords = prevTexCoords * weight + currentTexCoords * (1.0 - weight);
return finalTexCoords;
}
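The two samples on either side of the hit give the (signed) differences between the height-field depth and the layer depth just after and just before the collision; the weight afterDepth / (afterDepth - beforeDepth) then linearly interpolates the two texture coordinates to approximate where the view ray actually crosses the surface, which removes the visible stair-stepping between layers.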
By the way, the bitangent vector seems to be inverted. Commonly the bitangent is the cross product of the normal vector and the tangent in a right-handed system, but that depends on the displacement texture. Change
vec3 bi = cross(vT, vN);
to
vec3 bi = cross(vN, vT);
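If your tangent attribute carries a handedness sign in its w component (a common convention, though nothing in the question's code indicates it does), you can account for either handedness explicitly, e.g.:
vec3 bi = cross(vN, vT) * vTangent.w; // w is +1.0 or -1.0, encoding the UV handedness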
See further:
Bump Mapping with javascript and glsl
Normal, Parallax and Relief mapping
Demo
Most of the shader code follows the LearnOpenGL instructions. I have made sure that the g-buffer and noise data passed into the shader are correct. It looks like some kind of dislocation, but I really can't figure out why this happens.
Misplaced SSAO screenshot
#version 450 core
out float OUT_FragColor;
in vec2 TexCoords;
uniform sampler2D g_position;
uniform sampler2D g_normal;
uniform sampler2D noise_texture;
struct CameraInfo
{
vec4 position;
mat4 view;
mat4 projection;
};
layout(std140, binding = 0) uniform Camera
{
CameraInfo camera;
};
float radius = 0.5;
float bias = 0.025;
uniform int noise_tex_size;
// assumed declaration (not shown in the original snippet): the SSAO sample kernel
uniform vec4 sample_array[64];
void main()
{
const vec2 noise_scale = vec2(1280.0/noise_tex_size, 720.0/noise_tex_size);
vec3 frag_pos = texture(g_position, TexCoords).xyz;
vec3 normal = normalize(texture(g_normal, TexCoords).xyz);
vec3 random_vec = normalize(texture(noise_texture, TexCoords * noise_scale).xyz);
vec3 tangent = normalize(random_vec - normal * dot(random_vec, normal));
vec3 bitangent = cross(normal, tangent);
mat3 TBN = mat3(tangent, bitangent, normal);
float occlusion = 0.f;
for(int i = 0; i < sample_array.length(); ++i)
{
vec3 sample_pos = TBN * sample_array[i].xyz;
sample_pos = frag_pos + sample_pos * radius;
vec4 offset = vec4(sample_pos, 1.0);
offset = camera.projection * offset; // from view to clip-space
offset.xyz /= offset.w; // perspective divide ?????
offset.xyz = offset.xyz * 0.5 + 0.5; // transform to range 0.0 - 1.0
float sample_depth = texture(g_position, offset.xy).z;
float range_check = smoothstep(0.f, 1.f, radius / abs(frag_pos.z - sample_depth));
occlusion += (sample_depth >= sample_pos.z + bias ? 1.0 : 0.0) * range_check; // ignore sample points that are too near the origin
}
occlusion = 1.f - (occlusion / sample_array.length());
OUT_FragColor = occlusion;
}
Transform the g_position and g_normal into view space:
FragPos = (camera.view * vec4(WorldPos, 1.0)).xyz;
mat4 normal_matrix = camera.view * mat4(transpose(inverse(mat3(model))));
FragNormal = mat3(normal_matrix) * normal;
The normal mapping looks great when the objects aren't rotated from the origin, and spot lights and directional lights work, but when I spin an object on the spot it darkens and then lightens again, just on the top face.
I'm testing using a cube. I've used a geometry shader to visualise my calculated normals (after multiplying by a TBN matrix), and they appear to be in the correct places. If I take the normal map out of the equation then the lighting is fine.
Here's where the TBN is calculated:
void calculateTBN()
{
//get the normal matrix
mat3 model = mat3(transpose(inverse(mat3(transform))));
vec3 T = normalize(vec3(model * tangent.xyz ));
vec3 N = normalize(vec3(model * normal ));
vec3 B = cross(N, T);
mat3 TBN = mat3( T , B , N);
outputVertex.TBN =TBN;
}
And the normal is sampled and transformed:
vec3 calculateNormal()
{
//Sort the input so that the normal is between 1 and minus 1 instead of 0 and 1
vec3 input = texture2D(normalMap, inputFragment.textureCoord).xyz;
input = 2.0 * input - vec3(1.0, 1.0, 1.0);
vec3 newNormal = normalize(inputFragment.TBN* input);
return newNormal;
}
My Lighting is in world space (as far as I understand the term, it takes into account the transform matrix but not the camera or projection matrix)
I did try the technique where I pass down the TBN as its inverse (or transpose) and then multiply every vector apart from the normal by it. That had the same effect. I'd rather work in world space anyway, as apparently this is better for deferred lighting? Or so I've heard.
If you'd like to see any of the lighting code and so on I'll add it in but I didn't think it was necessary as it works apart from this.
EDIT:
As requested, here are the vertex shader and part of the fragment shader.
#version 330
uniform mat4 T; // Translation matrix
uniform mat4 S; // Scale matrix
uniform mat4 R; // Rotation matrix
uniform mat4 camera; // camera matrix
uniform vec4 posRelParent; // the position relative to the parent
// Input vertex packet
layout (location = 0) in vec4 position;
layout (location = 2) in vec3 normal;
layout (location = 3) in vec4 tangent;
layout (location = 4) in vec4 bitangent;
layout (location = 8) in vec2 textureCoord;
// Output vertex packet
out packet {
vec2 textureCoord;
vec3 normal;
vec3 vert;
mat3 TBN;
vec3 tangent;
vec3 bitangent;
vec3 normalTBN;
} outputVertex;
mat4 transform;
mat3 TBN;
void calculateTBN()
{
//get the normal matrix (the object's transform with the scaling and translation effects removed)
mat3 model = mat3(transpose(inverse(transform)));
vec3 T = normalize(model*tangent.xyz);
vec3 N = normalize(model*normal);
//I used to retrieve the bitangents by crossing the normal and tangent but now they are calculated independently
vec3 B = normalize(model*bitangent.xyz);
TBN = mat3( T , B , N);
outputVertex.TBN = TBN;
//Pass though TBN vectors for colour debugging in the fragment shader
outputVertex.tangent = T;
outputVertex.bitangent = B;
outputVertex.normalTBN = N;
}
void main(void) {
outputVertex.textureCoord = textureCoord;
// Setup local variable pos in case we want to modify it (since position is constant)
vec4 pos = vec4(position.x, position.y, position.z, 1.0) + posRelParent;
//Work out the transform matrix
transform = T * R * S;
//Work out the normal for lighting
mat3 normalMat = transpose(inverse(mat3(transform)));
outputVertex.normal = normalize(normalMat* normal);
calculateTBN();
outputVertex.vert =(transform* pos).xyz;
//Work out the final pos of the vertex
gl_Position = camera * transform * pos;
}
And the lighting function from the fragment shader:
vec3 applyLight(Light thisLight, vec3 baseColor, vec3 surfacePos, vec3 surfaceToCamera)
{
float attenuation = 1.0f;
vec3 lightPos = (thisLight.finalLightMatrix*thisLight.position).xyz;
vec3 surfaceToLight;
vec3 coneDir = normalize(thisLight.coneDirection);
if (thisLight.position.w == 0.0f)
{
//Directional Light (all rays same angle, use position as direction)
surfaceToLight = normalize( (thisLight.position).xyz);
attenuation = 1.0f;
}
else
{
//Point light
surfaceToLight = normalize(lightPos - surfacePos);
float distanceToLight = length(lightPos - surfacePos);
attenuation = 1.0 / (1.0f + thisLight.attenuation * pow(distanceToLight, 2));
//Work out the Cone restrictions
float lightToSurfaceAngle = degrees(acos(dot(-surfaceToLight, normalize(coneDir))));
if (lightToSurfaceAngle > thisLight.coneAngle)
{
attenuation = 0.0;
}
}
// ... (the rest of the lighting calculation and the return value are omitted here)
}
Here's the main of the frag shader too:
void main(void) {
//get the base colour from the texture
vec4 tempFragColor = texture2D(textureImage, inputFragment.textureCoord).rgba;
//Support for objects with and without a normal map
if (useNormalMap == 1)
{
calcedNormal = calculateNormal();
}
else
{
calcedNormal = inputFragment.normal;
}
vec3 surfaceToCamera = normalize((cameraPos_World) - (inputFragment.vert));
vec3 tempColour = vec3(0.0, 0.0, 0.0);
for (int count = 0; count < numLights; count++)
{
tempColour += applyLight(allLights[count], tempFragColor.xyz, inputFragment.vert, surfaceToCamera);
}
vec3 gamma = vec3(1.0 / 2.2);
fragmentColour = vec4(pow(tempColour,gamma), tempFragColor.a);
//fragmentColour = vec4(calcedNormal, 1);
}
Edit 2:
Here is the geometry shader used to visualize the "sampled" normals (multiplied by the TBN matrix):
void GenerateLineAtVertex(int index)
{
vec3 testSampledNormal = vec3(0, 0, 1);
vec3 bitangent = cross(gs_in[index].normal, gs_in[index].tangent);
mat3 TBN = mat3(gs_in[index].tangent, bitangent, gs_in[index].normal);
testSampledNormal = TBN * testSampledNormal;
gl_Position = gl_in[index].gl_Position;
EmitVertex();
gl_Position =
gl_in[index].gl_Position
+ vec4(testSampledNormal, 0.0) * MAGNITUDE;
EmitVertex();
EndPrimitive();
}
And its vertex shader:
void main(void) {
// Setup local variable pos in case we want to modify it (since position is constant)
vec4 pos = vec4(position.x, position.y, position.z, 1.0);
mat4 transform = T* R * S;
// Apply transformation to pos and store result in gl_Position
gl_Position = projection* camera* transform * pos;
mat3 normalMatrix = mat3(transpose(inverse(camera * transform)));
vs_out.tangent = normalize(vec3(projection * vec4(normalMatrix * tangent.xyz, 0.0)));
vs_out.normal = normalize(vec3(projection * vec4(normalMatrix * normal , 0.0)));
}
Here are the TBN vectors visualized. The slight angles on the points are due to an issue with how I'm applying the projection matrix, rather than mistakes in the actual vectors. The red lines just show where the arrows I've drawn on the texture are; they're not very clear from that angle, that's all.
Problem Solved!
It actually had nothing to do with the code above, although thanks to everyone that helped.
I was importing the texture using my own texture loader, which by default loads 32-bit sRGB colour. I switched it to 24-bit plain RGB colour and it worked straight away. Typical developer problems... (That makes sense: a normal map stores linear vector data, not colour, so it must not be loaded through a gamma-decoding sRGB format.)
I faced a problem when trying to create a spotlight in my scene. The problem is that my camera moves around the scene, and because of this there is something wrong with the lighting. In addition, I see only a black screen. I understand that I missed a transformation somewhere, or did an extra one, but where - I really do not know.
Below is the code for my shaders.
Fragment shader:
#version 330 core
precision mediump float; // Set the default precision to medium. We don't need as high of a
// precision in the fragment shader.
#define MAX_LAMPS_COUNT 8 // Max lamps count.
uniform vec3 u_ViewPos; // Camera position
uniform int u_LampsCount; // Lamps count
uniform int u_ShadowMapWidth = 1024; // shadow map width / default is 1024
uniform int u_ShadowMapHeight = 1024; // shadow map height / default is 1024
uniform float brightnessThreshold = 0.5; // brightness threshold variable
uniform float far_plane = 16;
varying mat4 v_MVMatrix; // Model View matrix
varying mat3 v_TBN; // Tangent Bitangent Normal matrix
varying vec4 v_Position; // Position for this fragment.
varying vec3 v_Normal; // Interpolated normal for this fragment.
varying vec2 v_Texture; // Texture coordinates.
varying float v_NormalMapping; // Is normal mapping enabled 0 - false, 1 - true
struct Lamp {
float ambientStrength;
float diffuseStrength;
float specularStrength;
float kc; // constant term
float kl; // linear term
float kq; // quadratic term
int shininess;
vec3 lampPos; // in eye space, cameraViewMatrix * lamp world coordinates
vec3 lampColor;
};
uniform samplerCube shadowMaps[MAX_LAMPS_COUNT];
uniform struct Mapping {
sampler2D ambient;
sampler2D diffuse;
sampler2D specular;
sampler2D normal;
} u_Mapping;
uniform Lamp u_Lamps[MAX_LAMPS_COUNT];
vec3 norm;
vec3 fragPos;
float shadow;
// output colors
layout(location = 0) out vec4 fragColor;
layout(location = 1) out vec4 fragBrightColor;
float calculateShadow(int textureIndex, vec3 lightPos) {
// get vector between fragment position and light position
vec3 fragToLight = fragPos - lightPos;
// use the light to fragment vector to sample from the depth map
float closestDepth = texture(shadowMaps[textureIndex], fragToLight).r;
// it is currently in linear range between [0,1]. Re-transform back to original value
closestDepth *= far_plane;
// now get current linear depth as the length between the fragment and light position
float currentDepth = length(fragToLight);
// now test for shadows
float bias = 0.05;
float shadow = currentDepth - bias > closestDepth ? 1.0 : 0.0;
//fragColor = vec4(vec3(closestDepth / far_plane), 1.0); // visualization
return shadow;
}
float calculateAttenuation(Lamp lamp) {
float distance = length(lamp.lampPos - fragPos);
return 1.0 / (
lamp.kc +
lamp.kl * distance +
lamp.kq * (distance * distance)
);
}
vec4 toVec4(vec3 v) {
return vec4(v, 1);
}
// The entry point for our fragment shader.
void main() {
// Transform the vertex into eye space.
fragPos = vec3(v_MVMatrix * v_Position);
vec3 viewDir = normalize(u_ViewPos - fragPos);
if (v_NormalMapping == 0) norm = vec3(normalize(v_MVMatrix * vec4(v_Normal, 0)));
else { // using normal map if normal mapping enabled
norm = texture2D(u_Mapping.normal, v_Texture).rgb;
norm = normalize(norm * 2.0 - 1.0); // from [0, 1] to [-1, 1]
norm = normalize(v_TBN * norm);
}
vec3 ambientResult = vec3(0, 0, 0); // result of ambient lighting for all lamps
vec3 diffuseResult = vec3(0, 0, 0); // result of diffuse lighting for all lamps
vec3 specularResult = vec3(0, 0, 0); // result of specular lighting for all lamps
for (int i = 0; i<u_LampsCount; i++) {
// attenuation
float attenuation = calculateAttenuation(u_Lamps[i]);
// ambient
vec3 ambient = u_Lamps[i].ambientStrength * u_Lamps[i].lampColor * attenuation;
// diffuse
vec3 lightDir = normalize(u_Lamps[i].lampPos - fragPos);
float diff = max(dot(norm, lightDir), 0.0);
vec3 diffuse = u_Lamps[i].diffuseStrength * diff * u_Lamps[i].lampColor * attenuation;
// specular
vec3 reflectDir = reflect(-lightDir, norm);
float spec = pow(max(dot(viewDir, reflectDir), 0.0), u_Lamps[i].shininess);
vec3 specular = u_Lamps[i].specularStrength * spec * u_Lamps[i].lampColor * attenuation;
// fragment position in light space
//fragLightSpacePos = u_Lamps[i].lightSpaceMatrix * u_Lamps[i].lightModelMatrix * v_Position;
// calculate shadow
shadow = calculateShadow(i, u_Lamps[i].lampPos);
// result for this(i) lamp
ambientResult += ambient;
diffuseResult += diffuse * (1-shadow);
specularResult += specular * (1-shadow);
}
fragColor =
toVec4(ambientResult) * texture2D(u_Mapping.ambient, v_Texture) +
toVec4(diffuseResult) * texture2D(u_Mapping.diffuse, v_Texture) +
toVec4(specularResult) * texture2D(u_Mapping.specular, v_Texture);
// brightness calculation
//float brightness = dot(fragColor.rgb, vec3(0.2126, 0.7152, 0.0722));
//if (brightness > brightnessThreshold) fragBrightColor = vec4(fragColor.rgb, 1.0);
fragBrightColor = vec4(0, 0, 0, 1);
}
Vertex shader:
#version 130
uniform mat4 u_MVPMatrix; // A constant representing the combined model/view/projection matrix.
uniform mat4 u_MVMatrix; // A constant representing the combined model/view matrix.
uniform float u_NormalMapping; // Normal mapping; 0 - false, 1 - true
attribute vec4 a_Position; // Per-vertex position information we will pass in.
attribute vec3 a_Normal; // Per-vertex normal information we will pass in.
attribute vec3 a_Tangent; // Per-vertex tangent information we will pass in.
attribute vec3 a_Bitangent; // Per-vertex bitangent information we will pass in.
attribute vec2 a_Texture; // Per-vertex texture information we will pass in.
varying mat4 v_MVMatrix; // This will be passed into the fragment shader.
varying mat3 v_TBN; // This will be passed into the fragment shader.
varying vec4 v_Position; // This will be passed into the fragment shader.
varying vec3 v_Normal; // This will be passed into the fragment shader.
varying vec2 v_Texture; // This will be passed into the fragment shader.
varying float v_NormalMapping; // This will be passed into the fragment shader.
void main() {
// creating TBN (tangent-bitangent-normal) matrix if normal mapping enabled
if (u_NormalMapping == 1) {
vec3 T = normalize(vec3(u_MVMatrix * vec4(a_Tangent, 0.0)));
vec3 B = normalize(vec3(u_MVMatrix * vec4(a_Bitangent, 0.0)));
vec3 N = normalize(vec3(u_MVMatrix * vec4(a_Normal, 0.0)));
mat3 TBN = mat3(T, B, N);
v_TBN = TBN;
}
// gl_Position is a special variable used to store the final position.
// Multiply the vertex by the matrix to get the final point in normalized screen coordinates.
gl_Position = u_MVPMatrix * a_Position;
// sending all needed variables to fragment shader
v_Position = a_Position;
v_Texture = a_Texture;
v_NormalMapping = u_NormalMapping;
v_MVMatrix = u_MVMatrix;
v_Normal = a_Normal;
}
Vertex shadow shader:
#version 130
attribute vec3 a_Position;
uniform mat4 u_ModelMatrix;
void main() {
gl_Position = u_ModelMatrix * vec4(a_Position, 1.0);
}
Fragment shadow shader:
#version 330 core
in vec4 fragPos;
uniform vec3 lightPos; // cameraViewMatrix * lamp world coordinates
uniform float far_plane = 16;
void main()
{
float lightDistance = length(fragPos.xyz - lightPos);
// map to [0;1] range by dividing by far_plane
lightDistance = lightDistance / far_plane;
// write this as modified depth
gl_FragDepth = lightDistance;
}
Geometry shadow shader:
#version 330 core
layout (triangles) in;
layout (triangle_strip, max_vertices=18) out;
uniform mat4 shadowMatrices[6];
out vec4 fragPos; // FragPos from GS (output per emitvertex)
void main() {
for(int face = 0; face < 6; ++face) {
gl_Layer = face; // built-in variable that specifies to which face we render.
for(int i = 0; i < 3; ++i) // for each triangle's vertices
{
fragPos = gl_in[i].gl_Position;
gl_Position = shadowMatrices[face] * fragPos;
EmitVertex();
}
EndPrimitive();
}
}
And a video demonstrating the shadow map visualization:
https://youtu.be/zaNXGG1qLaw
I understand that I missed a transformation somewhere, or did an extra one, but where - I really do not know.
The content of shadowMaps[textureIndex] is probably a depth map taken in "light space". This means it is a depth map as seen from the light source.
But
fragPos = vec3(v_MVMatrix * v_Position);
and
struct Lamp {
.....
vec3 lampPos; // in eye space, cameraViewMatrix * lamp world coordinates
.....
};
are in view space coordinates. As a result,
vec3 fragToLight = fragPos - lightPos;
is a direction in view space, as seen from the camera.
If you do
float closestDepth = texture(shadowMaps[textureIndex], fragToLight).r;
then a "light space" map is accessed by a "view space" vector. The transformation from view space coordiantes to "light space" coordiantes is missing.
To solve the issue you need a matrix which transforms from world coordinates to "light space" coordinates. This is the inverse of the view-projection matrix that you used when you created the shadowMaps.
mat4 inverse_light_vp_mat[MAX_LAMPS_COUNT];
The fragment position has to be transformed to world coordinates, then it has to be transformed to "light space" coordinates, with inverse_light_vp_mat:
varying mat4 v_ModelMatrix; // Model matrix
vec4 fragLightPos = inverse_light_vp_mat[textureIndex] * v_ModelMatrix * v_Position;
fragLightPos.xyz /= fragLightPos.w;
In "light space" the light position is vec3( 0.0, 0.0, 0.0 ), because the position of the light source is the origin of the "light space". So the look up in the shadowMaps can be done directly with fragLightPos:
float closestDepth = texture(shadowMaps[textureIndex], fragLightPos.xyz).r;
The problem was solved. It was due to the fact that I was treating the shadow map as being in camera space (view space), but it needs to be in world space. During the calculation of the shadow itself, everything also has to be computed in world space.
Fragment shader:
vec3 fragToLight = vec3(model * v_Position) - lightPosWorldSpace;
or
vec3 fragToLight = vec3(model * v_Position) - vec3(inverse(view) * lightPos); // lightPos is a vec4
Fragment shadow shader:
float lightDistance = length(fragPos.xyz - lightPos); // lightPos is the lamp position in world space
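Putting the fix together, a minimal sketch of the corrected lookup with everything in world space (model and lampPosWorldSpace are illustrative names for uniforms holding the model matrix and the lamp's world position):
float calculateShadow(int textureIndex, vec3 lampPosWorldSpace) {
    // both operands are now in world space
    vec3 fragToLight = vec3(model * v_Position) - lampPosWorldSpace;
    // the cube map stores lamp-to-surface distances normalized by far_plane
    float closestDepth = texture(shadowMaps[textureIndex], fragToLight).r * far_plane;
    float currentDepth = length(fragToLight);
    float bias = 0.05;
    return currentDepth - bias > closestDepth ? 1.0 : 0.0;
}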
I'm trying to implement this version of SSAO with this tutorial:
http://www.learnopengl.com/#!Advanced-Lighting/SSAO
Here is what I end up with for my render textures.
When I move the camera, the shadows seem to follow.
It seems like I am missing some kind of matrix multiplication with the camera.
CODE
gBuffer Vertex
#version 330 core
layout (location = 0) in vec3 vertexPosition;
layout (location = 1) in vec3 vertexNormal;
out vec3 position;
out vec3 normal;
uniform mat4 m;
uniform mat4 v;
uniform mat4 p;
uniform mat4 n;
void main()
{
vec4 viewPos = v * m * vec4(vertexPosition, 1.0f);
position = viewPos.xyz;
gl_Position = p * viewPos;
normal = vec3(n * vec4(vertexNormal, 0.0f));
}
gBuffer Fragment
#version 330 core
layout (location = 0) out vec4 gPosition;
layout (location = 1) out vec3 gNormal;
layout (location = 2) out vec4 gColor;
in vec3 position;
in vec3 normal;
const float NEAR = 0.1f;
const float FAR = 50.0f;
float LinearizeDepth(float depth)
{
float z = depth * 2.0f - 1.0f;
return (2.0 * NEAR * FAR) / (FAR + NEAR - z * (FAR - NEAR));
}
void main()
{
gPosition.xyz = position;
gPosition.a = LinearizeDepth(gl_FragCoord.z);
gNormal = normalize(normal);
gColor.rgb = vec3(1.0f);
}
SSAO Vertex
#version 330 core
layout (location = 0) in vec3 vertexPosition;
layout (location = 1) in vec2 texCoords;
out vec2 UV;
void main(){
gl_Position = vec4(vertexPosition, 1.0f);
UV = texCoords;
}
SSAO Fragment
#version 330 core
out float FragColor;
in vec2 UV;
uniform sampler2D gPositionDepth;
uniform sampler2D gNormal;
uniform sampler2D texNoise;
uniform vec3 samples[32];
uniform mat4 projection;
// parameters (you'd probably want to use them as uniforms to more easily tweak the effect)
int kernelSize = 32;
float radius = 1.0;
// tile noise texture over screen based on screen dimensions divided by noise size
const vec2 noiseScale = vec2(1024.0f/4.0f, 1024.0f/4.0f);
void main()
{
// Get input for SSAO algorithm
vec3 fragPos = texture(gPositionDepth, UV).xyz;
vec3 normal = texture(gNormal, UV).rgb;
vec3 randomVec = texture(texNoise, UV * noiseScale).xyz;
// Create TBN change-of-basis matrix: from tangent-space to view-space
vec3 tangent = normalize(randomVec - normal * dot(randomVec, normal));
vec3 bitangent = cross(normal, tangent);
mat3 TBN = mat3(tangent, bitangent, normal);
// Iterate over the sample kernel and calculate occlusion factor
float occlusion = 0.0;
for(int i = 0; i < kernelSize; ++i)
{
// get sample position
vec3 sample = TBN * samples[i]; // From tangent to view-space
sample = fragPos + sample * radius;
// project sample position (to sample texture) (to get position on screen/texture)
vec4 offset = vec4(sample, 1.0);
offset = projection * offset; // from view to clip-space
offset.xyz /= offset.w; // perspective divide
offset.xyz = offset.xyz * 0.5 + 0.5; // transform to range 0.0 - 1.0
// get sample depth
float sampleDepth = -texture(gPositionDepth, offset.xy).w; // Get depth value of kernel sample
// range check & accumulate
float rangeCheck = smoothstep(0.0, 1.0, radius / abs(fragPos.z - sampleDepth ));
occlusion += (sampleDepth >= sample.z ? 1.0 : 0.0) * rangeCheck;
}
occlusion = 1.0 - (occlusion / kernelSize);
FragColor = occlusion;
}
I've read around and saw someone with a similar issue who passed the view matrix into the SSAO shader and multiplied the sampleDepth by it:
float sampleDepth = (viewMatrix * -texture(gPositionDepth, offset.xy)).w;
But it seems like that just makes things worse.
Here's another view from up top where you can see the shadows move with the camera.
If I position my camera in certain ways, things line up.
Although I can only assume the value of your normal matrix n in the gBuffer vertex shader, it seems like you don't store your normals in view space but in world space. Since the SSAO calculations are done in view space, this could (at least partially) explain the unexpected behavior. In that case, you either need to multiply your normals by your view matrix v before storing them to the gBuffer (potentially more efficient, but it may interfere with your other shading calculations) or after retrieving them.
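For the first option, a minimal sketch (assuming m and v are the model and view matrices from the question's gBuffer vertex shader) would be to build the normal matrix from the model-view product, so the stored normal ends up in view space:
// view-space normal matrix; in practice compute transpose(inverse(v * m))
// once on the CPU and upload it as the n uniform instead
mat3 normalMatrix = transpose(inverse(mat3(v * m)));
normal = normalMatrix * vertexNormal;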
I am trying to render a scene using normal mapping.
Therefore I am calculating the tangent space in C++ and storing the binormal and tangent separately in arrays, which are uploaded to my shader using glVertexAttribPointer.
Here is how I calculate the tangent space:
void ObjLoader::computeTangentSpace(MeshData &meshData) {
GLfloat* tangents = new GLfloat[meshData.vertex_position.size()]();
GLfloat* binormals = new GLfloat[meshData.vertex_position.size()]();
std::vector<glm::vec3 > tangent;
std::vector<glm::vec3 > binormal;
for(unsigned int i = 0; i < meshData.indices.size(); i = i+3){
glm::vec3 vertex0 = glm::vec3(meshData.vertex_position.at(meshData.indices.at(i)), meshData.vertex_position.at(meshData.indices.at(i)+1),meshData.vertex_position.at(meshData.indices.at(i)+2));
glm::vec3 vertex1 = glm::vec3(meshData.vertex_position.at(meshData.indices.at(i+1)), meshData.vertex_position.at(meshData.indices.at(i+1)+1),meshData.vertex_position.at(meshData.indices.at(i+1)+2));
glm::vec3 vertex2 = glm::vec3(meshData.vertex_position.at(meshData.indices.at(i+2)), meshData.vertex_position.at(meshData.indices.at(i+2)+1),meshData.vertex_position.at(meshData.indices.at(i+2)+2));
glm::vec3 normal = glm::cross((vertex1 - vertex0),(vertex2 - vertex0));
glm::vec3 deltaPos;
if(vertex0 == vertex1)
deltaPos = vertex2 - vertex0;
else
deltaPos = vertex1 - vertex0;
glm::vec2 uv0 = glm::vec2(meshData.vertex_texcoord.at(meshData.indices.at(i)), meshData.vertex_texcoord.at(meshData.indices.at(i)+1));
glm::vec2 uv1 = glm::vec2(meshData.vertex_texcoord.at(meshData.indices.at(i+1)), meshData.vertex_texcoord.at(meshData.indices.at(i+1)+1));
glm::vec2 uv2 = glm::vec2(meshData.vertex_texcoord.at(meshData.indices.at(i+2)), meshData.vertex_texcoord.at(meshData.indices.at(i+2)+1));
glm::vec2 deltaUV1 = uv1 - uv0;
glm::vec2 deltaUV2 = uv2 - uv0;
glm::vec3 tan; // tangents
glm::vec3 bin; // binormal
// avoid division by 0
if(deltaUV1.s != 0)
tan = deltaPos / deltaUV1.s;
else
tan = deltaPos / 1.0f;
tan = glm::normalize(tan - glm::dot(normal,tan)*normal);
bin = glm::normalize(glm::cross(tan, normal));
// write into array - for each vertex of the face the same value
tangents[meshData.indices.at(i)] = tan.x;
tangents[meshData.indices.at(i)+1] = tan.y;
tangents[meshData.indices.at(i)+2] = tan.z;
tangents[meshData.indices.at(i+1)] = tan.x;
tangents[meshData.indices.at(i+1)+1] = tan.y;
tangents[meshData.indices.at(i+1)+2] = tan.z;
tangents[meshData.indices.at(i+2)] = tan.x;
tangents[meshData.indices.at(i+2)+1] = tan.y;
tangents[meshData.indices.at(i+2)+2] = tan.z;
binormals[meshData.indices.at(i)] = bin.x;
binormals[meshData.indices.at(i)+1] = bin.y;
binormals[meshData.indices.at(i)+2] = bin.z;
binormals[meshData.indices.at(i+1)] = bin.x;
binormals[meshData.indices.at(i+1)+1] = bin.y;
binormals[meshData.indices.at(i+1)+2] = bin.z;
binormals[meshData.indices.at(i+2)] = bin.x;
binormals[meshData.indices.at(i+2)+1] = bin.y;
binormals[meshData.indices.at(i+2)+2] = bin.z;
}
// Copy the tangent and binormal to meshData
for(unsigned int i = 0; i < meshData.vertex_position.size(); i++){
meshData.vertex_tangent.push_back(tangents[i]);
meshData.vertex_binormal.push_back(binormals[i]);
}
}
And here are my vertex and fragment shaders.
Vertex Shader
#version 330
layout(location = 0) in vec3 vertex;
layout(location = 1) in vec3 vertex_normal;
layout(location = 2) in vec2 vertex_texcoord;
layout(location = 3) in vec3 vertex_tangent;
layout(location = 4) in vec3 vertex_binormal;
struct LightSource {
vec3 ambient_color;
vec3 diffuse_color;
vec3 specular_color;
vec3 position;
};
uniform vec3 lightPos;
out vec3 vertexNormal;
out vec3 eyeDir;
out vec3 lightDir;
out vec2 textureCoord;
uniform mat4 view;
uniform mat4 modelview;
uniform mat4 projection;
out vec4 myColor;
void main() {
mat4 normalMatrix = transpose(inverse(modelview));
gl_Position = projection * modelview * vec4(vertex, 1.0);
vec4 binormal = modelview * vec4(vertex_binormal,1);
vec4 tangent = modelview * vec4(vertex_tangent,1);
vec4 normal = vec4(vertex_normal,1);
mat3 tangentMatrix = mat3(tangent.xyz,binormal.xyz,normal.xyz);
vec3 vertexInCamSpace = (modelview * vec4(vertex, 1.0)).xyz;
eyeDir = tangentMatrix * normalize( -vertexInCamSpace);
vec3 lightInCamSpace = (view * vec4(lightPos, 1.0)).xyz;
lightDir = tangentMatrix * normalize((lightInCamSpace - vertexInCamSpace));
textureCoord = vertex_texcoord;
}
Fragment Shader
#version 330
struct LightSource {
vec3 ambient_color;
vec3 diffuse_color;
vec3 specular_color;
vec3 position;
};
struct Material {
vec3 ambient_color;
vec3 diffuse_color;
vec3 specular_color;
float specular_shininess;
};
uniform LightSource light;
uniform Material material;
in vec3 vertexNormal;
in vec3 eyeDir;
in vec3 lightDir;
in vec2 textureCoord;
uniform sampler2D texture;
uniform sampler2D normals;
out vec4 color;
in vec4 myColor;
in vec3 bin;
in vec3 tan;
void main() {
vec3 diffuse = texture2D(texture,textureCoord).rgb;
vec3 E = normalize(eyeDir);
vec3 N = texture2D(normals,textureCoord).xyz;
N = (N - 0.5) * 2.0;
vec3 ambientTerm = vec3(0);
vec3 diffuseTerm = vec3(0);
vec3 specularTerm = vec3(0);
vec3 L, H;
L = normalize(lightDir);
H = normalize(E + L);
ambientTerm += light.ambient_color;
diffuseTerm += light.diffuse_color * max(dot(L, N), 0);
specularTerm += light.specular_color * pow(max(dot(H, N), 0), material.specular_shininess);
ambientTerm *= material.ambient_color;
diffuseTerm *= material.diffuse_color;
specularTerm *= material.specular_color;
color = vec4(diffuse, 1) * vec4(ambientTerm + diffuseTerm + specularTerm, 1);
}
The problem is that sometimes I don't have values for the tangent and binormal in the shader. Here are three screenshots which I hope will clarify my problem:
This is how the scene currently looks when I render it with the code above:
This is how the scene looks when I use lightDir as the colour:
And the third shows the scene with eyeDir as the colour:
All the pictures are taken from the same angle, without moving the camera or rotating anything.
I've already compared my code to several different sources on the web, but I haven't found my mistake...
Additional information:
I am iterating over all faces; three indices give me one triangle. The UV values for each vertex are stored at the same index. Having done a lot of debugging there, I am very sure that these are the correct values, as I can find the right values in the .obj file when searching with gedit.
After calculating the tangent and binormal, I am storing them at the same index as the vertex position in the array. To my understanding this should give me the correct position, and I am calculating this for each vertex. For each vertex in a face I use the same tangent basis, which may later be overwritten when another face uses this vertex; this could mess up my final result, but only in very small details...
EDIT:
For any other questions, here is the whole project:
http://www.incentivelabs.de/Sourcecode/normal_mapping.zip
In your vertex shader you have:
vec4 binormal = modelview * vec4(vertex_binormal,1);
vec4 tangent = modelview * vec4(vertex_tangent,1);
vec4 normal = vec4(vertex_normal,1);
This should be:
vec4 binormal = modelview * vec4(vertex_binormal,0);
vec4 tangent = modelview * vec4(vertex_tangent,0);
vec4 normal = modelview * vec4(vertex_normal,0);
Note the '0' instead of '1' (also, I'm assuming you meant to transform your normal too). You use '0' here because you want to ignore the translation part of the modelview transformation (you're transforming a vector, not a point).
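Equivalently, you can cast the matrix to mat3, which drops the translation column altogether:
vec3 binormal = normalize(mat3(modelview) * vertex_binormal);
vec3 tangent = normalize(mat3(modelview) * vertex_tangent);
vec3 normal = normalize(mat3(modelview) * vertex_normal);
Strictly speaking, if modelview contains non-uniform scaling, the normal should instead be transformed by the inverse-transpose of the modelview matrix.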