Calculate tangent space in C++

I am trying to render a scene using normal mapping.
I am therefore calculating the tangent space in C++ and storing the binormal and tangent separately in arrays, which are uploaded to my shader using glVertexAttribPointer.
Here is how I calculate the space:
void ObjLoader::computeTangentSpace(MeshData &meshData) {
    GLfloat* tangents = new GLfloat[meshData.vertex_position.size()]();
    GLfloat* binormals = new GLfloat[meshData.vertex_position.size()]();

    for (unsigned int i = 0; i < meshData.indices.size(); i += 3) {
        glm::vec3 vertex0 = glm::vec3(meshData.vertex_position.at(meshData.indices.at(i)),
                                      meshData.vertex_position.at(meshData.indices.at(i) + 1),
                                      meshData.vertex_position.at(meshData.indices.at(i) + 2));
        glm::vec3 vertex1 = glm::vec3(meshData.vertex_position.at(meshData.indices.at(i + 1)),
                                      meshData.vertex_position.at(meshData.indices.at(i + 1) + 1),
                                      meshData.vertex_position.at(meshData.indices.at(i + 1) + 2));
        glm::vec3 vertex2 = glm::vec3(meshData.vertex_position.at(meshData.indices.at(i + 2)),
                                      meshData.vertex_position.at(meshData.indices.at(i + 2) + 1),
                                      meshData.vertex_position.at(meshData.indices.at(i + 2) + 2));

        glm::vec3 normal = glm::cross((vertex1 - vertex0), (vertex2 - vertex0));

        glm::vec3 deltaPos;
        if (vertex0 == vertex1)
            deltaPos = vertex2 - vertex0;
        else
            deltaPos = vertex1 - vertex0;

        glm::vec2 uv0 = glm::vec2(meshData.vertex_texcoord.at(meshData.indices.at(i)),
                                  meshData.vertex_texcoord.at(meshData.indices.at(i) + 1));
        glm::vec2 uv1 = glm::vec2(meshData.vertex_texcoord.at(meshData.indices.at(i + 1)),
                                  meshData.vertex_texcoord.at(meshData.indices.at(i + 1) + 1));
        glm::vec2 uv2 = glm::vec2(meshData.vertex_texcoord.at(meshData.indices.at(i + 2)),
                                  meshData.vertex_texcoord.at(meshData.indices.at(i + 2) + 1));
        glm::vec2 deltaUV1 = uv1 - uv0;
        glm::vec2 deltaUV2 = uv2 - uv0;

        glm::vec3 tan; // tangent
        glm::vec3 bin; // binormal

        // avoid division by 0
        if (deltaUV1.s != 0)
            tan = deltaPos / deltaUV1.s;
        else
            tan = deltaPos / 1.0f;

        tan = glm::normalize(tan - glm::dot(normal, tan) * normal);
        bin = glm::normalize(glm::cross(tan, normal));

        // write into array - the same value for each vertex of the face
        tangents[meshData.indices.at(i)]         = tan.x;
        tangents[meshData.indices.at(i) + 1]     = tan.y;
        tangents[meshData.indices.at(i) + 2]     = tan.z;
        tangents[meshData.indices.at(i + 1)]     = tan.x;
        tangents[meshData.indices.at(i + 1) + 1] = tan.y;
        tangents[meshData.indices.at(i + 1) + 2] = tan.z;
        tangents[meshData.indices.at(i + 2)]     = tan.x;
        tangents[meshData.indices.at(i + 2) + 1] = tan.y;
        tangents[meshData.indices.at(i + 2) + 2] = tan.z;

        binormals[meshData.indices.at(i)]         = bin.x;
        binormals[meshData.indices.at(i) + 1]     = bin.y;
        binormals[meshData.indices.at(i) + 2]     = bin.z;
        binormals[meshData.indices.at(i + 1)]     = bin.x;
        binormals[meshData.indices.at(i + 1) + 1] = bin.y;
        binormals[meshData.indices.at(i + 1) + 2] = bin.z;
        binormals[meshData.indices.at(i + 2)]     = bin.x;
        binormals[meshData.indices.at(i + 2) + 1] = bin.y;
        binormals[meshData.indices.at(i + 2) + 2] = bin.z;
    }

    // Copy the tangent and binormal to meshData
    for (unsigned int i = 0; i < meshData.vertex_position.size(); i++) {
        meshData.vertex_tangent.push_back(tangents[i]);
        meshData.vertex_binormal.push_back(binormals[i]);
    }

    delete[] tangents;
    delete[] binormals;
}
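The resulting arrays are then uploaded with glVertexAttribPointer, as mentioned above; a minimal sketch of that step, assuming a VAO is already bound and using attribute locations 3 and 4 from the vertex shader below (the VBO handles are illustrative):
GLuint tangentVBO, binormalVBO;
glGenBuffers(1, &tangentVBO);
glBindBuffer(GL_ARRAY_BUFFER, tangentVBO);
glBufferData(GL_ARRAY_BUFFER, meshData.vertex_tangent.size() * sizeof(GLfloat),
             meshData.vertex_tangent.data(), GL_STATIC_DRAW);
glEnableVertexAttribArray(3);                        // layout(location = 3) in vec3 vertex_tangent
glVertexAttribPointer(3, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);

glGenBuffers(1, &binormalVBO);
glBindBuffer(GL_ARRAY_BUFFER, binormalVBO);
glBufferData(GL_ARRAY_BUFFER, meshData.vertex_binormal.size() * sizeof(GLfloat),
             meshData.vertex_binormal.data(), GL_STATIC_DRAW);
glEnableVertexAttribArray(4);                        // layout(location = 4) in vec3 vertex_binormal
glVertexAttribPointer(4, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);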
And here are my vertex and fragment shaders.
Vertex Shader
#version 330
layout(location = 0) in vec3 vertex;
layout(location = 1) in vec3 vertex_normal;
layout(location = 2) in vec2 vertex_texcoord;
layout(location = 3) in vec3 vertex_tangent;
layout(location = 4) in vec3 vertex_binormal;
struct LightSource {
vec3 ambient_color;
vec3 diffuse_color;
vec3 specular_color;
vec3 position;
};
uniform vec3 lightPos;
out vec3 vertexNormal;
out vec3 eyeDir;
out vec3 lightDir;
out vec2 textureCoord;
uniform mat4 view;
uniform mat4 modelview;
uniform mat4 projection;
out vec4 myColor;
void main() {
mat4 normalMatrix = transpose(inverse(modelview));
gl_Position = projection * modelview * vec4(vertex, 1.0);
vec4 binormal = modelview * vec4(vertex_binormal,1);
vec4 tangent = modelview * vec4(vertex_tangent,1);
vec4 normal = vec4(vertex_normal,1);
mat3 tangentMatrix = mat3(tangent.xyz,binormal.xyz,normal.xyz);
vec3 vertexInCamSpace = (modelview * vec4(vertex, 1.0)).xyz;
eyeDir = tangentMatrix * normalize( -vertexInCamSpace);
vec3 lightInCamSpace = (view * vec4(lightPos, 1.0)).xyz;
lightDir = tangentMatrix * normalize((lightInCamSpace - vertexInCamSpace));
textureCoord = vertex_texcoord;
}
Fragment Shader
#version 330
struct LightSource {
vec3 ambient_color;
vec3 diffuse_color;
vec3 specular_color;
vec3 position;
};
struct Material {
vec3 ambient_color;
vec3 diffuse_color;
vec3 specular_color;
float specular_shininess;
};
uniform LightSource light;
uniform Material material;
in vec3 vertexNormal;
in vec3 eyeDir;
in vec3 lightDir;
in vec2 textureCoord;
uniform sampler2D texture;
uniform sampler2D normals;
out vec4 color;
in vec4 myColor;
in vec3 bin;
in vec3 tan;
void main() {
vec3 diffuse = texture2D(texture,textureCoord).rgb;
vec3 E = normalize(eyeDir);
vec3 N = texture2D(normals,textureCoord).xyz;
N = (N - 0.5) * 2.0;
vec3 ambientTerm = vec3(0);
vec3 diffuseTerm = vec3(0);
vec3 specularTerm = vec3(0);
vec3 L, H;
L = normalize(lightDir);
H = normalize(E + L);
ambientTerm += light.ambient_color;
diffuseTerm += light.diffuse_color * max(dot(L, N), 0);
specularTerm += light.specular_color * pow(max(dot(H, N), 0), material.specular_shininess);
ambientTerm *= material.ambient_color;
diffuseTerm *= material.diffuse_color;
specularTerm *= material.specular_color;
color = vec4(diffuse, 1) * vec4(ambientTerm + diffuseTerm + specularTerm, 1);
}
The problem is that sometimes I don't have values for the tangent and binormal in the shader. Here are three screenshots which I hope will clarify my problem:
This is how the scene currently looks when I render it with the code above:
This is how the scene looks when I use lightDir as the color:
And the third shows the scene with eyeDir as the color:
All pictures are taken from the same angle, without moving the camera or rotating anything.
I've already compared my code to several different sources on the web, but I haven't found my error.
Additional information:
I am iterating over all faces; three indices give me one triangle. The UV values for each vertex are stored at the same index. After a lot of debugging I am fairly sure these are the correct values, as I can find them in the .obj file when searching with gedit.
After calculating the tangent and binormal I store them at the same index as the vertex position in the array. To my understanding this should give me the correct position, and I am calculating this for each vertex. For each vertex of a face I use the same tangent basis, which may later be overwritten when another face uses that vertex; this could affect my final result, but only in small details.
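A common way to avoid the "last face wins" overwrite described above is to accumulate the per-face basis per vertex and normalize afterwards. A rough, layout-independent sketch (Face and computeFaceBasis are hypothetical placeholders for the index triple and the per-face computation from the loop above):
struct Face { unsigned int index[3]; };              // hypothetical: three vertex indices
std::vector<glm::vec3> accTangent(vertexCount, glm::vec3(0.0f));
std::vector<glm::vec3> accBinormal(vertexCount, glm::vec3(0.0f));
for (const Face &f : faces) {
    glm::vec3 tan, bin;
    computeFaceBasis(f, tan, bin);                   // per-face tangent/binormal as in the loop above
    for (int k = 0; k < 3; k++) {
        accTangent[f.index[k]]  += tan;              // add instead of overwrite
        accBinormal[f.index[k]] += bin;
    }
}
for (glm::vec3 &t : accTangent)  t = glm::normalize(t);
for (glm::vec3 &b : accBinormal) b = glm::normalize(b);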
EDIT:
For any other questions, here is the whole project:
http://www.incentivelabs.de/Sourcecode/normal_mapping.zip

In your vertex shader you have:
vec4 binormal = modelview * vec4(vertex_binormal,1);
vec4 tangent = modelview * vec4(vertex_tangent,1);
vec4 normal = vec4(vertex_normal,1);
This should be:
vec4 binormal = modelview * vec4(vertex_binormal,0);
vec4 tangent = modelview * vec4(vertex_tangent,0);
vec4 normal = modelview * vec4(vertex_normal,0);
Note the '0' instead of '1' (also, I'm assuming you meant to transform your normal too). You use '0' here because you want to ignore the translation part of the modelview transformation (you're transforming a vector, not a point).
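The difference is easy to see with glm (a quick illustration, not part of the project code): a translation in the matrix only affects vectors whose w component is 1.
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>

glm::mat4 modelview = glm::translate(glm::mat4(1.0f), glm::vec3(5.0f, 0.0f, 0.0f));
glm::vec3 t = glm::vec3(0.0f, 1.0f, 0.0f);                // some tangent direction
glm::vec4 asPoint     = modelview * glm::vec4(t, 1.0f);   // (5, 1, 0, 1): translation applied
glm::vec4 asDirection = modelview * glm::vec4(t, 0.0f);   // (0, 1, 0, 0): translation ignored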

Related

Weird Layered Effect During Parallax Mapping

I am following along with the LearnOpenGL guide and am trying to implement Steep Parallax Mapping.
Everything seems to work fine, except that my brick wall has distinct visible layers, whereas the photos in the guide don't show any. I was trying to use this code to parallax the topography of the world, but these weird layers show up there too, so I was hoping to find a fix.
Layered wall photo
Photo of how it should look
Here is my modified vertex shader
#version 300 es
in vec4 vPosition; // aPos
in vec2 texCoord; // aTexCoords
in vec4 vNormal; // aNormal
in vec4 vTangent; // aTangent
uniform mat4 model_view;
uniform mat4 projection;
uniform vec4 light_position;
out vec2 ftexCoord;
out vec3 vT;
out vec3 vN;
out vec4 position;
out vec3 FragPos;
out vec3 TangentLightPos;
out vec3 TangentViewPos;
out vec3 TangentFragPos;
void
main()
{
// Normal variables
vN = normalize(model_view * vNormal).xyz;
vT = normalize(model_view * vTangent).xyz;
vec4 veyepos = model_view*vPosition;
position = veyepos;
ftexCoord = texCoord;
// Displacement variables
vec3 bi = cross(vT, vN);
FragPos = vec3(model_view * vPosition).xyz;
vec3 T = normalize(mat3(model_view) * vTangent.xyz);
vec3 B = normalize(mat3(model_view) * bi);
vec3 N = normalize(mat3(model_view) * vNormal.xyz);
mat3 TBN = transpose(mat3(T, B, N));
TangentLightPos = TBN * light_position.xyz;
TangentViewPos = TBN * vPosition.xyz;
TangentFragPos = TBN * FragPos;
gl_Position = projection * model_view * vPosition;
}
and my modified fragment shader is here
#version 300 es
precision highp float;
in vec2 ftexCoord;
in vec3 vT; //parallel to surface in eye space
in vec3 vN; //perpendicular to surface in eye space
in vec4 position;
in vec3 FragPos;
in vec3 TangentLightPos;
in vec3 TangentViewPos;
in vec3 TangentFragPos;
uniform int mode;
uniform vec4 light_position;
uniform vec4 light_color;
uniform vec4 ambient_light;
uniform sampler2D colorMap;
uniform sampler2D normalMap;
uniform sampler2D depthMap;
out vec4 fColor;
// STEEP PARALLAX MAPPING
vec2 ParallaxMapping(vec2 texCoords, vec3 viewDir)
{
// number of depth layers
const float minLayers = 8.0;
const float maxLayers = 32.0;
float numLayers = mix(maxLayers, minLayers, abs(dot(vec3(0.0, 0.0, 1.0), viewDir)));
// calculate the size of each layer
float layerDepth = 1.0 / numLayers;
// depth of current layer
float currentLayerDepth = 0.0;
// the amount to shift the texture coordinates per layer (from vector P)
vec2 P = viewDir.xy / viewDir.z * 0.1;
vec2 deltaTexCoords = P / numLayers;
// get initial values
vec2 currentTexCoords = texCoords;
float currentDepthMapValue = texture(depthMap, currentTexCoords).r;
while(currentLayerDepth < currentDepthMapValue)
{
// shift texture coordinates along direction of P
currentTexCoords -= deltaTexCoords;
// get depthmap value at current texture coordinates
currentDepthMapValue = texture(depthMap, currentTexCoords).r;
// get depth of next layer
currentLayerDepth += layerDepth;
}
return currentTexCoords;
}
void main()
{
// DO NORMAL MAPPING
if (mode == 0) {
vec3 T = normalize(vT);
vec3 N = normalize(vN);
vec3 bi = cross(T, N);
mat4 changeOfCoord = mat4(vec4(T, 0), vec4(bi, 0), vec4(N, 0), vec4(0, 0, 0, 1));
vec3 L = normalize(light_position - position).xyz;
vec3 E = normalize(-position).xyz;
vec4 text = vec4(texture(normalMap, ftexCoord) * 2.0 - 1.0);
vec4 eye = changeOfCoord * text;
vec4 amb = texture(colorMap, ftexCoord) * ambient_light;
vec4 diff = max(0.0, dot(L, eye.xyz)) * light_color * texture(colorMap, ftexCoord);
fColor = amb + diff;
} else if (mode == 1) { // DO PARALLAX MAPPING
// offset texture coordinates with Parallax Mapping
vec3 viewDir = normalize(TangentViewPos - TangentFragPos);
vec2 texCoords = ftexCoord;
texCoords = ParallaxMapping(ftexCoord, viewDir);
// discard samples outside of the default texture coordinate space
if(texCoords.x > 1.0 || texCoords.y > 1.0 || texCoords.x < 0.0 || texCoords.y < 0.0)
discard;
// obtain normal from normal map
vec3 normal = texture(normalMap, texCoords).rgb;
//values stored in normal texture is [0,1] range, we need [-1, 1] range
normal = normalize(normal * 2.0 - 1.0);
// get diffuse color
vec3 color = texture(colorMap, texCoords).rgb;
// ambient
vec3 ambient = 0.1 * color;
// diffuse
vec3 lightDir = normalize(TangentLightPos - TangentFragPos);
float diff = max(dot(lightDir, normal), 0.0);
vec3 diffuse = diff * color;
// specular
vec3 reflectDir = reflect(lightDir, normal);
vec3 halfwayDir = normalize(lightDir + viewDir);
float spec = pow(max(dot(normal, halfwayDir), 0.0), 32.0);
vec3 specular = vec3(0.2) * spec;
fColor = vec4(ambient + diffuse + 0.0, 1.0);
}
}
Layers at acute viewing angles are a common artifact of parallax mapping. To improve the result you have to increase the number of samples or implement Parallax Occlusion Mapping (as described in the bottom part of the tutorial):
// STEEP PARALLAX MAPPING
vec2 ParallaxMapping(vec2 texCoords, vec3 viewDir)
{
// number of depth layers
const float minLayers = 8.0;
const float maxLayers = 32.0;
float numLayers = mix(maxLayers, minLayers, abs(dot(vec3(0.0, 0.0, 1.0), viewDir)));
// calculate the size of each layer
float layerDepth = 1.0 / numLayers;
// depth of current layer
float currentLayerDepth = 0.0;
// the amount to shift the texture coordinates per layer (from vector P)
vec2 P = viewDir.xy / viewDir.z * 0.1;
vec2 deltaTexCoords = P / numLayers;
// get initial values
vec2 currentTexCoords = texCoords;
float currentDepthMapValue = texture(depthMap, currentTexCoords).r;
while(currentLayerDepth < currentDepthMapValue)
{
// shift texture coordinates along direction of P
currentTexCoords -= deltaTexCoords;
// get depthmap value at current texture coordinates
currentDepthMapValue = texture(depthMap, currentTexCoords).r;
// get depth of next layer
currentLayerDepth += layerDepth;
}
// get texture coordinates before collision (reverse operations)
vec2 prevTexCoords = currentTexCoords + deltaTexCoords;
// get depth after and before collision for linear interpolation
float afterDepth = currentDepthMapValue - currentLayerDepth;
float beforeDepth = texture(depthMap, prevTexCoords).r - currentLayerDepth + layerDepth;
// interpolation of texture coordinates
float weight = afterDepth / (afterDepth - beforeDepth);
vec2 finalTexCoords = prevTexCoords * weight + currentTexCoords * (1.0 - weight);
return finalTexCoords;
}
By the way, the bitangent vector seems to be inverted. Commonly the bitangent is the cross product of the normal vector and the tangent in a right-handed system, but that depends on the displacement texture. So
vec3 bi = cross(vT, vN);
should probably be
vec3 bi = cross(vN, vT);
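Whether the flip is needed can also be decided from the UVs: solve the triangle's UV deltas for a bitangent and compare it against cross(normal, tangent). A small C++/glm sketch (normal, tangent and uvBitangent are assumed to be computed already, e.g. with the usual edge/UV derivation):
glm::vec3 bitangent = glm::cross(normal, tangent);
if (glm::dot(bitangent, uvBitangent) < 0.0f)
    bitangent = -bitangent;                // mirrored UVs: flip to keep a consistent basis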
See further:
Bump Mapping with javascript and glsl
Normal, Parallax and Relief mapping
Demo

SSAO shading moves weird with camera (calculating gbuffer wrong)

I'm trying to implement this version of ssao with this tutorial:
http://www.learnopengl.com/#!Advanced-Lighting/SSAO
Here is what I end up with for my render textures.
When I move the camera, the shadows seem to follow.
It seems like I am missing some kind of matrix multiplication involving the camera.
CODE
gBuffer Vertex
#version 330 core
layout (location = 0) in vec3 vertexPosition;
layout (location = 1) in vec3 vertexNormal;
out vec3 position;
out vec3 normal;
uniform mat4 m;
uniform mat4 v;
uniform mat4 p;
uniform mat4 n;
void main()
{
vec4 viewPos = v * m * vec4(vertexPosition, 1.0f);
position = viewPos.xyz;
gl_Position = p * viewPos;
normal = vec3(n * vec4(vertexNormal, 0.0f));
}
gBuffer Fragment
#version 330 core
layout (location = 0) out vec4 gPosition;
layout (location = 1) out vec3 gNormal;
layout (location = 2) out vec4 gColor;
in vec3 position;
in vec3 normal;
const float NEAR = 0.1f;
const float FAR = 50.0f;
float LinearizeDepth(float depth)
{
float z = depth * 2.0f - 1.0f;
return (2.0 * NEAR * FAR) / (FAR + NEAR - z * (FAR - NEAR));
}
void main()
{
gPosition.xyz = position;
gPosition.a = LinearizeDepth(gl_FragCoord.z);
gNormal = normalize(normal);
gColor.rgb = vec3(1.0f);
}
SSAO Vertex
#version 330 core
layout (location = 0) in vec3 vertexPosition;
layout (location = 1) in vec2 texCoords;
out vec2 UV;
void main(){
gl_Position = vec4(vertexPosition, 1.0f);
UV = texCoords;
}
SSAO Fragment
#version 330 core
out float FragColor;
in vec2 UV;
uniform sampler2D gPositionDepth;
uniform sampler2D gNormal;
uniform sampler2D texNoise;
uniform vec3 samples[32];
uniform mat4 projection;
// parameters (you'd probably want to use them as uniforms to more easily tweak the effect)
int kernelSize = 32;
float radius = 1.0;
// tile noise texture over screen based on screen dimensions divided by noise size
const vec2 noiseScale = vec2(1024.0f/4.0f, 1024.0f/4.0f);
void main()
{
// Get input for SSAO algorithm
vec3 fragPos = texture(gPositionDepth, UV).xyz;
vec3 normal = texture(gNormal, UV).rgb;
vec3 randomVec = texture(texNoise, UV * noiseScale).xyz;
// Create TBN change-of-basis matrix: from tangent-space to view-space
vec3 tangent = normalize(randomVec - normal * dot(randomVec, normal));
vec3 bitangent = cross(normal, tangent);
mat3 TBN = mat3(tangent, bitangent, normal);
// Iterate over the sample kernel and calculate occlusion factor
float occlusion = 0.0;
for(int i = 0; i < kernelSize; ++i)
{
// get sample position
vec3 sample = TBN * samples[i]; // From tangent to view-space
sample = fragPos + sample * radius;
// project sample position (to sample texture) (to get position on screen/texture)
vec4 offset = vec4(sample, 1.0);
offset = projection * offset; // from view to clip-space
offset.xyz /= offset.w; // perspective divide
offset.xyz = offset.xyz * 0.5 + 0.5; // transform to range 0.0 - 1.0
// get sample depth
float sampleDepth = -texture(gPositionDepth, offset.xy).w; // Get depth value of kernel sample
// range check & accumulate
float rangeCheck = smoothstep(0.0, 1.0, radius / abs(fragPos.z - sampleDepth ));
occlusion += (sampleDepth >= sample.z ? 1.0 : 0.0) * rangeCheck;
}
occlusion = 1.0 - (occlusion / kernelSize);
FragColor = occlusion;
}
I've read around and saw that someone with a similar issue passed the view matrix into the SSAO shader and multiplied the sampleDepth:
float sampleDepth = (viewMatrix * -texture(gPositionDepth, offset.xy)).w;
But it seems like that just makes things worse.
Here's another view from up top, where you can see the shadows move with the camera.
If I position my camera in certain ways, things line up.
Although I can only assume the value of your normal matrix n in the gBuffer vertex shader, it seems like you don't store your normals in view space but in world space. Since the SSAO calculations are done in screen space, this could (at least partially) explain the unexpected behavior. In that case, you either need to multiply your normals by your view matrix v before storing them in the gBuffer (potentially more efficient, but it may interfere with your other shading calculations) or after retrieving them.
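A hedged sketch of the first option on the CPU side, assuming glm and the uniform names v, m and n from the gBuffer vertex shader above (program stands for whatever shader program handle is in use): build the normal matrix from view * model instead of from the model matrix alone, so the stored normals end up in view space.
#include <glm/glm.hpp>
#include <glm/gtc/type_ptr.hpp>

glm::mat4 modelView    = view * model;                              // the same v * m as in the shader
glm::mat4 normalMatrix = glm::transpose(glm::inverse(modelView));
glUniformMatrix4fv(glGetUniformLocation(program, "n"), 1, GL_FALSE,
                   glm::value_ptr(normalMatrix));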

Implementing Normal Mapping using OpenGL/GLSL

I'm learning GLSL and trying to implement some lighting and mapping tricks. I'm working with the ShaderDesigner tool. After coding normal mapping I noticed that my model's illumination doesn't look right. Here is my code and some pictures. If possible, please tell me what my problem is.
Vertex Shader
#define MAX_LIGHTS 1
struct LightProps
{
vec3 direction[MAX_LIGHTS];
};
attribute vec3 tangent;
attribute vec3 bitangent;
varying LightProps lights;
void main()
{
vec3 N = normalize(gl_NormalMatrix*gl_Normal);
vec3 T = normalize(gl_NormalMatrix*tangent);
vec3 B = normalize(gl_NormalMatrix*bitangent);
mat3 TBNMatrix = mat3(T,B,N);
vec4 vertex = gl_ModelViewMatrix*gl_Vertex;
for(int i = 0; i < MAX_LIGHTS; i++)
{
vec4 lightPos = gl_LightSource[i].position;
lights.direction[i] = vec3(lightPos.w > 0 ? lightPos-vertex : lightPos);
lights.direction[i] *= TBNMatrix;
}
gl_TexCoord[0] = gl_MultiTexCoord0;
gl_Position = gl_ModelViewProjectionMatrix*gl_Vertex;
}
Fragment Shader
#define MAX_LIGHTS 1
struct LightProps
{
vec3 direction[MAX_LIGHTS];
};
uniform sampler2D textureUnit;
uniform sampler2D normalTextureUnit;
uniform vec4 TexColor;
varying LightProps lights;
void main()
{
vec3 N = normalize(texture2D(normalTextureUnit,gl_TexCoord[0].st).rgb*2.0-1.0);
vec4 color = vec4(0,0,0,0);
for(int i = 0; i < MAX_LIGHTS; i++)
{
vec3 L = lights.direction[i];
float dist = length(L);
L = normalize(L);
float NdotL = max(dot(N,L),0.0);
if(NdotL > 0)
{
float att = 1.0;
if(gl_LightSource[i].position.w > 0)
{
att = 1.0/ (gl_LightSource[i].constantAttenuation +
gl_LightSource[i].linearAttenuation * dist +
gl_LightSource[i].quadraticAttenuation * dist * dist);
}
vec4 ambient = gl_FrontLightProduct[i].ambient;
vec4 diffuse = clamp(att*NdotL*gl_FrontLightProduct[i].diffuse,0,1);
color += att*(ambient+diffuse);
}
}
vec4 textureColor = texture2D(textureUnit, vec2(gl_TexCoord[0]));
gl_FragColor = TexColor*textureColor + gl_FrontLightModelProduct.sceneColor + color;
}
I set TexColor to (0.3,0.3,0.3,1.0) and take screenshots:
Screenshot
  
There is a little bit of lighting when I rotate the camera and light to the left,
but when I rotate to the right the plane gets fully illuminated. I think there is something wrong, because without normal mapping the plane looks the same from both sides. Here is the normal texture. Thanks in advance.
Normal Map:
  
That normal map is in tangent-space, but you are treating it as object-space.
You need a bitangent and/or tangent vector per-vertex in addition to your normal in order to form the basis to perform transformation into and out of tangent-space. This matrix is often referred to as simply TBN.
You have two options here:
Transform all of your lighting direction vectors into tangent-space
Useful for forward-shading, can be done in a vertex shader
Transform your normal map from tangent-space back to view-space
Required by deferred-shading, must be done in fragment shader
Both options require the construction of a TBN matrix, and if your tangent-space basis is orthogonal (modeling software like Assimp can be configured to do this for you) you can transpose the TBN matrix to do either one.
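For an orthonormal basis the two directions differ only by a transpose; a small C++/glm sketch of that relationship (T, B and N are assumed to be orthonormal view-space vectors):
glm::mat3 TBN = glm::mat3(T, B, N);                   // columns are the tangent-space axes in view space
glm::vec3 inViewSpace    = TBN * someTangentSpaceVector;              // tangent space -> view space
glm::vec3 inTangentSpace = glm::transpose(TBN) * someViewSpaceVector; // view space -> tangent space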
You are implementing forward-shading, so solution #1 is the approach you should take.
Below is a rough overview of the necessary steps for solution #1. Ordinarily you would do the calculation of the lighting direction vector in the vertex shader for better performance.
Vertex Shader to Transform of Lighting Vectors into Tangent-space:
attribute vec3 tangent;
attribute vec3 bitangent;
varying vec3 N;
varying vec3 V;
varying vec3 E;
varying vec3 T;
varying vec3 B;
void main()
{
N = normalize(gl_NormalMatrix*gl_Normal);
V = vec3(gl_ModelViewMatrix*gl_Vertex);
E = normalize(-V);
T = normalize(gl_NormalMatrix*tangent);
B = normalize(gl_NormalMatrix*bitangent);
gl_TexCoord[0] = gl_MultiTexCoord0;
gl_Position = gl_ModelViewProjectionMatrix*gl_Vertex;
}
Fragment Shader to Transform Lighting Vectors into Tangent-space:
varying vec3 N;
varying vec3 V;
varying vec3 E;
varying vec3 B;
varying vec3 T;
uniform sampler2D textureUnit;
uniform sampler2D normalTextureUnit;
uniform vec4 TexColor;
#define MAX_LIGHTS 1
void main()
{
// Construct Tangent Space Basis
mat3 TBN = mat3 (T, B, N);
vec3 normal = normalize (texture2D(normalTextureUnit,gl_TexCoord[0].st).xyz*2.0 - 1.0);
vec4 color = vec4(0,0,0,0);
for(int i = 0; i < MAX_LIGHTS; i++)
{
vec4 lightPos = gl_LightSource[i].position;
vec3 L = lightPos.w > 0 ? lightPos.xyz - V : lightPos.xyz;
L *= TBN; // Transform into tangent-space
float dist = length(L);
L = normalize(L);
float NdotL = max(dot(L,normal),0.0);
if(NdotL > 0)
{
float att = 1.0;
if(lightPos.w > 0)
{
att = 1.0/ (gl_LightSource[i].constantAttenuation +
gl_LightSource[i].linearAttenuation * dist +
gl_LightSource[i].quadraticAttenuation * dist * dist);
}
vec4 diffuse = clamp(att*NdotL*gl_FrontLightProduct[i].diffuse,0,1);
color += att*gl_FrontLightProduct[i].ambient + diffuse;
}
}
vec4 textureColor = texture2D(textureUnit, vec2(gl_TexCoord[0]));
gl_FragColor = TexColor*textureColor + gl_FrontLightModelProduct.sceneColor + color;
}
There is a tutorial here that should fill in the gaps and explain how to compute tangent and bitangent.
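For reference, the usual derivation solves the triangle's edge/UV system for the tangent and bitangent directly; a self-contained C++/glm sketch (illustrative, not the tutorial's exact code):
#include <glm/glm.hpp>

// Per-face tangent/bitangent from positions p0..p2 and texcoords uv0..uv2.
void faceTangentBasis(const glm::vec3 &p0, const glm::vec3 &p1, const glm::vec3 &p2,
                      const glm::vec2 &uv0, const glm::vec2 &uv1, const glm::vec2 &uv2,
                      glm::vec3 &tangent, glm::vec3 &bitangent)
{
    glm::vec3 e1 = p1 - p0, e2 = p2 - p0;
    glm::vec2 d1 = uv1 - uv0, d2 = uv2 - uv0;
    float det = d1.x * d2.y - d2.x * d1.y;
    float r = (det != 0.0f) ? 1.0f / det : 1.0f;      // guard against degenerate UVs
    tangent   = glm::normalize((e1 * d2.y - e2 * d1.y) * r);
    bitangent = glm::normalize((e2 * d1.x - e1 * d2.x) * r);
}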

Specular Light appears on both sides

I have very strange behaviour with specular (Phong light model) lighting. It seems to appear on both sides of all objects. Does anyone know what the issue could be?
The actual calculation seems to be all right, as I can see that the light changes its position as the object rotates.
#version 330
in vec4 CameraPos0;
in vec3 Pos0;
in vec4 Colour0;
in vec3 Normal0;
out vec4 FragColor;
// Ambient light parameters
uniform vec3 gAmbientLightIntensity;
// Directional light parameters
uniform vec3 gDirectionalLightIntensity;
uniform vec3 gDirectionalLightDirection;
// Specular light parameter
uniform vec3 gSpecularLightIntensity;
uniform vec3 gLightSourcePosition;
uniform vec3 gCameraPosition;
// Material constants
uniform float gKa;
uniform float gKd;
uniform float gKs;
void main()
{
// Calculate the ambient light intensity at the vertex
// Ia = Ka * ambientLightIntensity
vec4 ambientLightIntensity = gKa * vec4(gAmbientLightIntensity, 1.0);
// Setup the light direction and normalise it
vec3 lightDirection = normalize(-gDirectionalLightDirection);
//lightDirection = normalize(gDirectionalLightDirection);
// Id = kd * lightItensity * N.L
// Calculate N.L
float diffuseFactor = dot(Normal0, lightDirection);
diffuseFactor = clamp(diffuseFactor, 0.0, 1.0);
// N.L * light source colour * intensity
vec4 diffuseLightIntensity = gKd * vec4(gDirectionalLightIntensity, 1.0f) * diffuseFactor;
// Phong light
vec3 L = normalize(gLightSourcePosition - Pos0);
vec3 V = normalize(-Pos0);
vec3 R = normalize(2 * Normal0 * dot(Normal0, L) - L);
float specularFactor = pow(dot(R, V), 0.1f);
vec4 specularLightIntensity = gKs * vec4(gSpecularLightIntensity, 1.0f) * specularFactor;
specularLightIntensity = clamp(specularLightIntensity, 0.0, 1.0);
// Final vertex colour is the product of the vertex colour
// and the total light intensity at the vertex
vec4 lightedFragColor = Colour0 * (ambientLightIntensity + diffuseLightIntensity + specularLightIntensity);
FragColor = lightedFragColor;
}
Vertex Shader
#version 330
layout (location = 0) in vec3 Position;
layout (location = 1) in vec3 Normal;
layout (location = 2) in vec4 Colour;
out vec3 Pos0;
out vec4 Colour0;
out vec3 Normal0;
out vec4 CameraPos0;
uniform mat4 gModelToWorldTransform;
uniform mat4 gWorldToViewTransform;
uniform mat4 gProjectionTransform;
void main()
{
vec4 vertexPositionInModelSpace = vec4(Position, 1);
vec4 vertexInWorldSpace = gModelToWorldTransform * vertexPositionInModelSpace;
vec4 vertexInViewSpace = gWorldToViewTransform * vertexInWorldSpace;
vec4 vertexInHomogeneousClipSpace = gProjectionTransform * vertexInViewSpace;
gl_Position = vertexInHomogeneousClipSpace;
vec3 normalInWorldSpace = (gModelToWorldTransform * vec4(Normal, 0.0)).xyz;
normalInWorldSpace = normalize(normalInWorldSpace);
Normal0 = normalInWorldSpace;
CameraPos0 = vertexInViewSpace;
Pos0 = vertexInWorldSpace.xyz;
Colour0 = Colour;
}
You need to clamp the dot product before the pow, because on the back side the dot product is negative and pow can return a positive number instead of clamping it to zero.
float specularFactor = pow(clamp(dot(R, V),0.0,1.0), 0.1f);
Edit:
Also the V should be a vector pointing to the camera position, not to the vertex position in world space:
vec3 V = normalize(CameraPos0 - Pos0);

Normal Mapping and translation disrupts my lighting

I have a normal mapping issue. I have a texture and a normal texture on each model, loaded via the ASSIMP library. I am calculating the tangent vectors for each object with the help of the ASSIMP library, so these should be fine. The objects work perfectly with normal mapping, but as soon as I start translating one of the objects (thus influencing the model matrix with translations) the lighting fails. As you can see in the image, the floor (which is translated down the y axis) seems to lose most of its diffuse lighting, and its specular lighting is in the wrong direction (it should be between the light bulb and the player position).
It might have something to do with the normal matrix (although translations should be discarded by it), or maybe a wrong matrix is used in the shaders. I am out of ideas and was hoping you could shed some light on the issue.
Vertex shader:
#version 330
layout(location = 0) in vec3 position;
layout(location = 1) in vec3 normal;
layout(location = 2) in vec3 tangent;
layout(location = 3) in vec3 color;
layout(location = 4) in vec2 texCoord;
// fragment pass through
out vec3 Position;
out vec3 Normal;
out vec3 Tangent;
out vec3 Color;
out vec2 TexCoord;
out vec3 TangentSurface2Light;
out vec3 TangentSurface2View;
uniform vec3 lightPos;
uniform vec3 playerPos;
// vertex transformation
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main()
{
mat3 normalMatrix = mat3(transpose(inverse(model)));
Position = vec3(model * vec4(position, 1.0));
Normal = normalMatrix * normal;
Tangent = tangent;
Color = color;
TexCoord = texCoord;
gl_Position = projection * view * model * vec4(position, 1.0);
// Calculate tangent matrix and calculate fragment bump mapping coord space.
vec3 light = lightPos;
vec3 n = normalize(normalMatrix * normal);
vec3 t = normalize(normalMatrix * tangent);
vec3 b = cross(n, t);
// create matrix for tangent (from vertex to tangent-space)
mat3 mat = mat3(t.x, b.x ,n.x, t.y, b.y ,n.y, t.z, b.z ,n.z);
vec3 vector = normalize(light - Position);
TangentSurface2Light = mat * vector;
vector = normalize(playerPos - Position);
TangentSurface2View = mat * vector;
}
Fragment Shader
#version 330
in vec3 Position;
in vec3 Normal;
in vec3 Tangent;
in vec3 Color;
in vec2 TexCoord;
in vec3 TangentSurface2Light;
in vec3 TangentSurface2View;
out vec4 outColor;
uniform vec3 lightPos;
uniform vec3 playerPos;
uniform mat4 view;
uniform sampler2D texture0;
uniform sampler2D texture_normal; // normal
uniform float repeatFactor = 1;
void main()
{
vec4 texColor = texture(texture0, TexCoord * repeatFactor);
vec4 matColor = vec4(Color, 1.0);
vec3 light = vec3(vec4(lightPos, 1.0));
float dist = length(light - Position);
// float att = 1.0 / (1.0 + 0.01 * dist + 0.001 * dist * dist);
float att = 1.0;
// Ambient
vec4 ambient = vec4(0.2);
// Diffuse
// vec3 surface2light = normalize(light - Position);
vec3 surface2light = normalize(TangentSurface2Light);
// vec3 norm = normalize(Normal);
vec3 norm = normalize(texture(texture_normal, TexCoord * repeatFactor).xyz * 2.0 - 1.0);
float contribution = max(dot(norm, surface2light), 0.0);
vec4 diffuse = contribution * vec4(0.6);
// Specular
// vec3 surf2view = normalize(-Position); // Player is always at position 0
vec3 surf2view = normalize(TangentSurface2View);
vec3 reflection = reflect(-surface2light, norm); // reflection vector
float specContribution = pow(max(dot(surf2view, reflection), 0.0), 32);
vec4 specular = vec4(1.0) * specContribution;
outColor = (ambient + (diffuse * att)+ (specular * pow(att, 3))) * texColor;
// outColor = vec4(Color, 1.0) * texture(texture0, TexCoord);
}
EDIT
Edited the shader code to calculate everything in world space instead of ping-ponging between world and camera space (easier to understand and less error-prone).
You are doing some strange manipulations with matrices. In the vertex shader you transform the normal (which is in model space) by the inverse view-world matrix. That doesn't make sense. It may be easier to do the calculations in world space. I've got some working sample code, but it uses slightly different naming.
Vertex shader:
void main_vs(in A2V input, out V2P output)
{
output.position = mul(input.position, _worldViewProjection);
output.normal = input.normal;
output.binormal = input.binormal;
output.tangent = input.tangent;
output.positionWorld = mul(input.position, _world);
output.tex = input.tex;
}
Here we transform the position to projection (screen) space; the TBN is left in model space and will be used later. We also get the world-space position for lighting evaluation.
Pixel shader:
void main_ps(in V2P input, out float4 output : SV_Target)
{
float3x3 tbn = float3x3(input.tangent, -input.binormal, input.normal);
//extract & decode normal:
float3 texNormal = _normalTexture.Sample(_normalSampler, input.tex).xyz * 2 - 1;
//now transform TBN-space texNormal to world space:
float3 normal = mul(texNormal, tbn);
normal = normalize(mul(normal, _world));
float3 lightDirection = -_lightPosition.xyz;//directional
float3 viewDirection = normalize(input.positionWorld - _camera);
float3 reflectedLight = reflect(lightDirection, normal);
float diffuseIntensity = dot(normal, lightDirection);
float specularIntensity = max(0, dot(reflectedLight, viewDirection)*1.3);
output = ((_ambient + diffuseIntensity * _diffuse) * _texture.Sample(_sampler, input.tex)
+ pow(specularIntensity, 7) * float4(1,1,1,1)) * _lightColor;
}
Here I use a directional light; for a point (omni) light you should do something like
float3 lightDirection = normalize(input.positionWorld - _lightPosition.xyz);//omni
Here we first take the normal from the texture, which is in TBN space. Then we apply the TBN matrix to transform it to model space, and then the world matrix to transform it to world space, where we already have the light position, eye, etc.
Some other shader code, omitted above (DX11, but it's easy to translate):
cbuffer ViewTranforms
{
row_major matrix _worldViewProjection;
row_major matrix _world;
float3 _camera;
};
cbuffer BumpData
{
float4 _ambient;
float4 _diffuse;
};
cbuffer Textures
{
texture2D _texture;
SamplerState _sampler;
texture2D _normalTexture;
SamplerState _normalSampler;
};
cbuffer Light
{
float4 _lightPosition;
float4 _lightColor;
};
//------------------------------------
struct A2V
{
float4 position : POSITION;
float3 normal : NORMAL;
float3 binormal : BINORMAL;
float3 tangent : TANGENT;
float2 tex : TEXCOORD;
};
struct V2P
{
float4 position : SV_POSITION;
float3 normal : NORMAL;
float3 binormal : BINORMAL;
float3 tangent : TANGENT;
float3 positionWorld : NORMAL1;
float2 tex : TEXCOORD;
};
Also, here I use a precomputed binormal; you can keep your code that computes it (via cross(normal, tangent)).
Hope this helps.