HLSL Normal Mapping Matrix Multiplication - C++

I'm currently working in DirectX 9 and have the following code for my normal mapping:
Vertex shader:
float4x4 gWorldMatrix;
float4x4 gWorldViewProjectionMatrix;
float4 gWorldLightPosition;
float4 gWorldCameraPosition;

struct VS_INPUT
{
    float4 mPosition : POSITION;
    float3 mNormal   : NORMAL;
    float3 mTangent  : TANGENT;
    float3 mBinormal : BINORMAL;
    float2 mUV       : TEXCOORD0;
};

struct VS_OUTPUT
{
    float4 mPosition : POSITION;
    float2 mUV       : TEXCOORD0;
    float3 mLightDir : TEXCOORD1;
    float3 mViewDir  : TEXCOORD2;
    float3 T         : TEXCOORD3;
    float3 B         : TEXCOORD4;
    float3 N         : TEXCOORD5;
};

VS_OUTPUT vs_main( VS_INPUT Input )
{
    VS_OUTPUT Output;
    Output.mPosition = mul( Input.mPosition, gWorldViewProjectionMatrix );
    Output.mUV = Input.mUV;

    float4 worldPosition = mul( Input.mPosition, gWorldMatrix );
    float3 lightDir = worldPosition.xyz - gWorldLightPosition.xyz;
    Output.mLightDir = normalize( lightDir );
    float3 viewDir = normalize( worldPosition.xyz - gWorldCameraPosition.xyz );
    Output.mViewDir = viewDir;

    // object space => world space
    float3 worldNormal = mul( Input.mNormal, (float3x3)gWorldMatrix );
    Output.N = normalize( worldNormal );
    float3 worldTangent = mul( Input.mTangent, (float3x3)gWorldMatrix );
    Output.T = normalize( worldTangent );
    float3 worldBinormal = mul( Input.mBinormal, (float3x3)gWorldMatrix );
    Output.B = normalize( worldBinormal );

    return Output;
}
Pixel shader:
struct PS_INPUT
{
    float2 mUV       : TEXCOORD0;
    float3 mLightDir : TEXCOORD1;
    float3 mViewDir  : TEXCOORD2;
    float3 T         : TEXCOORD3;
    float3 B         : TEXCOORD4;
    float3 N         : TEXCOORD5;
};

sampler2D DiffuseSampler;
sampler2D SpecularSampler;
sampler2D NormalSampler;

float3 gLightColor;
float4 ps_main( PS_INPUT Input ) : COLOR
{
    // read normal from the normal map
    float3 tangentNormal = tex2D( NormalSampler, Input.mUV ).xyz;
    tangentNormal = normalize( tangentNormal * 2 - 1 ); // convert 0~1 to -1~+1

    // read the basis vectors passed from the vertex shader
    float3x3 TBN = float3x3( normalize(Input.T), normalize(Input.B),
                             normalize(Input.N) ); // transforms world => tangent space
    TBN = transpose( TBN ); // transforms tangent space => world
    float3 worldNormal = mul( TBN, tangentNormal ); // note: matrix * column vector
                                                    // (the transposed TBN has T, B, N in its columns)
    float3 lightDir = normalize( Input.mLightDir );
    float3 diffuse = saturate( dot(worldNormal, -lightDir) );
    float4 albedo = tex2D( DiffuseSampler, Input.mUV );
    diffuse = gLightColor * albedo.rgb * diffuse;

    float3 specular = 0;
    if ( diffuse.x > 0 )
    {
        float3 reflection = reflect( lightDir, worldNormal );
        float3 viewDir = normalize( Input.mViewDir );
        specular = saturate( dot(reflection, -viewDir) );
        specular = pow( specular, 20.0f );
        // modulate by the specular map (only one channel is needed)
        float specularIntensity = tex2D( SpecularSampler, Input.mUV ).r;
        specular *= specularIntensity * gLightColor;
    }

    float3 ambient = float3(0.1f, 0.1f, 0.1f) * albedo.rgb;
    return float4(ambient + diffuse + specular, 1);
}
The code works, but I don't quite understand why I need to do the
TBN = transpose( TBN );
in the pixel shader. The TBN vectors I pass from the vertex shader are in world space (hence the multiplication by gWorldMatrix), yet I'm told that
float3x3 TBN = float3x3( normalize(Input.T), normalize(Input.B), normalize(Input.N) );
transforms world => tangent (surface) space.
Why is this?

You need the line
TBN = transpose( TBN );
because you're multiplying the matrix by your tangent-space normal from the right. The normal is therefore treated as a column vector, while the basis vectors sit in the rows of the matrix, so the matrix must be transposed before the change of basis can be applied. You can omit the transposition if you switch the multiplication to
float3 worldNormal = mul( tangentNormal, TBN );
Because you multiplied the T, B and N vectors by the world matrix, your TBN matrix transforms from tangent space to world space (the raw TBN transforms into object space, and the world matrix then takes that into world space). Other implementations multiply the TBN by the world inverse transpose matrix instead. With the resulting TBN you can transform the light vector from world into tangent space and compare it against the tangent-space normal directly. So I suspect whoever told you that TBN transforms world to tangent space uses this approach (it saves some performance, because the heavy matrix operations are done in the vertex shader).
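To make the two conventions concrete, here is a minimal sketch (it assumes the T, B, N basis is orthonormal; worldLightDir is a placeholder name, not from the code above):
// Rows of this matrix are the world-space basis vectors.
float3x3 TBN = float3x3( T, B, N );
// Tangent space => world space: two equivalent forms.
float3 worldNormal1 = mul( tangentNormal, TBN );            // row vector * matrix
float3 worldNormal2 = mul( transpose(TBN), tangentNormal ); // matrix * column vector
// World space => tangent space: the untransposed matrix applied to a
// column vector just takes dot products with the rows.
float3 tangentLightDir = mul( TBN, worldLightDir ); // = ( dot(T,L), dot(B,L), dot(N,L) )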

Related

Add generic Bump Map in Fragment Shader

I have an OBJ model with normals and I want to apply a surface bump map to it.
Can anyone help me with how to calculate the final normal from the OBJ normals and the bump map?
My fragment shader is:
fragment float4 fragmentShaderObj(VertexOutObj interpolated [[stage_in]],
                                  const device Uniforms& uniforms [[ buffer(1) ]],
                                  texture2d_array<float> tex2D [[ texture(0) ]],
                                  sampler sampler2D [[ sampler(0) ]]) {
    float4x4 mv_Matrix = uniforms.modelMatrix;
    // Ambient
    Light light = uniforms.light;
    if (interpolated.mapB >= 0) {
        float3 norm = (2.0 * tex2D.sample(sampler2D, interpolated.texCoord, interpolated.mapB).rgb - float3(1,1,1));
    }
    interpolated.normal = normalize(interpolated.normal);
    float4 ambientColor = float4(light.color * interpolated.ka, 1);
    // Diffuse
    float3 normal_inv = float3(0,0,0) - interpolated.normal;
    //float diffuseFactor1 = max(0.0, dot(interpolated.normal, light.direction));
    float diffuseFactor1 = max(max(0.0, dot(interpolated.normal, light.direction)), dot(normal_inv, light.direction));
    float4 diffuseColor = float4(light.color * diffuseFactor1 * interpolated.kd, 1.0);
    // Specular
    float3 eye = normalize(interpolated.fragmentPosition);
    float3 reflection = reflect(light.direction, interpolated.normal);
    float specularFactor = max(0.0, dot(reflection, eye));
    float4 specularColor = float4(light.color * interpolated.ns * specularFactor * interpolated.ks, 1.0);
    float4 color = tex2D.sample(sampler2D, interpolated.texCoord, interpolated.mapKd);
    if (color.a < 0.1)
        discard_fragment();
    color.rgb = color.rgb * (ambientColor + diffuseColor + specularColor).rgb;
    return color;
}
Now, how can I combine that norm with interpolated.normal? It can't be a straightforward addition.

Point Lighting Error DirectX 11

I'm new to DirectX 11, and I programmed a distance-dependent point light shader that works pretty well for rotated and translated objects. But after I tried scaling my models, the lighting got dimmer when I scaled the model larger, and brighter when I scaled the model smaller. I thought it might be the normals, but I made sure to multiply them by the inverse transpose of the world matrix, and I made sure to normalize them in the pixel shader after they are interpolated. Here is the shader code:
Texture2D txDiffuse : register( t0 );
SamplerState samAnisotropic
{
    Filter = ANISOTROPIC;
    MaxAnisotropy = 4;
};

cbuffer ConstantBuffer : register( b0 )
{
    matrix World;
    matrix View;
    matrix Projection;
    matrix WorldInvTrans;
    float3 LightPos;
    float  pad1;
    float3 EyePos;
    float  pad2;
    float3 At;
    float  pad3;
    float  showNorms;
}

struct VS_INPUT
{
    float4 Pos     : POSITION;
    float3 Norm    : NORMAL;
    float2 TexCoor : TEXCOORD0;
};

struct PS_INPUT
{
    float4 Pos       : SV_POSITION;
    float3 Norm      : NORMAL;
    float3 LightDir  : POSITION0;
    float3 EyeVector : POSITION1;
    float2 TexCoor   : TEXCOORD0;
    float  distance  : FLOAT0;
    float  showNorms : FLOAT1;
};

PS_INPUT VS( VS_INPUT input )
{
    PS_INPUT output = (PS_INPUT)0;
    output.Pos = mul( input.Pos, World );
    output.LightDir = normalize( LightPos - output.Pos );
    output.EyeVector = normalize( EyePos - At );
    output.distance = distance( LightPos, output.Pos );
    output.Pos = mul( output.Pos, View );
    output.Pos = mul( output.Pos, Projection );
    output.Norm = mul( input.Norm, WorldInvTrans );
    output.TexCoor = input.TexCoor;
    output.showNorms = showNorms;
    return output;
}

float4 PS( PS_INPUT input ) : SV_Target
{
    input.Norm = normalize( input.Norm );
    float specTerm = 0;
    float3 ReflVector = normalize( reflect( input.LightDir, input.Norm ) );
    [flatten]
    if ( dot( ReflVector, input.EyeVector ) >= 0 )
    {
        specTerm = pow( dot( ReflVector, input.EyeVector ), 50 );
    }
    float diffuseTerm = saturate( dot( input.LightDir, input.Norm ) );
    float4 ambient = float4( 0.25f, 0.25f, 0.25f, 1.0f );
    float4 lightColor = float4( 1.0f, 1.0f, 1.0f, 1.0f );
    return ( (ambient + (diffuseTerm + specTerm) / (pow( input.distance, 1 ) * 0.025f))
             * lightColor * txDiffuse.Sample( samAnisotropic, input.TexCoor ) )
           * ( 1 - input.showNorms ) + float4( input.Norm, 1.0f ) * input.showNorms;
}
I was still suspicious that the normals weren't correct, so I edited the last line of my pixel shader to shade the model based on the normal vectors when showNorms = 1.0f. The normals looked like they were transformed correctly. Still suspicious, I replaced my model with a plane in the XZ plane and scaled it up 50 times. When I rendered it, the lighting was still dim, but the plane was green when I set showNorms to 1.0f, which must mean that the normals are all pointing in the upward Y direction. If I'm transforming my normals correctly and normalizing them, what could be causing these lighting errors?
If this helps, here is my code when I set the constant buffers for the plane:
//Render Plane
mWorld = XMMatrixIdentity();
cb1.mWorld = XMMatrixTranspose( XMMatrixMultiply( XMMatrixMultiply( mWorld, XMMatrixScaling( 50.0f, 1.0f, 50.0f ) ), XMMatrixTranslation( 0.0f, -5.0f, 0.0f ) ) );
XMMATRIX A = cb1.mWorld;
A.r[3] = XMVectorSet(0.0f, 0.0f, 0.0f, 1.0f);
det = XMMatrixDeterminant(A);
cb1.mWorldInvTrans = XMMatrixInverse(&det, A);
g_pImmediateContext->UpdateSubresource( g_pcBufferShader1, 0, NULL, &cb1, 0, 0 );
Edit: I changed the code a little bit to fix the specTerm:
Texture2D txDiffuse : register( t0 );
SamplerState samAnisotropic
{
    Filter = ANISOTROPIC;
    MaxAnisotropy = 4;
};

cbuffer ConstantBuffer : register( b0 )
{
    matrix World;
    matrix View;
    matrix Projection;
    matrix WorldInvTrans;
    float3 LightPos;
    float  pad1;
    float3 EyePos;
    float  pad2;
    float3 At;
    float  pad3;
    float  showNorms;
}

struct VS_INPUT
{
    float4 Pos     : POSITION;
    float3 Norm    : NORMAL;
    float2 TexCoor : TEXCOORD0;
};

struct PS_INPUT
{
    float4 Pos       : SV_POSITION;
    float3 Norm      : NORMAL;
    float3 LightDir  : POSITION0;
    float3 EyeVector : POSITION1;
    float2 TexCoor   : TEXCOORD0;
    float  distance  : FLOAT0;
    float  showNorms : FLOAT1;
};

PS_INPUT VS( VS_INPUT input )
{
    PS_INPUT output = (PS_INPUT)0;
    output.Pos = mul( input.Pos, World );
    output.LightDir = LightPos - output.Pos;
    output.EyeVector = EyePos - At;
    output.distance = distance( LightPos, output.Pos );
    output.Pos = mul( output.Pos, View );
    output.Pos = mul( output.Pos, Projection );
    output.Norm = mul( input.Norm, WorldInvTrans );
    output.TexCoor = input.TexCoor;
    output.showNorms = showNorms;
    return output;
}

float4 PS( PS_INPUT input ) : SV_Target
{
    input.Norm = normalize( input.Norm );
    input.LightDir = normalize( input.LightDir );
    input.EyeVector = normalize( input.EyeVector );
    float specTerm = 0;
    float3 ReflVector = normalize( reflect( -input.LightDir, input.Norm ) );
    [flatten]
    if ( dot( ReflVector, input.EyeVector ) >= 0 )
    {
        specTerm = pow( dot( ReflVector, input.EyeVector ), 50 );
    }
    float diffuseTerm = saturate( dot( input.LightDir, input.Norm ) );
    float4 ambient = float4( 0.25f, 0.25f, 0.25f, 1.0f );
    float4 lightColor = float4( 1.0f, 1.0f, 1.0f, 1.0f );
    return ( (ambient + (diffuseTerm + specTerm) / (pow( input.distance, 1 ) * 0.025f))
             * lightColor * txDiffuse.Sample( samAnisotropic, input.TexCoor ) )
           * ( 1 - input.showNorms ) + float4( input.Norm, 1.0f ) * input.showNorms;
}
I think you should try to normalize the LightDir vector in the pixel shader as well. If the plane is really large, it may happen that after the interpolation of these two vectors, the vector you get in the pixel shader is no longer normalized. This error is likely to increase as the scale goes up. Give it a try.
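To see why interpolation denormalizes, here is a small worked example (standalone arithmetic, not tied to the shaders above):
// Halfway between two unit vectors a and b, the interpolated vector
// has length cos(theta/2), where theta is the angle between them.
float3 a = float3( 1, 0, 0 );
float3 b = float3( 0, 1, 0 );  // theta = 90 degrees
float3 mid = 0.5f * ( a + b ); // what the rasterizer hands the pixel shader
float len = length( mid );     // ~0.707, not 1 -- hence the per-pixel normalize()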

Normal Mapping and translation disrupts my lighting

I have a normal-mapping issue. I have a texture and a normal texture on each model, loaded via the ASSIMP library. I am calculating the tangent vectors for each object with the help of the ASSIMP library, so these should be fine. The objects work perfectly with normal mapping, but as soon as I start translating one of the objects (thus influencing the model matrix with translations), the lighting fails: the floor (which is translated down the Y axis) loses most of its diffuse lighting, and its specular lighting points in the wrong direction (it should be between the light bulb and the player position).
It might have something to do with the normal matrix (although translations should be lost there), or maybe a wrong matrix is being used in the shaders. I am out of ideas and was hoping you could shed some light on the issue.
Vertex shader:
#version 330
layout(location = 0) in vec3 position;
layout(location = 1) in vec3 normal;
layout(location = 2) in vec3 tangent;
layout(location = 3) in vec3 color;
layout(location = 4) in vec2 texCoord;

// fragment pass-through
out vec3 Position;
out vec3 Normal;
out vec3 Tangent;
out vec3 Color;
out vec2 TexCoord;
out vec3 TangentSurface2Light;
out vec3 TangentSurface2View;

uniform vec3 lightPos;
uniform vec3 playerPos;

// vertex transformation
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;

void main()
{
    mat3 normalMatrix = mat3(transpose(inverse(model)));
    Position = vec3(model * vec4(position, 1.0));
    Normal = normalMatrix * normal;
    Tangent = tangent;
    Color = color;
    TexCoord = texCoord;
    gl_Position = projection * view * model * vec4(position, 1.0);

    // Calculate tangent matrix and calculate fragment bump mapping coord space.
    vec3 light = lightPos;
    vec3 n = normalize(normalMatrix * normal);
    vec3 t = normalize(normalMatrix * tangent);
    vec3 b = cross(n, t);
    // create matrix for tangent (from vertex to tangent-space)
    mat3 mat = mat3(t.x, b.x, n.x, t.y, b.y, n.y, t.z, b.z, n.z);
    vec3 vector = normalize(light - Position);
    TangentSurface2Light = mat * vector;
    vector = normalize(playerPos - Position);
    TangentSurface2View = mat * vector;
}
Fragment shader:
#version 330
in vec3 Position;
in vec3 Normal;
in vec3 Tangent;
in vec3 Color;
in vec2 TexCoord;
in vec3 TangentSurface2Light;
in vec3 TangentSurface2View;

out vec4 outColor;

uniform vec3 lightPos;
uniform vec3 playerPos;
uniform mat4 view;
uniform sampler2D texture0;
uniform sampler2D texture_normal; // normal map
uniform float repeatFactor = 1;

void main()
{
    vec4 texColor = texture(texture0, TexCoord * repeatFactor);
    vec4 matColor = vec4(Color, 1.0);
    vec3 light = vec3(vec4(lightPos, 1.0));
    float dist = length(light - Position);
    // float att = 1.0 / (1.0 + 0.01 * dist + 0.001 * dist * dist);
    float att = 1.0;
    // Ambient
    vec4 ambient = vec4(0.2);
    // Diffuse
    // vec3 surface2light = normalize(light - Position);
    vec3 surface2light = normalize(TangentSurface2Light);
    // vec3 norm = normalize(Normal);
    vec3 norm = normalize(texture(texture_normal, TexCoord * repeatFactor).xyz * 2.0 - 1.0);
    float contribution = max(dot(norm, surface2light), 0.0);
    vec4 diffuse = contribution * vec4(0.6);
    // Specular
    // vec3 surf2view = normalize(-Position); // Player is always at position 0
    vec3 surf2view = normalize(TangentSurface2View);
    vec3 reflection = reflect(-surface2light, norm); // reflection vector
    float specContribution = pow(max(dot(surf2view, reflection), 0.0), 32);
    vec4 specular = vec4(1.0) * specContribution;
    outColor = (ambient + (diffuse * att) + (specular * pow(att, 3))) * texColor;
    // outColor = vec4(Color, 1.0) * texture(texture0, TexCoord);
}
EDIT
Edited the shader code to calculate everything in world space instead of ping-ponging between world and camera space (easier to understand and less error-prone).
You are doing some strange manipulations with matrices. In your VS you transform the normal (which is in model space) by the inverse view-world matrix. That doesn't make any sense; it may be easier to do the calculations in world space. I've got some working sample code, but it uses slightly different naming.
Vertex shader:
void main_vs(in A2V input, out V2P output)
{
    output.position = mul(input.position, _worldViewProjection);
    output.normal = input.normal;
    output.binormal = input.binormal;
    output.tangent = input.tangent;
    output.positionWorld = mul(input.position, _world);
    output.tex = input.tex;
}
Here we transform the position to projection (screen) space. The TBN vectors are left in model space; they will be used later. We also compute the world-space position for the lighting evaluation.
Pixel shader:
void main_ps(in V2P input, out float4 output : SV_Target)
{
    float3x3 tbn = float3x3(input.tangent, -input.binormal, input.normal);
    // extract & decode normal:
    float3 texNormal = _normalTexture.Sample(_normalSampler, input.tex).xyz * 2 - 1;
    // now transform TBN-space texNormal to world space:
    float3 normal = mul(texNormal, tbn);
    normal = normalize(mul(normal, _world));
    float3 lightDirection = -_lightPosition.xyz; // directional
    float3 viewDirection = normalize(input.positionWorld - _camera);
    float3 reflectedLight = reflect(lightDirection, normal);
    float diffuseIntensity = dot(normal, lightDirection);
    float specularIntensity = max(0, dot(reflectedLight, viewDirection) * 1.3);
    output = ((_ambient + diffuseIntensity * _diffuse) * _texture.Sample(_sampler, input.tex)
              + pow(specularIntensity, 7) * float4(1,1,1,1)) * _lightColor;
}
Here I use a directional light; for an omni light you should do something like
float3 lightDirection = normalize(input.positionWorld - _lightPosition.xyz);//omni
Here we first take the normal from the texture, which is in TBN space. Then we apply the TBN matrix to transform it into model space, and then the world matrix to transform it into world space, where we already have the light position, eye, etc.
Some other shader code, omitted above (DX11, but it's easy to translate):
cbuffer ViewTranforms
{
    row_major matrix _worldViewProjection;
    row_major matrix _world;
    float3 _camera;
};

cbuffer BumpData
{
    float4 _ambient;
    float4 _diffuse;
};

cbuffer Textures
{
    texture2D _texture;
    SamplerState _sampler;
    texture2D _normalTexture;
    SamplerState _normalSampler;
};

cbuffer Light
{
    float4 _lightPosition;
    float4 _lightColor;
};
//------------------------------------
struct A2V
{
    float4 position : POSITION;
    float3 normal   : NORMAL;
    float3 binormal : BINORMAL;
    float3 tangent  : TANGENT;
    float2 tex      : TEXCOORD;
};

struct V2P
{
    float4 position      : SV_POSITION;
    float3 normal        : NORMAL;
    float3 binormal      : BINORMAL;
    float3 tangent       : TANGENT;
    float3 positionWorld : NORMAL1;
    float2 tex           : TEXCOORD;
};
Also, I use a precomputed binormal here; you can keep your code that computes it (via cross(normal, tangent)).
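If you compute it yourself, a minimal sketch (the handedness factor is an assumption on my part; many asset pipelines store such a sign in the tangent's w component):
// Rebuild the binormal from normal and tangent. Multiply in a
// handedness sign (+1 or -1) if your UVs can be mirrored.
float3 MakeBinormal( float3 normal, float3 tangent, float handedness )
{
    return cross( normal, tangent ) * handedness;
}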
Hope this helps.

Why do my objects lose their lighting when the camera gets far away?

I'm building a 3D scene in OpenGL 2.1 and illuminating it using a directional light with the Phong lighting model.
Up close everything seems to work fine, but when the camera moves away from the models they lose all their lighting (except ambient).
What could make this happen?
This is the vertex shader:
uniform mat4 viewMatrix;
uniform mat4 modelMatrix;
uniform mat4 projectionMatrix;
uniform mat3 normalMatrix;
uniform vec3 lightDir;

out vec3 normal;
out vec3 lightDir_viewSpace;
out vec3 vertexPos_viewSpace;

void main(){
    normal = normalize( normalMatrix * gl_Normal );
    gl_Position = projectionMatrix * viewMatrix * modelMatrix * gl_Vertex;
    vertexPos_viewSpace = -( viewMatrix * modelMatrix * gl_Vertex ).xyz;
    lightDir_viewSpace = normalize( viewMatrix * vec4(lightDir, 1) ).xyz;
}
And here is the fragment shader:
uniform vec3 Ka;
uniform vec3 Kd;
uniform vec3 Ks;
uniform float Shininess;

in vec3 normal;
in vec3 lightDir_viewSpace;
in vec3 vertexPos_viewSpace;

float getdiffuseIntensity( vec3 N, vec3 L ){
    float intensity = clamp( dot(L, N), 0.0, 1.0 );
    return intensity;
}

float getSpecularIntensity( vec3 N, vec3 L, vec3 vertexPos, float shine ){
    vec3 R = normalize( reflect( -L, N ) );
    vec3 V = normalize( vertexPos );
    float intensity = 0.0;
    if ( dot(N, L) > 0.0 ){
        float cosVR = clamp( dot(V, R), 0.0, 1.0 );
        intensity = pow( cosVR, shine );
    }
    return intensity;
}

void main(){
    vec3 normalNorm = normalize( normal );
    vec3 lightDirNorm = normalize( lightDir_viewSpace );
    vec3 vertexDirNorm = normalize( vertexPos_viewSpace );
    vec3 ilumAmbi = Ka;
    vec3 ilumDiff = Kd * getdiffuseIntensity( normalNorm, lightDirNorm );
    vec3 ilumEspec = Ks * getSpecularIntensity( normalNorm, lightDirNorm, vertexDirNorm, Shininess );
    gl_FragColor = vec4( ilumAmbi + ilumDiff + ilumEspec, 1.0 );
}
Also, in case someone asks: yes, this is a school project.
Am I doing something wrong?
The problem lies here:
lightDir_viewSpace = normalize( viewMatrix * vec4(lightDir, 1) ).xyz
What that does is interpret lightDir as a point (x, y, z, 1) instead of a vector (x, y, z, 0). Your code therefore does a sort of point lighting, which is why the lighting changes with the camera distance. The correct code is
lightDir_viewSpace = normalize( viewMatrix * vec4(lightDir, 0) ).xyz
However, note that this expression evaluates to the same vector for every vertex and every fragment. So it's actually better to compute it once on the CPU and pass it as a uniform vec3 lightDir_viewSpace to the fragment shader.
That also means you no longer need separate viewMatrix and modelMatrix uniforms. Instead, compute modelViewMatrix = viewMatrix * modelMatrix on the CPU, then use a single uniform mat4 modelViewMatrix in the vertex shader.
That's also the reason why lighting in view space is fine; there's no need to do it in world space.
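The point-versus-vector distinction is just the w component. A minimal sketch in HLSL-style syntax (viewMatrix and lightDir stand in for the uniforms above):
// A position uses w = 1, so the matrix's translation applies to it.
float3 pointInView = mul( float4( position, 1 ), viewMatrix ).xyz;
// A direction uses w = 0, so the translation drops out and only the
// rotational part of the matrix affects the result.
float3 dirInView = normalize( mul( float4( lightDir, 0 ), viewMatrix ).xyz );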
Always make sure that your vectors are all in the same space (model space, world space or view space). Your normal is in world space while your lightDir is in view space, so you should move the lightDir to world space, which is easy since your input (uniform vec3 lightDir;) is already in world space.
The second thing that's wrong is your specular calculation. Specular highlights in the Phong model are computed from the angle (that's what dot(V, R) measures) between the ideal reflection vector (vec3 R = normalize( reflect( -L, N ) );) and the direction to the viewer.
So the final code would be:
uniform mat4 viewMatrix;
uniform mat4 modelMatrix;
uniform mat4 projectionMatrix;
uniform mat3 normalMatrix;
uniform vec3 viewPos; // the position of your view in world coordinates

out vec3 normal;
out vec3 viewDir;

void main(){
    normal = normalize( normalMatrix * gl_Normal );
    gl_Position = projectionMatrix * viewMatrix * modelMatrix * gl_Vertex;
    vec4 worldPos = modelMatrix * gl_Vertex;
    viewDir = worldPos.xyz - viewPos; // maybe you have to change the ordering, I'm not sure
}
and:
uniform vec3 Ka;
uniform vec3 Kd;
uniform vec3 Ks;
uniform float Shininess;
uniform vec3 lightDir;

in vec3 normal;
in vec3 viewDir;

float getdiffuseIntensity( vec3 N, vec3 L ){
    float intensity = clamp( dot(L, N), 0.0, 1.0 );
    return intensity;
}

float getSpecularIntensity( vec3 N, vec3 L, vec3 vertexPos, float shine ){
    vec3 R = normalize( reflect( -L, N ) );
    vec3 V = normalize( vertexPos );
    float intensity = 0.0;
    if ( dot(N, L) > 0.0 ){
        float cosVR = clamp( dot(V, R), 0.0, 1.0 );
        intensity = pow( cosVR, shine );
    }
    return intensity;
}

void main(){
    vec3 normalNorm = normal; // no need to normalize
    vec3 viewNorm = normalize( viewDir );
    vec3 ilumAmbi = Ka;
    vec3 ilumDiff = Kd * getdiffuseIntensity( normalNorm, lightDir );
    vec3 ilumEspec = Ks * getSpecularIntensity( normalNorm, lightDir, viewNorm, Shininess );
    gl_FragColor = vec4( ilumAmbi + ilumDiff + ilumEspec, 1.0 );
}
I haven't tested it, though.

Texture Mapping Problem in Direct3D

Hey guys,
I am having trouble rendering textures through shaders. I really don't know why; the shader file just implements simple Phong lighting and interpolates the texture on a simple quad, but all I am getting is the lighting and material colors, as if there were no texture (the texture is a stone texture, but all I get is white).
Here is the shader file:
//------------------------------------
uniform extern float4x4 matWVP;
uniform extern float4x4 matITW;
uniform extern float4x4 matW;
uniform extern float4 DiffMatr;
uniform extern float4 DiffLight;
uniform extern float4 AmbLight;
uniform extern float4 SpecLight;
uniform extern float4 AmbMatr;
uniform extern float3 LightPosW;
uniform extern float4 SpecMatr;
uniform extern float SpecPower;
uniform extern float3 EyePos;
uniform extern texture Tex;
//------------------------------------
sampler sTex = sampler_state
{
    Texture = <Tex>;
    Minfilter = LINEAR;
    Magfilter = LINEAR;
    Mipfilter = POINT;
    AddressU = WRAP;
    AddressV = WRAP;
};

struct vOut {
    float4 posH : POSITION0;
    float3 posW : TEXCOORD0;
    float3 normW : TEXCOORD1;
    float2 cTex : TEXCOORD2;
};
//------------------------------------
vOut VS_Def(float3 posL : POSITION0,
            float3 normL : NORMAL0,
            float2 cTex : TEXCOORD0)
{
    vOut V = (vOut)0;
    V.posH = mul(float4(posL, 1.0f), matWVP);
    V.posW = mul(float4(posL, 1.0f), matW).xyz;
    V.normW = mul(float4(normL, 1.0f), matITW).xyz;
    V.normW = normalize(V.normW);
    V.cTex = V.cTex;
    return V;
}

float4 PS_Def(float3 posW : TEXCOORD0,
              float4 normW : TEXCOORD1,
              float2 cTex : TEXCOORD2) : COLOR
{
    float3 LightVec = LightPosW - posW;
    LightVec = normalize(LightVec);
    float3 RefLightVec = reflect(-LightVec, normW);
    float3 EyeVec = EyePos - posW;
    EyeVec = normalize(EyePos - posW);
    float d = max(0.0f, dot(normW, LightVec));
    float s = pow(max(dot(RefLightVec, EyeVec), 0.0f), SpecPower);
    float3 Diff = d * (DiffMatr * DiffLight).rgb;
    float3 Amb = (AmbMatr * AmbLight).rgb;
    float3 Spec = s * (SpecMatr * SpecLight).rgb;
    float3 TexColor = tex2D(sTex, cTex.xy).rgb;
    float3 color = Diff + Amb;
    return float4(color * TexColor + Spec, DiffMatr.a);
}
//-----------------------------------
technique DefTech
{
    pass p0
    {
        VertexShader = compile vs_2_0 VS_Def();
        PixelShader = compile ps_2_0 PS_Def();
    }
}
The lighting and material colors are as follows:
(P.S.: WHITE = D3DXCOLOR(1.0f, 1.0f, 1.0f, 1.0f))
Diffuse Material = WHITE;
Ambient Material = WHITE;
Specular Material = WHITE * 0.5f;
Diffuse Lighting = WHITE * 0.8f;
Ambient Lighting = WHITE * 0.8f;
Specular Lighting = WHITE * 0.5f;
Specular Power = 48.0f;
Appreciate the help, guys
In your vertex shader you have
V.cTex = V.cTex;
Shouldn't this be the following?
V.cTex = cTex;
Since V was zero-initialized with (vOut)0, V.cTex stays (0, 0) for every vertex, so the whole quad samples the single texel at the texture's origin instead of interpolating across it, which would explain the flat color.