Point Lighting Error DirectX 11 - C++

I'm new to DirectX 11, and I programmed a distance-dependent point light shader that works well for rotated and translated objects. But after I tried scaling my models, the lighting got dimmer when I scaled a model up and brighter when I scaled it down. I thought it might be the normals, but I made sure to multiply them by the inverse transpose of the world matrix, and to normalize them in the pixel shader after they are interpolated. Here is the shader code:
Texture2D txDiffuse : register( t0 );
SamplerState samAnisotropic
{
Filter = ANISOTROPIC;
MaxAnisotropy = 4;
};
cbuffer ConstantBuffer : register( b0 )
{
matrix World;
matrix View;
matrix Projection;
matrix WorldInvTrans;
float3 LightPos;
float pad1;
float3 EyePos;
float pad2;
float3 At;
float pad3;
float showNorms;
}
struct VS_INPUT
{
float4 Pos : POSITION;
float3 Norm : NORMAL;
float2 TexCoor : TEXCOORD0;
};
struct PS_INPUT
{
float4 Pos : SV_POSITION;
float3 Norm : NORMAL;
float3 LightDir : POSITION0;
float3 EyeVector : POSITION1;
float2 TexCoor : TEXCOORD0;
float distance : FLOAT0;
float showNorms : FLOAT1;
};
PS_INPUT VS( VS_INPUT input )
{
PS_INPUT output = (PS_INPUT)0;
output.Pos = mul( input.Pos, World );
output.LightDir = normalize( LightPos - output.Pos );
output.EyeVector = normalize( EyePos - At );
output.distance = distance( LightPos, output.Pos);
output.Pos = mul( output.Pos, View );
output.Pos = mul( output.Pos, Projection );
output.Norm = mul( input.Norm, WorldInvTrans );
output.TexCoor = input.TexCoor;
output.showNorms = showNorms;
return output;
}
float4 PS( PS_INPUT input) : SV_Target
{
input.Norm = normalize( input.Norm );
float specTerm = 0;
float3 ReflVector = normalize( reflect( input.LightDir, input.Norm ) );
[flatten]
if ( dot( ReflVector, input.EyeVector ) >= 0 )
{
specTerm = pow( dot( ReflVector, input.EyeVector ) , 50 );
}
float diffuseTerm = saturate( dot( input.LightDir, input.Norm ) );
float4 ambient = float4( 0.25f, 0.25f, 0.25f, 1.0f );
float4 lightColor = float4( 1.0f, 1.0f, 1.0f, 1.0f );
return ( (ambient + (diffuseTerm + specTerm) / (pow( input.distance, 1 ) * 0.025f)) * lightColor * txDiffuse.Sample( samAnisotropic, input.TexCoor ) ) * ( 1 - input.showNorms ) + float4( input.Norm, 1.0f ) * input.showNorms;
}
I was still suspicious that the normals weren't correct, so I edited the last line in my pixel shader to shade the model based on the normal vectors when showNorms = 1.0f. The normals looked like they were transformed correctly. Still suspicious, I replaced my model with a plane on the XZ plane and scaled it up 50 times. When I rendered it, the lighting was still dim, but the plane was green when I set showNorms to 1.0f, which must mean that the normals are all pointing in the upward Y direction. If I'm transforming my normals correctly and normalizing them, what could be causing these lighting errors?
If it helps, here is the code where I set the constant buffers for the plane:
//Render Plane
mWorld = XMMatrixIdentity();
cb1.mWorld = XMMatrixTranspose( XMMatrixMultiply( XMMatrixMultiply( mWorld, XMMatrixScaling( 50.0f, 1.0f, 50.0f ) ), XMMatrixTranslation( 0.0f, -5.0f, 0.0f ) ) );
XMMATRIX A = cb1.mWorld;
A.r[3] = XMVectorSet(0.0f, 0.0f, 0.0f, 1.0f);
det = XMMatrixDeterminant(A);
cb1.mWorldInvTrans = XMMatrixInverse(&det, A);
g_pImmediateContext->UpdateSubresource( g_pcBufferShader1, 0, NULL, &cb1, 0, 0 );
Edit: I changed the code a little bit to fix the specTerm:
Texture2D txDiffuse : register( t0 );
SamplerState samAnisotropic
{
Filter = ANISOTROPIC;
MaxAnisotropy = 4;
};
cbuffer ConstantBuffer : register( b0 )
{
matrix World;
matrix View;
matrix Projection;
matrix WorldInvTrans;
float3 LightPos;
float pad1;
float3 EyePos;
float pad2;
float3 At;
float pad3;
float showNorms;
}
struct VS_INPUT
{
float4 Pos : POSITION;
float3 Norm : NORMAL;
float2 TexCoor : TEXCOORD0;
};
struct PS_INPUT
{
float4 Pos : SV_POSITION;
float3 Norm : NORMAL;
float3 LightDir : POSITION0;
float3 EyeVector : POSITION1;
float2 TexCoor : TEXCOORD0;
float distance : FLOAT0;
float showNorms : FLOAT1;
};
PS_INPUT VS( VS_INPUT input )
{
PS_INPUT output = (PS_INPUT)0;
output.Pos = mul( input.Pos, World );
output.LightDir = LightPos - output.Pos;
output.EyeVector = EyePos - At;
output.distance = distance( LightPos, output.Pos );
output.Pos = mul( output.Pos, View );
output.Pos = mul( output.Pos, Projection );
output.Norm = mul( input.Norm, WorldInvTrans );
output.TexCoor = input.TexCoor;
output.showNorms = showNorms;
return output;
}
float4 PS( PS_INPUT input) : SV_Target
{
input.Norm = normalize( input.Norm );
input.LightDir = normalize( input.LightDir );
input.EyeVector = normalize( input.EyeVector );
float specTerm = 0;
float3 ReflVector = normalize( reflect( -input.LightDir, input.Norm ) );
[flatten]
if ( dot( ReflVector, input.EyeVector ) >= 0 )
{
specTerm = pow( dot( ReflVector, input.EyeVector ) , 50 );
}
float diffuseTerm = saturate( dot( input.LightDir, input.Norm ) );
float4 ambient = float4( 0.25f, 0.25f, 0.25f, 1.0f );
float4 lightColor = float4( 1.0f, 1.0f, 1.0f, 1.0f );
return ( (ambient + (diffuseTerm + specTerm) / (pow( input.distance, 1 ) * 0.025f)) * lightColor * txDiffuse.Sample( samAnisotropic, input.TexCoor ) ) * ( 1 - input.showNorms ) + float4( input.Norm, 1.0f ) * input.showNorms;
}

I think you should try to normalize the LightDir vector in the pixel shader as well. If the plane is really large, it can happen that after interpolation these two vectors are no longer normalized when they reach the pixel shader. This error is likely to grow as the scale goes up. Give it a try.
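Concretely, the change being suggested is just this (a sketch against the shaders in the question; output.Pos here is still the world-space position, before the view transform):
// Vertex shader: pass the raw, unnormalized vector so interpolation keeps the right direction.
output.LightDir = LightPos - output.Pos.xyz;
// Pixel shader: renormalize after interpolation, then light as before.
input.LightDir = normalize( input.LightDir );
float diffuseTerm = saturate( dot( input.LightDir, input.Norm ) );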

Related

Why does the constant buffer update only partially in DirectX 11?

Working my way through "Introduction to 3D Game Programming with DirectX 11", I am reworking the samples so as not to use the Effects Framework; so far, all good.
However, I have run into a problem where one of the constant buffers only partially updates.
CPU side CB struct:
struct CBPerFrame
{
DirectionalLight DirLight[3];
DirectX::XMFLOAT3 EyePositionW;
DirectX::XMFLOAT4 FogColour;
float FogStart;
float FogRange;
int LightNumber;
double Padding;
};
Where I update the CB prior to any drawing.
CBPerFrame cbPerFrame { };
cbPerFrame.DirLight[ 0 ] = mDirectionalLights[ 0 ];
cbPerFrame.DirLight[ 1 ] = mDirectionalLights[ 1 ];
cbPerFrame.DirLight[ 2 ] = mDirectionalLights[ 2 ];
cbPerFrame.EyePositionW = mEyePosW;
cbPerFrame.FogColour = XMFLOAT4( Colors::Black );
cbPerFrame.FogRange = 1.0F;
cbPerFrame.FogStart = 0.0F;
cbPerFrame.LightNumber = mLightCount;
cbPerFrame.Padding = 0.0;
mD3DImmediateContext->UpdateSubresource( mCBPerFrame.Get( ), 0, nullptr, &cbPerFrame, 0, 0 );
Pixel Shader:
cbuffer CBPerFrame : register( b0 )
{
DirectionalLight gDirectionalLights[ 3 ];
float3 gEyePosW;
float4 gFogColor;
float gFogStart;
float gFogRange;
int gLightCount;
double gPadding;
}
cbuffer CBPerObject : register( b1 )
{
matrix gWorld;
matrix gWorldInverseTranspose;
matrix gWorldViewProjection;
float4x4 gTextureTransform;
Material gMaterial;
}
float4 main( VertexOut input ) : SV_TARGET
{
// Interpolating normal can unnormalize it, so normalize it.
input.NormalW = normalize( input.NormalW );
// The toEye vector is used in lighting.
float3 toEye = normalize( gEyePosW - input.PositionW );
// Cache the distance to the eye from this surface point.
float distToEye = length( toEye );
// Normalize.
toEye /= distToEye;
//
// Lighting.
//
// Start with a sum of zero.
float4 ambient = float4( 0.0F, 0.0F, 0.0F, 0.0F );
float4 diffuse = float4( 0.0F, 0.0F, 0.0F, 0.0F );
float4 spec = float4( 0.0F, 0.0F, 0.0F, 0.0F );
// Sum the light contribution from each light source.
/* [unroll]*/
for ( int i = 0; i < gLightCount; i++ )
{
float4 A, D, S;
ComputeDirectionalLight( gMaterial, gDirectionalLights[ i ], input.NormalW, toEye, A, D, S );
ambient += A;
diffuse += D;
spec += S;
}
float4 litColour = ambient + diffuse + spec;
// Common to take alpha from diffuse material.
litColour.a = gMaterial.Diffuse.a;
return litColour;
}
gLightCount is always set to 0, even though it should be set to 2 at the start of the application. If I change the condition of the loop to a hardcoded 1/2/3 the shader works as it should.
I realise that there are extra variables in the CB, but the sample code has this and I believe it is used in further examples.
I think the issue is to do with how the CBPerFrame struct is padded, so it isn't being copied over to the GPU correctly. Any thoughts?
Thanks for any help.
It seems to be an issue with packing. According to the Packing Rules for Constant Variables, data is packed at 4-byte boundaries, but also so that a variable won't cross a 16-byte boundary. In this case there will definitely be padding after EyePositionW:
struct CBPerFrame
{
DirectionalLight DirLight[3];
DirectX::XMFLOAT3 EyePositionW;
float padding1;
DirectX::XMFLOAT4 FogColour;
Also, I'm not sure why there is double gPadding; at the end. It should probably be another int.
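For reference, this is roughly how the members land in 16-byte constant registers under those packing rules (a sketch; the register numbers assume the Luna-style DirectionalLight struct of 64 bytes, i.e. four registers per light):
cbuffer CBPerFrame : register( b0 )
{
DirectionalLight gDirectionalLights[ 3 ]; // c0 - c11, assuming 64 bytes per light
float3 gEyePosW; // c12.xyz
// c12.w is implicit padding: gFogColor may not straddle a register boundary
float4 gFogColor; // c13
float gFogStart; // c14.x
float gFogRange; // c14.y
int gLightCount; // c14.z
int gPadding; // c14.w - an int (or float) here keeps the two layouts in sync
}
The CPU-side struct then needs the same explicit float of padding between EyePositionW and FogColour, as in the corrected struct above, so that the two layouts match byte for byte, and the trailing double becomes an int.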

C++ DirectX lighting in pixel shader issue

I have a problem that I can't manage to figure out. I just added a point light to my project and it makes the textures go completely black. I have no idea why.
I think it might be either the normal not updating correctly, or the calculation of s.x, s.y and s.z.
I would be very happy if someone had time to take a look at it and help me. Thanks.
So, here is my pixel shader:
Texture2D txDiffuse : register(t0);
SamplerState sampState;
cbuffer PointLight : register(b0)
{
float3 Pos;
float diff;
float amb;
float spec;
float range;
float intensity;
};
struct VS_IN
{
float4 Pos : SV_POSITION;
float2 Tex : TEXCOORD;
float4 Norm : NORMAL;
float4 Pos2 : POSITION;
};
float4 PS_main(VS_IN input) : SV_Target
{
float3 s = txDiffuse.Sample(sampState, input.Tex).xyz;
float3 lightPos = Pos;
float3 lightVector = lightPos - input.Pos2;
lightVector = normalize(lightVector);
float nDotL = dot(lightVector, input.Norm);
float diff1 = 0.8;
float amb1 = 0.1;
s.x = (s.x * diff * nDotL + s.x * amb);
s.y = (s.y * diff * nDotL + s.y * amb);
s.z = (s.z * diff * nDotL + s.z * amb);
return float4(s, 0.0);
};
Geometry shader :
cbuffer worldMatrix : register(b0)
{
matrix world;
}
cbuffer viewMatrix : register(b1)
{
matrix view;
}
cbuffer projectionMatrix : register(b2)
{
matrix projection;
}
struct VS_IN
{
float4 Pos : SV_POSITION;
float2 Tex : TEXCOORD;
};
struct VS_OUT
{
float4 Pos : SV_POSITION;
float2 Tex : TEXCOORD;
float4 Norm : NORMAL;
float4 Pos2 : POSITION;
};
[maxvertexcount(6)]
void main(triangle VS_IN input[3] : SV_POSITION, inout TriangleStream< VS_OUT > output2)
{
matrix wvp = mul(projection, mul(world, view));
matrix worldView = mul(world, view);
float4 normal = float4(cross(input[1].Pos - input[0].Pos, input[2].Pos - input[0].Pos), 0.0f);
normal = normalize(normal);
float4 rotNorm = mul(worldView, normal);
rotNorm = normalize(rotNorm);
VS_OUT output[3];
for (uint i = 0; i < 3; i++)
{
output[i].Pos = input[i].Pos;
output[i].Pos = mul(wvp, input[i].Pos);
output[i].Tex = input[i].Tex;
output[i].Norm = rotNorm;
output[i].Pos2 = mul(worldView, output[i].Pos);
output2.Append(output[i]);
}
output2.RestartStrip();
VS_OUT outputcopy[3];
for (uint i = 0; i < 3; i++)
{
outputcopy[i].Pos = input[i].Pos + (normal);
outputcopy[i].Pos = mul(wvp, outputcopy[i].Pos);
outputcopy[i].Tex = input[i].Tex;
outputcopy[i].Norm = rotNorm;
outputcopy[i].Pos2 = mul(worldView, outputcopy[i].Pos);
output2.Append(outputcopy[i]);
}
output2.RestartStrip();
}
Code for initializing the point light:
struct PointLight
{
Vector3 Pos;
float diff;
float amb;
float spec;
float range;
float intensity;
};
PointLight* pointLight = nullptr;
PointLight PL =
{
Vector3(0.0f, 0.0f, -3.0f),
0.8f,
0.2f,
0.0f,
100.0f,
1.0f
};
pointLight = &PL;
D3D11_BUFFER_DESC lightBufferDesc;
memset(&lightBufferDesc, 0, sizeof(lightBufferDesc));
lightBufferDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
lightBufferDesc.Usage = D3D11_USAGE_DEFAULT;
lightBufferDesc.StructureByteStride = 0;
lightBufferDesc.MiscFlags = 0;
lightBufferDesc.ByteWidth = sizeof(PointLight);
D3D11_SUBRESOURCE_DATA pointLightData;
memset(&pointLightData, 0, sizeof(pointLightData));
pointLightData.pSysMem = &PL;
gDevice->CreateBuffer(&lightBufferDesc, &pointLightData, &lightBuffer);
and in render() I run:
gDeviceContext->PSSetConstantBuffers(0, 1, &lightBuffer);
The texture will be black if s.x, s.y and s.z are all zero.
s.x = (s.x * diff * nDotL + s.x * amb);
s.y = (s.y * diff * nDotL + s.y * amb);
s.z = (s.z * diff * nDotL + s.z * amb);
Try replacing diff and amb with non-zero constants so that you can tell whether you are setting the constant buffer correctly or not. If it's still black after you change them, then it must be nDotL and/or the sampled texture that is zero. Then try a non-zero constant for the texture sample. If the result still looks black, your light vector calculation is the culprit.
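As a concrete sketch of that test, using the diff1 and amb1 constants that are already declared (but unused) in the pixel shader:
// Step 1: use the hardcoded constants instead of the cbuffer values.
// If the model is no longer black, the PointLight constant buffer isn't reaching the shader.
s = s * diff1 * nDotL + s * amb1;
// Step 2: if it is still black, substitute the texture sample too, e.g. float3 s = float3(1.0f, 1.0f, 1.0f);
// Step 3: if it is still black after that, nDotL is the problem, i.e. the light vector or the normal.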

How do I properly create a spotlight in DirectX 11?

I am working on a 3D project in DirectX11, and am currently implementing different lights using the Frank Luna 3D Game Programming with DirectX11 book with my existing code.
Currently, I am developing a spotlight, which should follow the camera's position and look in the same direction; however, the position being lit moves oddly. When the camera position changes, the direction vector of the light seems to track in the (+x, +y, 0) direction. This is best explained with a picture.
It looks here like the boxes are lit properly, and if the camera stays where it is, the spotlight can be moved around as you'd expect, and it tracks the camera direction.
In this image, I've moved the camera closer to the boxes, along the z axis, and the light spot should just get smaller on the nearest box, but it's instead tracking upwards.
This is the code where the spotlight struct is set up to be passed into the constant buffer; these are all of the values in the struct, aside from a float used as a pad at the end:
cb.spotLight = SpotLight();
cb.spotLight.ambient = XMFLOAT4(0.5f, 0.5f, 0.5f, 1.0f);
cb.spotLight.specular = XMFLOAT4(0.5, 0.5, 0.5, 10.0);
cb.spotLight.diffuse = XMFLOAT4(0.5, 0.5, 0.5, 1.0);
cb.spotLight.attenuation = XMFLOAT3(1, 1, 1);
cb.spotLight.range = 15;
XMVECTOR cameraP = XMLoadFloat3(&cameraPos);
XMVECTOR s = XMVectorReplicate(cb.spotLight.range);
XMVECTOR l = XMLoadFloat3(&camera.getForwards());
XMVECTOR lookat = XMVectorMultiplyAdd(s, l, cameraP);
XMStoreFloat3(&cb.spotLight.direction, XMVector3Normalize(lookat - XMVectorSet(cameraPos.x, cameraPos.y, cameraPos.z, 1.0f)));
cb.spotLight.position = cameraPos;
cb.spotLight.spot = 96;
Here is the function being used to calculate the ambient, diffuse and specular values of the spotlight in the shader:
void calculateSpotLight(Material mat, SpotLight light, float3 position, float3 normal, float3 toEye,
out float4 ambient, out float4 diffuse, out float4 specular)
{
ambient = float4(0, 0, 0, 0);
specular = float4(0, 0, 0, 0);
diffuse = float4(0, 0, 0, 0);
float3 lightV = light.position - position;
float distance = length(lightV);
if (distance > light.range)
{
return;
}
lightV /= distance;
ambient = mat.ambient * light.ambient;
float diffuseFact = dot(lightV, normal);
[flatten]
if (diffuseFact > 0.0f)
{
float3 vect = reflect(-lightV, normal);
float specularFact = pow(max(dot(vect, toEye), 0.0f), mat.specular.w);
diffuse = diffuseFact * mat.diffuse * light.diffuse;
specular = specularFact * mat.specular * light.specular;
}
float spot = pow(max(dot(-lightV, float3(-light.direction.x, -light.direction.y, light.direction.z)), 0.0f), light.spot);
float attenuation = spot / dot(light.attenuation, float3(1.0f, distance, distance*distance));
ambient *= spot;
diffuse *= attenuation;
specular *= attenuation;
}
And for completeness' sake, here are the vertex shader and the relevant section of the pixel shader.
VS_OUTPUT VS( float4 Pos : POSITION, float3 NormalL : NORMAL, float2 TexC : TEXCOORD )
{
VS_OUTPUT output = (VS_OUTPUT)0;
output.Pos = mul( Pos, World );
//Get normalised vector to camera position in world coordinates
output.PosW = normalize(eyePos - output.Pos.xyz);
output.Pos = mul( output.Pos, View );
output.Pos = mul( output.Pos, Projection );
//Getting normalised surface normal
float3 normalW = mul(float4(NormalL, 0.0f), World).xyz;
normalW = normalize(normalW);
output.Norm = normalW;
output.TexC = TexC;
return output;
}
float4 PS( VS_OUTPUT input ) : SV_Target
{
input.Norm = normalize(input.Norm);
Material newMat;
newMat.ambient = material.ambient;
newMat.diffuse = texCol;
newMat.specular = specCol;
float4 ambient = (0.0f, 0.0f, 0.0f, 0.0f);
float4 specular = (0.0f, 0.0f, 0.0f, 0.0f);
float4 diffuse = (0.0f, 0.0f, 0.0f, 0.0f);
float4 amb, spec, diff;
calculateSpotLight(newMat, spotLight, input.PosW, input.Norm, input.PosW, amb, diff, spec);
ambient += amb;
specular += spec;
diffuse += diff;
//Other light types
float4 colour;
colour = ambient + specular + diffuse;
colour.a = material.diffuse.a;
return colour;
}
Where did I go wrong?
The third argument, input.PosW, is incorrect here. You must use the position in world space. input.PosW is a normalized vector; it doesn't make sense to subtract a normalized direction from the light position.
You have
calculateSpotLight(newMat, spotLight, input.PosW, input.Norm, input.PosW, amb, diff, spec);
You need this (with input.Pos in world space, not projection space):
calculateSpotLight(newMat, spotLight, input.Pos, input.Norm, input.PosW, amb, diff, spec);
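In practice that also means the vertex shader has to pass the world-space position through to the pixel shader explicitly; a sketch with a hypothetical extra output member (PosWS, not in the code above):
// Vertex shader: keep the world-space position before the view/projection transforms.
output.Pos = mul( Pos, World );
output.PosWS = output.Pos.xyz; // assumes a new VS_OUTPUT field: float3 PosWS : POSITION1
output.PosW = normalize( eyePos - output.Pos.xyz ); // still the normalized to-eye vector
output.Pos = mul( output.Pos, View );
output.Pos = mul( output.Pos, Projection );
// Pixel shader: pass the world-space position, and keep the to-eye vector as the last argument.
calculateSpotLight( newMat, spotLight, input.PosWS, input.Norm, input.PosW, amb, diff, spec );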

HLSL Normal Mapping Matrix Multiplication

I'm currently working in DirectX 9 and have the following code for my normal mapping:
(Vertex Shader):
float4x4 gWorldMatrix;
float4x4 gWorldViewProjectionMatrix;
float4 gWorldLightPosition;
float4 gWorldCameraPosition;
struct VS_INPUT
{
float4 mPosition : POSITION;
float3 mNormal: NORMAL;
float3 mTangent: TANGENT;
float3 mBinormal: BINORMAL;
float2 mUV: TEXCOORD0;
};
struct VS_OUTPUT
{
float4 mPosition : POSITION;
float2 mUV: TEXCOORD0;
float3 mLightDir: TEXCOORD1;
float3 mViewDir: TEXCOORD2;
float3 T: TEXCOORD3;
float3 B: TEXCOORD4;
float3 N: TEXCOORD5;
};
VS_OUTPUT vs_main( VS_INPUT Input )
{
VS_OUTPUT Output;
Output.mPosition = mul( Input.mPosition, gWorldViewProjectionMatrix );
Output.mUV = Input.mUV;
float4 worldPosition = mul( Input.mPosition, gWorldMatrix );
float3 lightDir = worldPosition.xyz - gWorldLightPosition.xyz;
Output.mLightDir = normalize( lightDir );
float3 viewDir = normalize( worldPosition.xyz - gWorldCameraPosition.xyz );
Output.mViewDir = viewDir;
//object space=>world space
float3 worldNormal = mul( Input.mNormal, (float3x3)gWorldMatrix );
Output.N = normalize( worldNormal );
float3 worldTangent = mul( Input.mTangent, (float3x3)gWorldMatrix );
Output.T = normalize( worldTangent );
float3 worldBinormal = mul( Input.mBinormal, (float3x3)gWorldMatrix );
Output.B = normalize( worldBinormal);
return Output;
}
(Pixel Shader)
struct PS_INPUT
{
float2 mUV : TEXCOORD0;
float3 mLightDir: TEXCOORD1;
float3 mViewDir: TEXCOORD2;
float3 T: TEXCOORD3;
float3 B: TEXCOORD4;
float3 N: TEXCOORD5;
};
sampler2D DiffuseSampler;
sampler2D SpecularSampler;
sampler2D NormalSampler;
float3 gLightColor;
float4 ps_main(PS_INPUT Input) : COLOR
{
//read normal from tex
float3 tangentNormal = tex2D( NormalSampler, Input.mUV ).xyz;
tangentNormal = normalize( tangentNormal * 2 - 1 ); //convert 0~1 to -1~+1.
//read from vertex shader
float3x3 TBN = float3x3( normalize(Input.T), normalize(Input.B),
normalize(Input.N) ); //transforms world=>tangent space
TBN = transpose( TBN ); //transform tangent space=>world
float3 worldNormal = mul( TBN, tangentNormal ); //note: mat * scalar
//(since TBN is row matrix)
float3 lightDir = normalize( Input.mLightDir );
float3 diffuse = saturate( dot(worldNormal, -lightDir) );
float4 albedo = tex2D( DiffuseSampler, Input.mUV );
diffuse = gLightColor * albedo.rgb * diffuse;
float3 specular = 0;
if ( diffuse.x > 0 )
{
float3 reflection = reflect( lightDir, worldNormal );
float3 viewDir = normalize( Input.mViewDir );
specular = saturate( dot(reflection, -viewDir) );
specular = pow( specular, 20.0f );
//further adjustments to specular (since texture is 2D)
float specularIntensity = tex2D( SpecularSampler, Input.mUV );
specular *= specularIntensity * gLightColor;
}
float3 ambient = float3(0.1f, 0.1f, 0.1f) * albedo;
return float4(ambient + diffuse + specular, 1);
}
The code works, but I don't quite understand why I need to do the
TBN = transpose( TBN ); in the pixel shader.
The TBN values I passed through the vertex shader are those in world space (hence the multiplication by gWorldMatrix), yet I'm told that
float3x3 TBN = float3x3( normalize(Input.T), normalize(Input.B),
normalize(Input.N) );
transforms world=>tangent(surface) space.
Why is this?
You need the line
TBN = transpose( TBN );
because you're multiplying your tangent-space normal onto the matrix from the right. It is therefore treated as a column vector, while the basis vectors are in the rows of the matrix. So the matrix must be transposed so that the basis transformation can be applied. You can omit the transposition if you switch the multiplication to
float3 worldNormal = mul( tangentNormal, TBN );
Because you multiplied the T, B and N vectors with the world matrix, your TBN matrix transforms from tangent space to world space (TBN transforms into object space, after which the world matrix transforms into world space). Other implementations multiply the TBN with the inverse transpose of the world matrix. With the resulting TBN you can transform the light vector from world space into tangent space and compare it to the tangent-space normal. So I think whoever told you that TBN transforms world to tangent space uses this approach (it saves some performance, because the heavy matrix operations are done in the vertex shader).
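Written out, the two equivalent forms look like this (a sketch using the same row-built TBN as in the question):
float3x3 TBN = float3x3( normalize(Input.T), normalize(Input.B), normalize(Input.N) );
// Treat tangentNormal as a row vector: no transpose needed.
float3 worldNormal = mul( tangentNormal, TBN );
// Or keep it as a column vector, which is why the original code transposes first:
float3 worldNormalAlt = mul( transpose(TBN), tangentNormal );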

Rotate object but keep lighting fixed in DirectX?

I am using DirectX 11. I create a light source (a parallel light source) and a scene (loaded from an .obj file), and everything looks correct, but when I rotate an object in the scene, my light rotates with the object. I don't want the light to rotate; I want to keep it fixed.
I tried to fix the problem by doing something like this, but it failed:
XMMATRIX light_rotat = XMMatrixIdentity();
/* render the light */
light_rotat = XMMatrixRotationY(timeGetTime() / 3500.0f);
/* render scene */
/* the hlsl code */
cbuffer LIGHT
{
float4 light_color ;
float3 Direction ;
float3 Position ;
float3 attribute ;
float Power ;
float range ;
float spotpower ;
};
cbuffer CAMERA
{
float4x4 view ;
float4x4 world ;
float4x4 proj ;
};
cbuffer local
{
float3 eye;
float4x4 localworld ;
};
Texture2D texture_obj ;
SamplerState Texture_sampler ;
/* diffuse texture */
float4 Get_Texture (float2 uv )
{
return texture_obj.Sample (Texture_sampler , uv );
}
/* light calc */
float4 Parrallel (float3 eye ,float3 Position ,float3 Normal , MTRL mtrl )
{
float3 lightvec = normalize(-Direction) ;
float4 LitColor = float4(0.0f , 0.0f ,0.0f ,0.0f );
float diff_factor = dot(lightvec , Normal );
float4 diff = light_color * mtrl.Diffuse_Mtrl ;
if (diff_factor > 0.0f )
{
float4 amb = light_color * mtrl.Ambient_Mtrl ;
float3 view = normalize(eye - Position) ;
float3 rf = normalize(reflect (Direction , Normal )) ;
float4 Spec_Fac = pow ( max ( dot (rf , view ) , 0.0f ) , max (1.0f ,Power ));
float4 Spec = light_color * mtrl.Specular_Mtrl ;
LitColor += ( diff *diff_factor) + (Spec_Fac*Spec) + (amb*diff_factor) ;
}
return LitColor ;
}
struct VS_INPUT
{
float4 Pos :POSITION ;
float3 Normal : NORMAL0 ;
float2 UV : TEXCOORD0 ;
};
struct VS_OUT
{
float4 Pos :SV_Position ;
float4 Posw : POSITION ;
float4 Normal : TEXCOORD1 ;
float2 UV : TEXCOORD0 ;
};
VS_OUT VS (VS_INPUT input)
{
VS_OUT v ;
v.Pos = mul ( input.Pos , localworld );
v.Pos = mul (v.Pos , world );
v.Pos = mul (v.Pos , view );
v.Pos = mul (v.Pos , proj );
v.Posw = mul ( input.Pos, localworld );
v.UV = input.UV ;
v.Normal = mul (float4(input.Normal , 0.0f) ,world );
return v ;
}
float4 PS (VS_OUT ps):SV_Target
{
MTRL y ;
y.Diffuse_Mtrl = Get_Texture (ps.UV);
y.Ambient_Mtrl = y.Diffuse_Mtrl /8 ;
y.Specular_Mtrl = float4 ( 0.5f , 0.5f , 0.5f , 0.0f );
float4 licolor = Parrallel ( eye , (float3)ps.Posw , (float3)ps.Normal ,y ) ;
return licolor ;
}
technique11 tech2
{
pass P0
{
SetVertexShader( CompileShader( vs_5_0, VS() ) );
SetPixelShader( CompileShader( ps_5_0, PS() ) );
}
}
This is the render loop:
Device->Draw(Color);
t.localworld = ::XMMatrixTranspose ( XMMatrixRotationY ( timeGetTime() /3000.0f ) );
t.eye = XMFLOAT3( 0.0f, 10.0f, -30.0f );
local->UpdateSubresource (My_Buffer , 0 , NULL ,0, 0 , &t );
ID3DX11EffectConstantBuffer *cm = effect->Get_Effect()->GetConstantBufferByName ( "local");
cm->SetConstantBuffer ( Local->Get_Buffer());
effect->Apply(Flags , Context);
mesh->draw();
Device->EndDraw();
Any help?
OK, the 'localworld' matrix should be the key here. What you need to do is create a Y rotation matrix and multiply the localworld variable with this rotation matrix before you write the localworld variable to HLSL in your render loop. That should rotate your vertices only, and your light should stay static.