I am using DirectX 11. I created a light source (a parallel/directional light) and a scene (loaded from an OBJ file), and everything looks correct, but when I rotate an object in the scene, the light rotates with it. I don't want the light to rotate; I want to keep it fixed.
I tried to fix the problem by doing something like this, but failed:
XMMATRIX light_rotat = XMMatrixIdentity();
/* render the light */
light_rotat = XMMatrixRotationY(timeGetTime() / 3500.0f);
/* render scene */
/* the HLSL code */
cbuffer LIGHT
{
float4 light_color;
float3 Direction;
float3 Position;
float3 attribute;
float Power;
float range;
float spotpower;
};
cbuffer CAMERA
{
float4x4 view;
float4x4 world;
float4x4 proj;
};
cbuffer local
{
float3 eye;
float4x4 localworld;
};
Texture2D texture_obj;
SamplerState Texture_sampler;
/* diffuse texture */
float4 Get_Texture(float2 uv)
{
return texture_obj.Sample(Texture_sampler, uv);
}
/* light calc */
/* MTRL is not shown in the original post; reconstructed here from its usage */
struct MTRL
{
float4 Diffuse_Mtrl;
float4 Ambient_Mtrl;
float4 Specular_Mtrl;
};
float4 Parrallel(float3 eye, float3 Position, float3 Normal, MTRL mtrl)
{
float3 lightvec = normalize(-Direction);
float4 LitColor = float4(0.0f, 0.0f, 0.0f, 0.0f);
float diff_factor = dot(lightvec, Normal);
float4 diff = light_color * mtrl.Diffuse_Mtrl;
if (diff_factor > 0.0f)
{
float4 amb = light_color * mtrl.Ambient_Mtrl;
float3 view = normalize(eye - Position);
float3 rf = normalize(reflect(Direction, Normal));
float4 Spec_Fac = pow(max(dot(rf, view), 0.0f), max(1.0f, Power));
float4 Spec = light_color * mtrl.Specular_Mtrl;
LitColor += (diff * diff_factor) + (Spec_Fac * Spec) + (amb * diff_factor);
}
return LitColor;
}
struct VS_INPUT
{
float4 Pos : POSITION;
float3 Normal : NORMAL0;
float2 UV : TEXCOORD0;
};
struct VS_OUT
{
float4 Pos : SV_Position;
float4 Posw : POSITION;
float4 Normal : TEXCOORD1;
float2 UV : TEXCOORD0;
};
VS_OUT VS(VS_INPUT input)
{
VS_OUT v;
v.Pos = mul(input.Pos, localworld);
v.Pos = mul(v.Pos, world);
v.Pos = mul(v.Pos, view);
v.Pos = mul(v.Pos, proj);
v.Posw = mul(input.Pos, localworld);
v.UV = input.UV;
v.Normal = mul(float4(input.Normal, 0.0f), world);
return v;
}
float4 PS(VS_OUT ps) : SV_Target
{
MTRL y;
y.Diffuse_Mtrl = Get_Texture(ps.UV);
y.Ambient_Mtrl = y.Diffuse_Mtrl / 8;
y.Specular_Mtrl = float4(0.5f, 0.5f, 0.5f, 0.0f);
float4 licolor = Parrallel(eye, (float3)ps.Posw, (float3)ps.Normal, y);
return licolor;
}
technique11 tech2
{
pass P0
{
SetVertexShader( CompileShader( vs_5_0, VS() ) );
SetPixelShader( CompileShader( ps_5_0, PS() ) );
}
}
This is the render loop:
Device->Draw(Color);
t.localworld = ::XMMatrixTranspose(XMMatrixRotationY(timeGetTime() / 3000.0f));
t.eye = XMFLOAT3(0.0f, 10.0f, -30.0f);
local->UpdateSubresource(My_Buffer, 0, NULL, 0, 0, &t);
ID3DX11EffectConstantBuffer* cm = effect->Get_Effect()->GetConstantBufferByName("local");
cm->SetConstantBuffer(Local->Get_Buffer());
effect->Apply(Flags, Context);
mesh->draw();
Device->EndDraw();
Any help?
OK, the localworld matrix is the key here. What you need to do is create a Y-rotation matrix and multiply the localworld variable by it before you write the localworld variable to HLSL in your render loop. That should rotate only your vertices, and your light should stay static.
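A minimal sketch of that idea, reusing the t struct and timing from the render loop above (baseTransform is a hypothetical placement matrix, not from the original code):
XMMATRIX objRotation = XMMatrixRotationY(timeGetTime() / 3000.0f);
XMMATRIX baseTransform = XMMatrixIdentity(); // the object's own scale/translation, if any
// Rotate first, then place (row-vector convention), and write only localworld.
t.localworld = ::XMMatrixTranspose(objRotation * baseTransform);
// The LIGHT cbuffer's Direction is defined in world space and is never
// multiplied by localworld, so rotating the object no longer drags the light.
The key point is that the rotation is baked into the per-object matrix only; the light's Direction constant is left untouched.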
Working my way through "Introduction to 3D Game Programming with DirectX 11", I am reworking the samples so as to not use the Effects Framework; so far, so good.
However, I have run into a problem where one of the constant buffers only partially updates.
CPU side CB struct:
struct CBPerFrame
{
DirectionalLight DirLight[3];
DirectX::XMFLOAT3 EyePositionW;
DirectX::XMFLOAT4 FogColour;
float FogStart;
float FogRange;
int LightNumber;
double Padding;
};
I update the CB prior to any drawing:
CBPerFrame cbPerFrame { };
cbPerFrame.DirLight[ 0 ] = mDirectionalLights[ 0 ];
cbPerFrame.DirLight[ 1 ] = mDirectionalLights[ 1 ];
cbPerFrame.DirLight[ 2 ] = mDirectionalLights[ 2 ];
cbPerFrame.EyePositionW = mEyePosW;
cbPerFrame.FogColour = XMFLOAT4( Colors::Black );
cbPerFrame.FogRange = 1.0F;
cbPerFrame.FogStart = 0.0F;
cbPerFrame.LightNumber = mLightCount;
cbPerFrame.Padding = 0.0;
mD3DImmediateContext->UpdateSubresource( mCBPerFrame.Get( ), 0, nullptr, &cbPerFrame, 0, 0 );
Pixel Shader:
cbuffer CBPerFrame : register( b0 )
{
DirectionalLight gDirectionalLights[ 3 ];
float3 gEyePosW;
float4 gFogColor;
float gFogStart;
float gFogRange;
int gLightCount;
double gPadding;
}
cbuffer CBPerObject : register( b1 )
{
matrix gWorld;
matrix gWorldInverseTranspose;
matrix gWorldViewProjection;
float4x4 gTextureTransform;
Material gMaterial;
}
float4 main( VertexOut input ) : SV_TARGET
{
// Interpolating normal can unnormalize it, so normalize it.
input.NormalW = normalize( input.NormalW );
// The toEye vector is used in lighting.
float3 toEye = normalize( gEyePosW - input.PositionW );
// Cache the distance to the eye from this surface point.
float distToEye = length( toEye );
// Normalize.
toEye /= distToEye;
//
// Lighting.
//
// Start with a sum of zero.
float4 ambient = float4( 0.0F, 0.0F, 0.0F, 0.0F );
float4 diffuse = float4( 0.0F, 0.0F, 0.0F, 0.0F );
float4 spec = float4( 0.0F, 0.0F, 0.0F, 0.0F );
// Sum the light contribution from each light source.
/* [unroll]*/
for ( int i = 0; i < gLightCount; i++ )
{
float4 A, D, S;
ComputeDirectionalLight( gMaterial, gDirectionalLights[ i ], input.NormalW, toEye, A, D, S );
ambient += A;
diffuse += D;
spec += S;
}
float4 litColour = ambient + diffuse + spec;
// Common to take alpha from diffuse material.
litColour.a = gMaterial.Diffuse.a;
return litColour;
}
gLightCount is always set to 0, even though it should be set to 2 at the start of the application. If I change the condition of the loop to a hardcoded 1/2/3, the shader works as it should.
I realise that there are extra variables in the CB, but the sample code has them, and I believe they are used in further examples.
I think the issue has to do with how the CBPerFrame struct is padded, so it isn't being copied over to the GPU correctly. Any thoughts?
Thanks for any help.
It seems to be an issue with packing. According to the Packing Rules for Constant Variables, data should be packed at 4-byte boundaries, but also so that a data block never crosses a 16-byte boundary. In this case there will definitely be padding after EyePositionW:
struct CBPerFrame
{
DirectionalLight DirLight[3];
DirectX::XMFLOAT3 EyePositionW;
float padding1;
DirectX::XMFLOAT4 FogColour;
Also, I'm not sure why there is a double gPadding; at the end. It should probably be another int.
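Putting both points together, a possible corrected CPU-side layout might look like this sketch (field order follows the question; the trailing pad follows the suggestion above, and the static_assert is just a sanity check, since a constant buffer's size must be a multiple of 16 bytes):
struct CBPerFrame
{
    DirectionalLight DirLight[3];
    DirectX::XMFLOAT3 EyePositionW;
    float Padding1; // keeps FogColour from straddling a 16-byte boundary
    DirectX::XMFLOAT4 FogColour;
    float FogStart;
    float FogRange;
    int LightNumber;
    int Padding2;   // 4-byte pad instead of the original double
};
static_assert(sizeof(CBPerFrame) % 16 == 0, "CBPerFrame must be a multiple of 16 bytes");
The shader-side cbuffer would then declare a matching int gPadding instead of the double, so the two layouts line up.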
Images with examples of the problem: http://imgur.com/gallery/vmMyk
Hi,
I need some help with rendering 2D objects in a 3D scene with a 3D camera. I think I managed to map my 2D coordinates into LH world coordinates. However, my rendered 2D objects are only in the correct place when the camera is at [0.0f, 0.0f, 0.0f]. In every other position, the placement of the 2D objects in the scene is malformed. I think my matrices are screwed up, but I don't know where to look next. I'd appreciate good ideas; please comment if something's missing for you, and I'll edit the main post to provide more information.
I'm using a simple 3D color HLSL shader (VS and PS version 4.0) with alpha blending for the bigger triangle:
cbuffer ConstantBuffer : register( b0 )
{
matrix World;
matrix View;
matrix Projection;
}
struct VS_INPUT
{
float4 Pos : POSITION;
float4 Color : COLOR;
};
struct PS_INPUT
{
float4 Pos : SV_POSITION;
float4 Color : COLOR;
};
PS_INPUT VS ( VS_INPUT input )
{
PS_INPUT output = (PS_INPUT)0;
input.Pos.w = 1.0f;
output.Pos = mul ( input.Pos, World );
output.Pos = mul ( output.Pos, View );
output.Pos = mul ( output.Pos, Projection );
output.Color = input.Color;
return output;
}
float4 PS ( PS_INPUT input ) : SV_Target
{
return input.Color;
}
That's my Vertex data struct:
struct Vertex
{
DirectX::XMFLOAT3 position;
DirectX::XMFLOAT4 color;
Vertex() {};
Vertex(DirectX::XMFLOAT3 aPosition, DirectX::XMFLOAT4 aColor)
: position(aPosition)
, color(aColor)
{};
};
Render call for object:
bool PrimitiveMesh::Draw()
{
unsigned int stride = sizeof(Vertex);
unsigned int offset = 0;
D3DSystem::GetD3DDeviceContext()->IASetVertexBuffers(0, 1, &iVertexBuffer, &stride, &offset);
D3DSystem::GetD3DDeviceContext()->IASetIndexBuffer(iIndexBuffer, DXGI_FORMAT_R32_UINT, 0);
D3DSystem::GetD3DDeviceContext()->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
return true;
}
Draw call with initialization:
static PrimitiveMesh* mesh;
if (mesh == 0)
{
std::vector<PrimitiveMesh::Vertex> vertices;
mesh = new PrimitiveMesh();
DirectX::XMFLOAT4 color = { 186 / 256.0f, 186 / 256.0f, 186 / 256.0f, 0.8f };
vertices.push_back({ DirectX::XMFLOAT3(0.0f, 0.0f, 0.0f), color });
vertices.push_back({ DirectX::XMFLOAT3(0.0f, 600.0f, 0.0f), color });
vertices.push_back({ DirectX::XMFLOAT3(800.0f, 600.0f, 0.0f), color });
mesh->SetVerticesAndIndices(vertices);
}
// Getting clean matrices here:
D3D::Matrices(world, view, projection, ortho);
iGI->TurnZBufferOff();
iGI->TurnOnAlphaBlending();
mesh->Draw();
XMMATRIX view2D = Camera::View2D();
iColorShader->Render(iGI->GetContext(), 3, &world, &view2D, &ortho);
iGI->TurnZBufferOn();
These are my 2D calculations for camera:
up = DirectX::XMVectorSet(0.0f, 1.0f, 0.0f, 0.0f);
lookAt = DirectX::XMVectorSet(0.0f, 0.0f, 1.0f, 0.0f);
rotationMatrix = DirectX::XMMatrixRotationRollPitchYaw(0.0f, 0.0f, 0.0f); // (pitch, yaw, roll);
up = DirectX::XMVector3TransformCoord(up, rotationMatrix);
lookAt = DirectX::XMVector3TransformCoord(lookAt, rotationMatrix) + position;
view2D = DirectX::XMMatrixLookAtLH(position, lookAt, up);
I'll appreciate any help.
Kind regards.
With shaders, you are not forced to use matrices; you have the flexibility to simplify the problem.
Say you render 2D objects using coordinates in pixels: the only requirement is to scale and offset them back into the normalized projective space.
A vertex shader could be as short as this:
cbuffer ConstantBuffer : register( b0 ) {
float2 rcpDim; // 1 / renderTargetSize
}
PS_INPUT VS ( VS_INPUT input ) {
PS_INPUT output;
output.Pos.xy = input.Pos.xy * rcpDim * 2; // from pixel to [0..2]
output.Pos.xy -= 1; // to [-1..1]
output.Pos.y *= -1; // because top left in texture space is bottom left in projective space
output.Pos.zw = float2(0,1);
output.Color = input.Color;
return output;
}
You can of course build a set of matrices achieving the same result with your original shader: just set World and View to identity, and Projection to an ortho projection with XMMatrixOrthographicOffCenterLH(0, width, 0, height, 0, 1). But as you are beginning with 3D programming, you will soon have to learn to deal with multiple shaders anyway, so take it as an exercise.
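For reference, a sketch of that matrix-based route with DirectXMath (width and height stand for the render-target size and are illustrative):
using namespace DirectX;

XMMATRIX world = XMMatrixIdentity();
XMMATRIX view = XMMatrixIdentity();
// Left = 0, Right = width, Bottom = 0, Top = height, NearZ = 0, FarZ = 1.
XMMATRIX projection = XMMatrixOrthographicOffCenterLH(
    0.0f, static_cast<float>(width),
    0.0f, static_cast<float>(height),
    0.0f, 1.0f);
// DirectXMath matrices are row-major, while HLSL defaults to column-major
// packing, so transpose each matrix before writing it to the constant buffer.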
Well, I fixed my problem. For some weird reason, DirectXMath was generating a false XMMATRIX; my XMMatrixOrthographicLH() was completely incorrect for good parameters. I solved my problem with the classic definition of an orthographic matrix, found in this article (definition in Fig. 10):
auto orthoMatrix = DirectX::XMMatrixIdentity();
orthoMatrix.r[0].m128_f32[0] = 2.0f / Engine::VideoSettings::Current()->WindowWidth();
orthoMatrix.r[1].m128_f32[1] = 2.0f / Engine::VideoSettings::Current()->WindowHeight();
orthoMatrix.r[2].m128_f32[2] = -(2.0f / (screenDepth - screenNear));
orthoMatrix.r[2].m128_f32[3] = -(screenDepth + screenNear) / (screenDepth - screenNear);
galop1n gives a good solution, but on my system
cbuffer ConstantBuffer : register( b0 ) {
float2 rcpDim; // 1 / renderTargetSize
}
NEEDS to be padded to a multiple of 16 bytes, like this:
struct VS_CONSTANT_BUFFER
{
DirectX::XMFLOAT2 rcpDim;
DirectX::XMFLOAT2 rcpDim2;
};
// Supply the vertex shader constant data.
VS_CONSTANT_BUFFER VsConstData;
VsConstData.rcpDim = { 2.0f / w,2.0f / h};
// Fill in a buffer description.
D3D11_BUFFER_DESC cbDesc;
ZeroMemory(&cbDesc, sizeof(cbDesc));
cbDesc.ByteWidth = sizeof(VS_CONSTANT_BUFFER);
cbDesc.Usage = D3D11_USAGE_DYNAMIC;
cbDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
cbDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
cbDesc.MiscFlags = 0;
cbDesc.StructureByteStride = 0;
// Fill in the subresource data.
D3D11_SUBRESOURCE_DATA InitData;
ZeroMemory(&InitData, sizeof(InitData));
InitData.pSysMem = &VsConstData;
InitData.SysMemPitch = 0;
InitData.SysMemSlicePitch = 0;
// Create the buffer.
HRESULT hr = pDevice->CreateBuffer(&cbDesc, &InitData,
&pConstantBuffer11);
or, with alignment:
__declspec(align(16))
struct VS_CONSTANT_BUFFER
{
DirectX::XMFLOAT2 rcpDim;
};
Hi, I am just learning DirectX 11 and have come across a problem with PSSetShaderResources.
If I change textures before the call to swapchain->Present, only the first texture is displayed.
If I change textures between Present calls, both are displayed, but on consecutive frames.
Is there any way of changing textures with PSSetShaderResources so I can use both (or more) on a single frame?
I know I can use texture arrays, but it appears that all the textures must be the same size?
I could also upload two (or more) textures at a time, but I would then have to have conditional statements within the shader.
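As an aside, a sketch of that "two textures at once" idea (assuming pTex holds the two shader resource views, as in the loop below): PSSetShaderResources accepts an array, so consecutive slots can be filled in a single call.
// Hypothetical sketch: bind both textures to slots t0 and t1 at once.
ID3D11ShaderResourceView* srvs[2] = { pTex[0], pTex[1] };
devcon->PSSetShaderResources(0, 2, srvs);
// The pixel shader would then declare one Texture2D per register, e.g.
//   Texture2D txDiffuse0 : register(t0);
//   Texture2D txDiffuse1 : register(t1);
// and select between them, which is the conditional the question mentions.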
Below are the drawing loop and the simple shader that I am using.
Any help would be appreciated.
Paul
24OCT2014
tempBool = false;
for (int j = 0; j < 2; j++) //change 2 to texCount
{
devcon->UpdateSubresource(pConstantBuffer, 0, NULL, &cb, 0, 0);
devcon->VSSetConstantBuffers(0, 1, &pConstantBuffer);
devcon->PSSetConstantBuffers(0, 1, &pConstantBuffer);
tempBool = !tempBool;
if (tempBool)
{
devcon->PSSetShaderResources(0, 1, &pTex[0]);
}
else
{
devcon->PSSetShaderResources(0, 1, &pTex[1]);
}
for (int i = 0; i < texRun[j]; i++)
{
devcon->Draw(obLens[curPos+i], obStarts[curPos+i]);
}
curPos += texRun[j];
}
swapchain->Present(0, 0);
Texture2D txDiffuse : register( t0 );
SamplerState samLinear : register( s0 );
cbuffer ConstantBuffer : register( b0 )
{
matrix World;
matrix View;
matrix Projection;
float4 vLightDir;
float4 vLightColor;
};
struct VOut
{
float4 Pos : SV_POSITION;
float3 Norm : NORMAL;
float2 Tex : TEXCOORD;
};
VOut VShader(float4 position : POSITION, float3 Norm : NORMAL, float2 Tex : TEXCOORD)
{
VOut output = (VOut)0;
output.Pos = mul( position, World );
output.Pos = mul( output.Pos, View );
output.Pos = mul( output.Pos, Projection );
output.Norm = mul(Norm, World);
output.Tex = Tex;
return output;
}
float4 PShader0(float4 position : SV_POSITION, float3 Norm : NORMAL,
float2 Tex : TEXCOORD ) : SV_TARGET
{
float4 diffuse = 0;
diffuse = txDiffuse.Sample( samLinear, Tex );
float4 finalColor = 0;
finalColor = diffuse; // * 0.2;
finalColor += saturate( dot((float3)vLightDir,Norm) * diffuse);
finalColor.a = 1.0;
return finalColor;
}
Finally tracked down the error: it was in my object loading code and had nothing to do with Direct3D.
I was loading the second lot of objects over the first lot.
For the first lot of objects I am reading into verts, which is my vertex data pointer;
for the second lot I was also reading into verts instead of &verts[firstVertsCount].
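In other words (LoadObjectsInto and the counts here are hypothetical stand-ins for my loader), each lot must be read in after the vertices already loaded, not over them:
Vertex* verts = new Vertex[firstVertsCount + secondVertsCount];
LoadObjectsInto(firstFile, verts);                    // first lot: start of the buffer
LoadObjectsInto(secondFile, &verts[firstVertsCount]); // second lot: append at the offset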
Anyway, thanks for the help.
Paul
I'm new to DirectX 11, and I programmed a distance-dependent point light shader that works pretty well for rotated and translated objects, but after I tried scaling my models, the lighting got dimmer when I scaled a model larger and brighter when I scaled it smaller. I thought it might be the normals, but I made sure to multiply them by the inverse transpose of the world matrix, and I made sure to normalize them in the pixel shader after they are interpolated. Here is the shader code:
Texture2D txDiffuse : register( t0 );
SamplerState samAnisotropic
{
Filter = ANISOTROPIC;
MaxAnisotropy = 4;
};
cbuffer ConstantBuffer : register( b0 )
{
matrix World;
matrix View;
matrix Projection;
matrix WorldInvTrans;
float3 LightPos;
float pad1;
float3 EyePos;
float pad2;
float3 At;
float pad3;
float showNorms;
}
struct VS_INPUT
{
float4 Pos : POSITION;
float3 Norm : NORMAL;
float2 TexCoor : TEXCOORD0;
};
struct PS_INPUT
{
float4 Pos : SV_POSITION;
float3 Norm : NORMAL;
float3 LightDir : POSITION0;
float3 EyeVector : POSITION1;
float2 TexCoor : TEXCOORD0;
float distance : FLOAT0;
float showNorms : FLOAT1;
};
PS_INPUT VS( VS_INPUT input )
{
PS_INPUT output = (PS_INPUT)0;
output.Pos = mul( input.Pos, World );
output.LightDir = normalize( LightPos - output.Pos );
output.EyeVector = normalize( EyePos - At );
output.distance = distance( LightPos, output.Pos);
output.Pos = mul( output.Pos, View );
output.Pos = mul( output.Pos, Projection );
output.Norm = mul( input.Norm, WorldInvTrans );
output.TexCoor = input.TexCoor;
output.showNorms = showNorms;
return output;
}
float4 PS( PS_INPUT input) : SV_Target
{
input.Norm = normalize( input.Norm );
float specTerm = 0;
float3 ReflVector = normalize( reflect( input.LightDir, input.Norm ) );
[flatten]
if ( dot( ReflVector, input.EyeVector ) >= 0 )
{
specTerm = pow( dot( ReflVector, input.EyeVector ) , 50 );
}
float diffuseTerm = saturate( dot( input.LightDir, input.Norm ) );
float4 ambient = float4( 0.25f, 0.25f, 0.25f, 1.0f );
float4 lightColor = float4( 1.0f, 1.0f, 1.0f, 1.0f );
return ( (ambient + (diffuseTerm + specTerm) / (pow( input.distance, 1 ) * 0.025f)) * lightColor * txDiffuse.Sample( samAnisotropic, input.TexCoor ) ) * ( 1 - input.showNorms ) + float4( input.Norm, 1.0f ) * input.showNorms;
}
I was still suspicious that the normals weren't correct, so I edited the last line in my pixel shader to shade the model based on the normal vectors if showNorms = 1.0f. The normals looked like they were transformed correctly. Still suspicious, I replaced my model with a plane on the XZ axis, and scaled it up 50 times. When I rendered it, the lighting was still dim, but the plane was green when I set showNorms to 1.0f, which must mean that the normals are all pointing in the upwards Y direction. If I'm transforming my normals correctly and normalizing them, what could be causing these lighting errors?
If this helps, here is my code when I set the constant buffers for the plane:
//Render Plane
mWorld = XMMatrixIdentity();
cb1.mWorld = XMMatrixTranspose( XMMatrixMultiply( XMMatrixMultiply( mWorld, XMMatrixScaling( 50.0f, 1.0f, 50.0f ) ), XMMatrixTranslation( 0.0f, -5.0f, 0.0f ) ) );
XMMATRIX A = cb1.mWorld;
A.r[3] = XMVectorSet(0.0f, 0.0f, 0.0f, 1.0f);
det = XMMatrixDeterminant(A);
cb1.mWorldInvTrans = XMMatrixInverse(&det, A);
g_pImmediateContext->UpdateSubresource( g_pcBufferShader1, 0, NULL, &cb1, 0, 0 );
Edit: I changed the code a little bit to fix the specTerm:
Texture2D txDiffuse : register( t0 );
SamplerState samAnisotropic
{
Filter = ANISOTROPIC;
MaxAnisotropy = 4;
};
cbuffer ConstantBuffer : register( b0 )
{
matrix World;
matrix View;
matrix Projection;
matrix WorldInvTrans;
float3 LightPos;
float pad1;
float3 EyePos;
float pad2;
float3 At;
float pad3;
float showNorms;
}
struct VS_INPUT
{
float4 Pos : POSITION;
float3 Norm : NORMAL;
float2 TexCoor : TEXCOORD0;
};
struct PS_INPUT
{
float4 Pos : SV_POSITION;
float3 Norm : NORMAL;
float3 LightDir : POSITION0;
float3 EyeVector : POSITION1;
float2 TexCoor : TEXCOORD0;
float distance : FLOAT0;
float showNorms : FLOAT1;
};
PS_INPUT VS( VS_INPUT input )
{
PS_INPUT output = (PS_INPUT)0;
output.Pos = mul( input.Pos, World );
output.LightDir = LightPos - output.Pos;
output.EyeVector = EyePos - At;
output.distance = distance( LightPos, output.Pos );
output.Pos = mul( output.Pos, View );
output.Pos = mul( output.Pos, Projection );
output.Norm = mul( input.Norm, WorldInvTrans );
output.TexCoor = input.TexCoor;
output.showNorms = showNorms;
return output;
}
float4 PS( PS_INPUT input) : SV_Target
{
input.Norm = normalize( input.Norm );
input.LightDir = normalize( input.LightDir );
input.EyeVector = normalize( input.EyeVector );
float specTerm = 0;
float3 ReflVector = normalize( reflect( -input.LightDir, input.Norm ) );
[flatten]
if ( dot( ReflVector, input.EyeVector ) >= 0 )
{
specTerm = pow( dot( ReflVector, input.EyeVector ) , 50 );
}
float diffuseTerm = saturate( dot( input.LightDir, input.Norm ) );
float4 ambient = float4( 0.25f, 0.25f, 0.25f, 1.0f );
float4 lightColor = float4( 1.0f, 1.0f, 1.0f, 1.0f );
return ( (ambient + (diffuseTerm + specTerm) / (pow( input.distance, 1 ) * 0.025f)) * lightColor * txDiffuse.Sample( samAnisotropic, input.TexCoor ) ) * ( 1 - input.showNorms ) + float4( input.Norm, 1.0f ) * input.showNorms;
}
I think you should try to normalize the LightDir vector in the pixel shader as well. If the plane is really large, it may happen that after the interpolation of these two vectors, the vector you get in the pixel shader is not normalized. This error is likely to increase as the scale goes up. Give it a try. The picture below shows this problem.
I'm trying to clip a texture by hardcoding the texture coordinates between 0 and 1, and then sending a constant buffer containing a 3x3 texture transform matrix to the vertex shader.
However, the texture is not rendering as I expected it to, and I'm not sure where I went wrong. Could someone help? See the code below.
For testing, I'm using an identity matrix to keep the texture coordinates untouched, but the texture shows up transformed in a very weird way.
This is the texture: (the colours showing up are actually transparent areas, except the black colour and the softer red colour of the heart)
Transformed texture:
HLSL:
cbuffer cbChangesPerFrame : register( b0 )
{
matrix g_Mvp;
};
cbuffer cbTexTransform : register( b2 )
{
float3x3 g_TexTransform;
};
Texture2D g_ColorMap : register(t0);
SamplerState g_ColorSampler : register(s0);
struct VS_Input
{
float4 pos : POSITION0;
float2 tex0 : TEXCOORD0;
};
struct PS_Input
{
float4 pos : SV_POSITION0;
float2 tex0 : TEXCOORD0;
};
PS_Input VS_Main(VS_Input vertex)
{
PS_Input vsOut = (PS_Input)0;
vsOut.pos = mul(vertex.pos,g_Mvp);
//vsOut.tex0 = vertex.tex0;
float3 coord = float3(vertex.tex0, 1.0);
coord = mul(coord, g_TexTransform);
vsOut.tex0 = coord.xy ;
return vsOut;
}
float4 PS_Main( PS_Input frag ) : SV_TARGET
{
return g_ColorMap.Sample( g_ColorSampler, frag.tex0 );
}
Vertex buffer, hardcoded:
Vertex::PosTex vertices[]=
{
{XMFLOAT3( 0.5f, 0.5f, 1.0f ), XMFLOAT2( 1.0f, 0.0f )},
{XMFLOAT3( 0.5f, -0.5f, 1.0f ), XMFLOAT2( 1.0f, 1.0f )},
{XMFLOAT3( -0.5f, -0.5f, 1.0f ), XMFLOAT2( 0.0f, 1.0f )},
{XMFLOAT3( -0.5f, -0.5f, 1.0f ), XMFLOAT2( 0.0f, 1.0f )},
{XMFLOAT3( -0.5f, 0.5f, 1.0f ), XMFLOAT2( 0.0f, 0.0f )},
{XMFLOAT3( 0.5f, 0.5f, 1.0f ), XMFLOAT2( 1.0f, 0.0f )}
};
Matrix definition:
XMFLOAT3X3 f3x3;
f3x3.m[0][0] = 1.0f; f3x3.m[0][1] = 0.0f; f3x3.m[0][2] = 0.0f;
f3x3.m[1][0] = 0.0f; f3x3.m[1][1] = 1.0f; f3x3.m[1][2] = 0.0f;
f3x3.m[2][0] = 0.0f; f3x3.m[2][1] = 0.0f; f3x3.m[2][2] = 1.0f;
GAME_MANAGER->GMSetTexTransformMatrix(f3x3);
GameManager GMSetTexTransformMatrix() definition:
void GameManager::GMSetTexTransformMatrix( const XMFLOAT3X3& rkTexTransform )
{
m_pContext->UpdateSubresource(m_pTexTransformCB,0,0,&rkTexTransform,0,0);
m_pContext->VSSetConstantBuffers(2,1,&m_pTexTransformCB);
}
Buffer Initialisation:
ZeroMemory(&constDesc, sizeof(constDesc));
constDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
constDesc.ByteWidth = 48;
constDesc.Usage = D3D11_USAGE_DEFAULT;
result = m_pDevice->CreateBuffer(&constDesc,0,&m_pTexTransformCB);
The problem is the 16-byte alignment. An XMFLOAT3X3 is just 9 floats in a row. When this gets stored in registers, it's just going to take the first four floats and put them in c0, the next four in c1, and the remaining float in c2.x. You can see this yourself with the following:
cbuffer cbChangesEveryFrame : register( b1 )
{
float a1 : packoffset(c0.x);
float a2 : packoffset(c0.y);
float a3 : packoffset(c0.z);
float b1 : packoffset(c0.w);
float b2 : packoffset(c1.x);
float b3 : packoffset(c1.y);
float c1 : packoffset(c1.z);
float c2 : packoffset(c1.w);
float c3 : packoffset(c2.x);
};
PS_INPUT VS( VS_INPUT input )
{
PS_INPUT output = (PS_INPUT)0;
output.Pos = mul( input.Pos, World );
output.Pos = mul( output.Pos, View );
output.Pos = mul( output.Pos, Projection );
float3 coord = float3(input.Tex, 1.0);
float3x3 g_TexTransform = {a1, a2, a3, b1, b2, b3, c1, c2, c3};
coord = mul(coord, g_TexTransform);
output.Tex = coord.xy;
return output;
}
Now when you pass your XMFLOAT3X3, the texture appears normally. The problem was that, because of the register allocation, your texture transform matrix became screwed up. When you look at the registers, this is how your data looks coming in:
c0: 1 0 0 0
c1: 1 0 0 0
c2: 1
float3x3's are probably stored as an array of float3's, so it would take the first three components of each register, giving you:
1, 0, 0
1, 0, 0
1, 0, 0
It's scaling your Y to 0, which is giving that weird stretchy look. To solve this, you're either going to have to store your transform in a 4x4 matrix or manually assign each part of the register. Switching to three float3's wouldn't work either, because they can't cross the 16-byte boundary.
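A sketch of the 4x4 route with DirectXMath, reusing the names from the question (note the constant buffer's ByteWidth grows from 48 to 64): XMLoadFloat3x3 expands the 3x3 into a full XMMATRIX, so each row gets its own 16-byte register.
// Expand the 3x3 into a 4x4 so every row fills a complete register.
DirectX::XMFLOAT4X4 f4x4;
DirectX::XMStoreFloat4x4(&f4x4, DirectX::XMLoadFloat3x3(&f3x3));
m_pContext->UpdateSubresource(m_pTexTransformCB, 0, 0, &f4x4, 0, 0);
// Shader side: declare float4x4 g_TexTransform; and cast back down when used:
//   coord = mul(coord, (float3x3)g_TexTransform);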