I am working on a C++ 3D game and I am having trouble with how a mesh looks on screen when I load it from a glTF file.
This is the 3D model I am trying to load.
I am using DirectX 11.
As you can see, the mesh is flipped on the X-axis and I can't figure out what to do to flip it so it looks normal.
This is how I want it to look.
struct VS_INPUT
{
float4 position: POSITION0;
float2 texcoord: TEXCOORD0;
float3 normal: NORMAL0;
};
struct VS_OUTPUT
{
float4 position: SV_POSITION;
float2 texcoord: TEXCOORD0;
float3 normal: NORMAL0;
float3 direction_to_camera: TEXCOORD1;
};
cbuffer constant: register(b0)
{
row_major float4x4 m_world;
row_major float4x4 m_view;
row_major float4x4 m_proj;
float4 m_light_direction;
float4 m_camera_position;
float4 m_light_position;
float m_light_radius;
};
VS_OUTPUT vsmain(VS_INPUT input)
{
VS_OUTPUT output = (VS_OUTPUT)0;
// WORLD SPACE
output.position = mul(input.position, m_world);
output.direction_to_camera = normalize(output.position.xyz - m_camera_position.xyz);
// VIEW SPACE
output.position = mul(output.position, m_view);
// SCREEN SPACE
output.position = mul(output.position, m_proj);
output.texcoord = input.texcoord;
output.normal = normalize(mul(input.normal, (float3x3)m_world));
return output;
}
This is the vertex shader I am using.
In the vertex shader:
output.position.x = -output.position.x;
Or, when loading your mesh and parsing the vertices, invert the X coordinate of each vertex:
v.x = -v.x;
Or scaling by -1 on the X axis should do the trick:
SetScale( -1.0f, 1.0f, 1.0f );
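For the scaling route, here is a minimal sketch using DirectXMath, assuming you build the world matrix on the CPU (SetScale above stands in for whatever your engine's own transform helper is):

#include <DirectXMath.h>
using namespace DirectX;

// Bake a -1 scale on X into the world matrix so the mesh is mirrored back
// to the expected orientation before the view/projection transforms.
// The result is what you would upload as m_world in the constant buffer.
XMMATRIX XM_CALLCONV BuildFlippedWorld(FXMMATRIX world)
{
    return XMMatrixScaling(-1.0f, 1.0f, 1.0f) * world;
}

Note that mirroring on one axis reverses the triangle winding order, so if faces start disappearing after the flip you may also need to swap the cull mode in your rasterizer state.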
Some 3D meshes exported to the Wavefront .obj format come with a .mtl file that holds additional data about the textures and materials they use. When exported from Blender, they always come with Ambient, Diffuse, Specular, and Emissive RGB data as part of the material, but I'm not sure how to use this data in the pixel shader to get the right color output.
I would appreciate it if anyone could explain how to use these materials; any code sample would be very welcome.
Traditional materials and lighting models use "Ambient", "Diffuse", "Specular", and "Emissive" colors, which is why you find those in Wavefront OBJ files. These can often be replaced by, or used in multiplicative conjunction with, texture colors.
The (now defunct) XNA Game Studio product did a good job of providing simple 'classic' shaders in the BasicEffect "Stock Shaders". I use them in the DirectX Tool Kit for DX11 and DX12.
Take a look at BasicEffect.fx for a traditional material pixel shader. If you are looking mostly for pixel-shader handling, that's "per-pixel lighting" as opposed to "vertex lighting" which was more common back when we had less powerful GPUs.
Here's an 'inlined' version so you can follow it all in one place:
struct VSInputNmTx
{
float4 Position : SV_Position;
float3 Normal : NORMAL;
float2 TexCoord : TEXCOORD0;
};
Texture2D<float4> Texture : register(t0);
sampler Sampler : register(s0);
cbuffer Parameters : register(b0)
{
float4 DiffuseColor : packoffset(c0);
float3 EmissiveColor : packoffset(c1);
float3 SpecularColor : packoffset(c2);
float SpecularPower : packoffset(c2.w);
float3 LightDirection[3] : packoffset(c3);
float3 LightDiffuseColor[3] : packoffset(c6);
float3 LightSpecularColor[3] : packoffset(c9);
float3 EyePosition : packoffset(c12);
float3 FogColor : packoffset(c13);
float4 FogVector : packoffset(c14);
float4x4 World : packoffset(c15);
float3x3 WorldInverseTranspose : packoffset(c19);
float4x4 WorldViewProj : packoffset(c22);
};
struct VSOutputPixelLightingTx
{
float2 TexCoord : TEXCOORD0;
float4 PositionWS : TEXCOORD1;
float3 NormalWS : TEXCOORD2;
float4 Diffuse : COLOR0;
float4 PositionPS : SV_Position;
};
// Vertex shader: pixel lighting + texture.
VSOutputPixelLightingTx VSBasicPixelLightingTx(VSInputNmTx vin)
{
VSOutputPixelLightingTx vout;
vout.PositionPS = mul(vin.Position, WorldViewProj);
vout.PositionWS.xyz = mul(vin.Position, World).xyz;
// ComputeFogFactor
vout.PositionWS.w = saturate(dot(vin.Position, FogVector));
vout.NormalWS = normalize(mul(vin.Normal, WorldInverseTranspose));
vout.Diffuse = float4(1, 1, 1, DiffuseColor.a);
vout.TexCoord = vin.TexCoord;
return vout;
}
struct PSInputPixelLightingTx
{
float2 TexCoord : TEXCOORD0;
float4 PositionWS : TEXCOORD1;
float3 NormalWS : TEXCOORD2;
float4 Diffuse : COLOR0;
};
// Pixel shader: pixel lighting + texture.
float4 PSBasicPixelLightingTx(PSInputPixelLightingTx pin) : SV_Target0
{
float4 color = Texture.Sample(Sampler, pin.TexCoord) * pin.Diffuse;
float3 eyeVector = normalize(EyePosition - pin.PositionWS.xyz);
float3 worldNormal = normalize(pin.NormalWS);
ColorPair lightResult = ComputeLights(eyeVector, worldNormal, 3);
color.rgb *= lightResult.Diffuse;
// AddSpecular
color.rgb += lightResult.Specular * color.a;
// ApplyFog (we passed fogfactor in via PositionWS.w)
color.rgb = lerp(color.rgb, FogColor * color.a, pin.PositionWS.w);
return color;
}
Here is the helper function ComputeLights which implements a Blinn-Phong reflection model for the specular highlight.
struct ColorPair
{
float3 Diffuse;
float3 Specular;
};
ColorPair ComputeLights(float3 eyeVector, float3 worldNormal, uniform int numLights)
{
float3x3 lightDirections = 0;
float3x3 lightDiffuse = 0;
float3x3 lightSpecular = 0;
float3x3 halfVectors = 0;
[unroll]
for (int i = 0; i < numLights; i++)
{
lightDirections[i] = LightDirection[i];
lightDiffuse[i] = LightDiffuseColor[i];
lightSpecular[i] = LightSpecularColor[i];
halfVectors[i] = normalize(eyeVector - lightDirections[i]);
}
float3 dotL = mul(-lightDirections, worldNormal);
float3 dotH = mul(halfVectors, worldNormal);
float3 zeroL = step(0, dotL);
float3 diffuse = zeroL * dotL;
float3 specular = pow(max(dotH, 0) * zeroL, SpecularPower) * dotL;
ColorPair result;
result.Diffuse = mul(diffuse, lightDiffuse) * DiffuseColor.rgb + EmissiveColor;
result.Specular = mul(specular, lightSpecular) * SpecularColor;
return result;
}
These BasicEffect shaders don't make use of ambient color, but you could modify them to do so if you wanted. All ambient color does is provide a 'minimum color value' that's independent of dynamic lights.
Note that there is also an unofficial Physically-Based Rendering (PBR) materials extension in some Wavefront OBJ files. See Extending Wavefront MTL for Physically-Based. More modern geometry formats like glTF assume PBR material properties, which means things like an albedo texture, a normal texture, a roughness/metalness texture, etc.
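If you also need the loading side, here is a minimal sketch of pulling those values out of a .mtl file yourself (the Material struct and LoadFirstMaterial function are hypothetical, not part of any particular library); it only reads the Ka/Kd/Ks/Ke color statements and the Ns specular exponent:

#include <fstream>
#include <sstream>
#include <string>

// Hypothetical material record matching the classic .mtl color statements.
struct Material
{
    float ambient[3]  = { 0, 0, 0 };   // Ka
    float diffuse[3]  = { 1, 1, 1 };   // Kd
    float specular[3] = { 0, 0, 0 };   // Ks
    float emissive[3] = { 0, 0, 0 };   // Ke
    float specularPower = 1.0f;        // Ns
};

// Reads the color values of the first material found in a .mtl file.
// Real .mtl files can contain several materials and many more statements;
// this only shows where the Ambient/Diffuse/Specular/Emissive data comes from.
Material LoadFirstMaterial(const std::string& path)
{
    Material m;
    std::ifstream file(path);
    std::string line;
    while (std::getline(file, line))
    {
        std::istringstream tokens(line);
        std::string keyword;
        tokens >> keyword;
        if      (keyword == "Ka") tokens >> m.ambient[0]  >> m.ambient[1]  >> m.ambient[2];
        else if (keyword == "Kd") tokens >> m.diffuse[0]  >> m.diffuse[1]  >> m.diffuse[2];
        else if (keyword == "Ks") tokens >> m.specular[0] >> m.specular[1] >> m.specular[2];
        else if (keyword == "Ke") tokens >> m.emissive[0] >> m.emissive[1] >> m.emissive[2];
        else if (keyword == "Ns") tokens >> m.specularPower;
    }
    return m;
}

Those values would then feed the DiffuseColor, EmissiveColor, SpecularColor, and SpecularPower slots of the constant buffer shown above.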
We have a media processing application and we need the ability to rotate frames.
We use the GPU.
We have a vertex shader in HLSL with the following code:
struct VS_INPUT
{
float4 Pos : POSITION;
float2 Tex : TEXCOORD;
uint TexIdx : TEXINDEX;
};
struct VS_OUTPUT
{
float4 Pos : SV_POSITION;
float2 Tex : TEXCOORD;
uint TexIdx : TEXINDEX;
};
VS_OUTPUT VS(VS_INPUT input)
{
VS_OUTPUT output;
float2 pos_rotate = input.Pos.xy;
float rads = radians(45);
float cFlare = cos(rads);
float sFlare = sin(rads);
output.Pos = input.Pos;
output.Pos.xy = mul(pos_rotate, float2x2(cFlare, -sFlare, sFlare, cFlare));
output.Tex = input.Tex;
output.TexIdx = input.TexIdx;
return output;
};
So, as I understand it, sin and cos take an angle in radians, and radians() takes an angle in degrees.
But as you can see, the frame comes out rotated by 90° and not 45°.
OK, the problem was in the C++ code; in another call stack the frame rotates as expected. It is difficult to provide the C++ code now... I think I will just put the frame through the right call stack.
I'm trying to build a practice D3D11 rendering system to load and render FBX files, but I have a problem transforming vertices in the vertex shader.
I can't figure out what is wrong. In the Visual Studio Graphics Debugger I can see the mesh passed to the pipeline is OK in the Input Assembler stage, but after the vertex shader transformations everything breaks and the render goes wrong. If someone can tell me what's going wrong, I'd appreciate the info.
View of the Input Assembler Stage
View of the Vertex Shader Output
This is the Vertex Shader code
cbuffer MatrixBuffer
{
matrix worldMatrix;
matrix viewMatrix;
matrix projectionMatrix;
};
struct VertexInputType
{
float4 position : POSITION;
float3 normal : NORMAL;
float2 uv : TEXCOORD;
};
struct PixelInputType
{
float4 position : SV_POSITION;
float3 normal : NORMAL;
float2 uv : TEXCOORD;
};
PixelInputType TextureVertexShader(VertexInputType input)
{
PixelInputType output;
output.position = mul(input.position, worldMatrix);
output.position = mul(output.position, viewMatrix);
output.position = mul(output.position, projectionMatrix);
output.normal = input.normal;
output.uv = input.uv;
return output;
}
And this is the code of matrix initialization
float lFieldOfView = 3.141592f * 0.4f;
float lScreenAspect = static_cast<float>(width_) / static_cast<float>(height_);
DirectX::XMMATRIX lProjectionMatrix = DirectX::XMMatrixPerspectiveFovLH(lFieldOfView, lScreenAspect, 1.0f, 10000.0f);
DirectX::XMStoreFloat4x4(&lMatrixBuffer.projectionMatrix, lProjectionMatrix);
DirectX::XMMATRIX lWorldMatrix = DirectX::XMMatrixIdentity();
DirectX::XMStoreFloat4x4(&lMatrixBuffer.worldMatrix, lWorldMatrix);
DirectX::XMFLOAT3 lookAtPos(0.0f, 0.0f, 0.0f);
DirectX::XMFLOAT3 eyePos(0.0f, 0.0f, -50.0f);
DirectX::XMFLOAT3 upDir(0.0f, 1.0f, 0.0f);
DirectX::FXMVECTOR lLookAtPos = DirectX::XMLoadFloat3(&lookAtPos);
DirectX::FXMVECTOR lEyePos = DirectX::XMLoadFloat3(&eyePos);
DirectX::FXMVECTOR lUpDir = DirectX::XMLoadFloat3(&upDir);
DirectX::XMMATRIX lViewMatrix = DirectX::XMMatrixLookAtLH(lEyePos, lLookAtPos, lUpDir);
DirectX::XMStoreFloat4x4(&lMatrixBuffer.viewMatrix, lViewMatrix);
D3D11_BUFFER_DESC lBufferDesc = { 0 };
lBufferDesc.ByteWidth = sizeof(MatrixBufferType);
lBufferDesc.Usage = D3D11_USAGE_DYNAMIC;
lBufferDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
lBufferDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
D3D11_SUBRESOURCE_DATA lMatrixBufferData;
lMatrixBufferData.pSysMem = &lMatrixBuffer;
hResult = D3DDevice_->CreateBuffer(&lBufferDesc, &lMatrixBufferData, &D3DMatrixBuffer_);
From the comment it looks like the issue actually might be a matrix row-major/column-major difference. The original HLSL code would probably work if the matrices were transposed on the CPU side.
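A minimal sketch of that fix against the initialization code in the question (HLSL cbuffer matrices default to column-major packing, while XMStoreFloat4x4 writes row-major data, hence the transpose):

// Transpose on the CPU so the default column-major HLSL cbuffer layout
// receives the matrices in the order mul(vector, matrix) expects.
DirectX::XMStoreFloat4x4(&lMatrixBuffer.projectionMatrix,
                         DirectX::XMMatrixTranspose(lProjectionMatrix));
DirectX::XMStoreFloat4x4(&lMatrixBuffer.worldMatrix,
                         DirectX::XMMatrixTranspose(lWorldMatrix));
DirectX::XMStoreFloat4x4(&lMatrixBuffer.viewMatrix,
                         DirectX::XMMatrixTranspose(lViewMatrix));

The alternative is to declare the matrices row_major in the cbuffer, as the shader in the first question does, and leave the C++ side as it is.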
I have the following vertex and pixel shaders:
struct VS_INPUT
{
float4 Position : POSITION0;
float2 TexCoord : TEXCOORD0;
float4 Color : TEXCOORD1;
};
struct VS_OUTPUT
{
float4 Position : POSITION0;
float4 Color : COLOR0;
float2 TexCoord : TEXCOORD0;
};
float4x4 projview_matrix;
VS_OUTPUT vs_main(VS_INPUT Input)
{
VS_OUTPUT Output;
Output.Position = mul(Input.Position, projview_matrix);
Output.Color = Input.Color;
Output.TexCoord = Input.TexCoord;
return Output;
}
And this is the pixel shader:
texture tex;
sampler2D s = sampler_state {
texture = <tex>;
};
float4 ps_main(VS_OUTPUT Input) : COLOR0
{
float4 pixel = tex2D(s, Input.TexCoord.xy);
return pixel;
}
This is for a 2d game. The vertices of the quads contain tinting colors that I want to use to tint the bitmap. How can I obtain the color of the current vertex so I can multiply it in the pixel shader by the current pixel color?
Thanks
In your pixel shader, do:
float4 pixel = tex2D(s, Input.TexCoord.xy) * Input.Color;
The Input.Color value will be linearly interpolated across your plane for you, just like Input.TexCoord is. To blend two color vectors together, you simply multiply them together. It may also be advisable to do:
float4 pixel = tex2D(s, Input.TexCoord.xy) * Input.Color;
pixel = saturate(pixel);
The saturate() function will clamp each component of your color to the range 0.0 to 1.0, which may avoid possible display artifacts.
I'm currently having a problem with lighting in DirectX 11, specifically with ambient lighting. Here is the code:
cbuffer ConstantBuffer
{
float4x4 final;
float4x4 rotation; // the rotation matrix
float4 lightvec; // the light's vector
float4 lightcol; // the light's color
float4 ambientcol; // the ambient light's color
}
struct VOut
{
float4 color : COLOR;
float4 position : SV_POSITION;
};
VOut VShader(float4 position : POSITION, float4 normal : NORMAL)
{
VOut output;
output.position = mul(final, position);
// set the ambient light
output.color = ambientcol;
// calculate the diffuse light and add it to the ambient light
float4 norm = normalize(mul(rotation, normal));
float diffusebrightness = saturate(dot(norm, lightvec));
output.color += lightcol * diffusebrightness;
return output;
}
float4 PShader(float4 color : COLOR) : SV_TARGET
{
return color;
}
Then I send the values to the shader:
ambLight.LightVector = D3DXVECTOR4(1.0f, 1.0f, 1.0f, 0.0f);
ambLight.LightColor = D3DXCOLOR(0.5f, 0.5f, 0.5f, 1.0f);
ambLight.AmbientColor = D3DXCOLOR(0.2f, 0.2f, 0.2f, 1.0f);
ShaderManager.UpdateSubresourceDiffuseShader(devcon);
And then I get the following:
Why?
I tried your shader and it seems to work, so maybe some variables are not being passed correctly.
You could try directly assigning one variable to the color output:
output.color = lightvec;
and
output.color = lightcol;
As a start, so you can double-check that the values are being passed properly.
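If the values do turn out to be the problem, one common cause is the CPU-side structure not matching the cbuffer layout. Here is a sketch of a matching struct, assuming the same D3DX types the question already uses (the struct name is illustrative):

#include <d3dx10.h> // or whichever D3DX header already provides these types in your project

// Must mirror the HLSL cbuffer exactly: two float4x4s followed by three float4s.
// That is 176 bytes, a multiple of 16 as D3D11 requires for constant buffers.
struct DiffuseShaderConstants
{
    D3DXMATRIX  Final;        // float4x4 final
    D3DXMATRIX  Rotation;     // float4x4 rotation
    D3DXVECTOR4 LightVector;  // float4 lightvec
    D3DXCOLOR   LightColor;   // float4 lightcol
    D3DXCOLOR   AmbientColor; // float4 ambientcol
};

If the layout already matches, the two debug assignments above should at least tell you which value is coming through wrong.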