I've been trying to write a geometry shader that generates billboards, as explained in Frank Luna's book Introduction to 3D Game Programming with DirectX. I insert the shader into my technique as follows:
pass P3
{
    SetVertexShader(CompileShader(vs_4_0, VS()));
    SetGeometryShader(CompileShader(gs_4_0, GS()));
    SetPixelShader(CompileShader(ps_4_0, PS_NoSpecular()));
    SetBlendState(SrcAlphaBlendingAdd, float4(0.0f, 0.0f, 0.0f, 0.0f), 0xFFFFFFFF);
}
However, this gives the error:
1>D3DEffectCompiler : error : No valid VertexShader-GeometryShader combination could be found in Technique render, Pass P3.
The structs for both VS and GS are given below:
struct GS_INPUT
{
    float4 PosH : SV_POSITION;
    float4 PosW : POSITION;
    float2 Tex : TEXCOORD;
    float3 N : NORMAL;
    float3 Tangent : TANGENT;
    float3 Binormal : BINORMAL;
};

struct VS_INPUT
{
    float4 Pos : POSITION;
    float2 Tex : TEXCOORD;
    float3 N : NORMAL;
    float3 Tangent : TANGENT;
    float3 Binormal : BINORMAL;
};
Should they be the same?
The problem lies with the structs used to pass data between the vertex, geometry, and pixel shaders: each stage's input type must exactly match the previous stage's output type. If your vertex shader outputs a GS_INPUT, then the geometry shader needs to take GS_INPUT as its input type, and whatever struct the geometry shader streams out must in turn match the pixel shader's input.
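For example, here is a minimal sketch of a matching chain (the member lists are trimmed and the actual billboard expansion is omitted; a real GS per Luna's chapter would compute four quad corners and transform them to homogeneous clip space):

struct VS_INPUT { float4 Pos : POSITION; float2 Tex : TEXCOORD; };
struct GS_INPUT { float4 PosW : POSITION; float2 Tex : TEXCOORD; };
struct PS_INPUT { float4 PosH : SV_POSITION; float2 Tex : TEXCOORD; };

// The vertex shader's return type is the geometry shader's input type...
GS_INPUT VS(VS_INPUT vin)
{
    GS_INPUT vout;
    vout.PosW = vin.Pos; // pass the position through; the GS does the expansion
    vout.Tex = vin.Tex;
    return vout;
}

// ...and whatever the geometry shader appends must match the pixel shader's input type.
[maxvertexcount(4)]
void GS(point GS_INPUT gin[1], inout TriangleStream<PS_INPUT> triStream)
{
    PS_INPUT gout;
    gout.PosH = gin[0].PosW; // a real billboard GS would emit four transformed corners here
    gout.Tex = gin[0].Tex;
    triStream.Append(gout);
}

If the structs (or the semantics inside them) don't line up stage to stage, the effect compiler reports exactly the "No valid VertexShader-GeometryShader combination" error you are seeing.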
Meshes exported to the Wavefront .obj format usually come with a .mtl file that carries additional data about the textures and materials they use. When exported from Blender, they always include Ambient, Diffuse, Specular, and Emissive RGB values as part of the material, but I'm not sure how I can use this data in the pixel shader to get the right color output.
I would appreciate it if anyone could explain how to use these materials; any code sample would be very welcome.
Traditional materials and lighting models use "Ambient", "Diffuse", "Specular", and "Emissive" colors, which is why you find those in Wavefront OBJ files. These can often be replaced by, or used in multiplicative conjunction with, texture colors.
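In rough code form, the classic combination looks something like this generic sketch (all names here are illustrative, not from any particular engine):

// Texture color modulates the ambient + diffuse terms; specular and emissive are added on top.
float3 ClassicMaterial(float3 texColor, float3 ambient, float3 litDiffuse,
                       float3 litSpecular, float3 emissive)
{
    return texColor * (ambient + litDiffuse) + litSpecular + emissive;
}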
The (now defunct) XNA Game Studio product did a good job of providing simple 'classic' shaders in the BasicEffect "Stock Shaders". I use them in the DirectX Tool Kit for DX11 and DX12.
Take a look at BasicEffect.fx for a traditional material pixel shader. If you are looking mostly for pixel-shader handling, that's "per-pixel lighting", as opposed to the "vertex lighting" that was more common back when we had less powerful GPUs.
Here's an 'inlined' version so you can follow it all in one place:
struct VSInputNmTx
{
    float4 Position : SV_Position;
    float3 Normal : NORMAL;
    float2 TexCoord : TEXCOORD0;
};

Texture2D<float4> Texture : register(t0);
sampler Sampler : register(s0);

cbuffer Parameters : register(b0)
{
    float4 DiffuseColor : packoffset(c0);
    float3 EmissiveColor : packoffset(c1);
    float3 SpecularColor : packoffset(c2);
    float SpecularPower : packoffset(c2.w);

    float3 LightDirection[3] : packoffset(c3);
    float3 LightDiffuseColor[3] : packoffset(c6);
    float3 LightSpecularColor[3] : packoffset(c9);

    float3 EyePosition : packoffset(c12);

    float3 FogColor : packoffset(c13);
    float4 FogVector : packoffset(c14);

    float4x4 World : packoffset(c15);
    float3x3 WorldInverseTranspose : packoffset(c19);
    float4x4 WorldViewProj : packoffset(c22);
};
struct VSOutputPixelLighting
{
    float2 TexCoord : TEXCOORD0;
    float4 PositionWS : TEXCOORD1;
    float3 NormalWS : TEXCOORD2;
    float4 Diffuse : COLOR0;
    float4 PositionPS : SV_Position;
};

// Vertex shader: pixel lighting + texture.
VSOutputPixelLighting VSBasicPixelLightingTx(VSInputNmTx vin)
{
    VSOutputPixelLighting vout;

    vout.PositionPS = mul(vin.Position, WorldViewProj);
    vout.PositionWS.xyz = mul(vin.Position, World).xyz;

    // ComputeFogFactor
    vout.PositionWS.w = saturate(dot(vin.Position, FogVector));

    vout.NormalWS = normalize(mul(vin.Normal, WorldInverseTranspose));
    vout.Diffuse = float4(1, 1, 1, DiffuseColor.a);
    vout.TexCoord = vin.TexCoord;

    return vout;
}
struct PSInputPixelLightingTx
{
    float2 TexCoord : TEXCOORD0;
    float4 PositionWS : TEXCOORD1;
    float3 NormalWS : TEXCOORD2;
    float4 Diffuse : COLOR0;
};

// Pixel shader: pixel lighting + texture.
float4 PSBasicPixelLightingTx(PSInputPixelLightingTx pin) : SV_Target0
{
    float4 color = Texture.Sample(Sampler, pin.TexCoord) * pin.Diffuse;

    float3 eyeVector = normalize(EyePosition - pin.PositionWS.xyz);
    float3 worldNormal = normalize(pin.NormalWS);

    ColorPair lightResult = ComputeLights(eyeVector, worldNormal, 3);

    color.rgb *= lightResult.Diffuse;

    // AddSpecular
    color.rgb += lightResult.Specular * color.a;

    // ApplyFog (we passed the fog factor in via PositionWS.w)
    color.rgb = lerp(color.rgb, FogColor * color.a, pin.PositionWS.w);

    return color;
}
Here is the helper function ComputeLights, which implements a Blinn-Phong reflection model for the specular highlight.
struct ColorPair
{
    float3 Diffuse;
    float3 Specular;
};

ColorPair ComputeLights(float3 eyeVector, float3 worldNormal, uniform int numLights)
{
    float3x3 lightDirections = 0;
    float3x3 lightDiffuse = 0;
    float3x3 lightSpecular = 0;
    float3x3 halfVectors = 0;

    [unroll]
    for (int i = 0; i < numLights; i++)
    {
        lightDirections[i] = LightDirection[i];
        lightDiffuse[i] = LightDiffuseColor[i];
        lightSpecular[i] = LightSpecularColor[i];

        halfVectors[i] = normalize(eyeVector - lightDirections[i]);
    }

    float3 dotL = mul(-lightDirections, worldNormal);
    float3 dotH = mul(halfVectors, worldNormal);

    float3 zeroL = step(0, dotL);

    float3 diffuse = zeroL * dotL;
    float3 specular = pow(max(dotH, 0) * zeroL, SpecularPower) * dotL;

    ColorPair result;
    result.Diffuse = mul(diffuse, lightDiffuse) * DiffuseColor.rgb + EmissiveColor;
    result.Specular = mul(specular, lightSpecular) * SpecularColor;

    return result;
}
These BasicEffect shaders don't make use of ambient color, but you could modify them to do so if you wanted. All ambient color does is provide a 'minimum color value' that's independent of dynamic lights.
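For instance, you could change the end of ComputeLights to something like this (a sketch in which AmbientColor is a hypothetical new field you would add to the Parameters cbuffer; it is not part of the stock shader):

result.Diffuse = mul(diffuse, lightDiffuse) * DiffuseColor.rgb
               + AmbientColor * DiffuseColor.rgb // constant floor, independent of the dynamic lights
               + EmissiveColor;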
Note that there is also an unofficial Physically-Based Rendering (PBR) materials extension found in some Wavefront OBJ files; see Extending Wavefront MTL for Physically-Based Rendering. More modern geometry formats like glTF assume PBR material properties: an albedo texture, a normal texture, a roughness/metalness texture, and so on.
I am working on a 3D game in C++, and I am having trouble with how a mesh looks on screen when I load it from a glTF file.
This is the 3D model I am trying to load.
I am using DirectX 11.
As you can see, the mesh is flipped on the X-axis, and I can't figure out what to do to flip it so it looks normal.
This is how I want it to look.
struct VS_INPUT
{
    float4 position : POSITION0;
    float2 texcoord : TEXCOORD0;
    float3 normal : NORMAL0;
};

struct VS_OUTPUT
{
    float4 position : SV_POSITION;
    float2 texcoord : TEXCOORD0;
    float3 normal : NORMAL0;
    float3 direction_to_camera : TEXCOORD1;
};

cbuffer constant : register(b0)
{
    row_major float4x4 m_world;
    row_major float4x4 m_view;
    row_major float4x4 m_proj;
    float4 m_light_direction;
    float4 m_camera_position;
    float4 m_light_position;
    float m_light_radius;
};

VS_OUTPUT vsmain(VS_INPUT input)
{
    VS_OUTPUT output = (VS_OUTPUT)0;

    // WORLD SPACE
    output.position = mul(input.position, m_world);
    output.direction_to_camera = normalize(output.position.xyz - m_camera_position.xyz);

    // VIEW SPACE
    output.position = mul(output.position, m_view);

    // SCREEN SPACE
    output.position = mul(output.position, m_proj);

    output.texcoord = input.texcoord;
    output.normal = normalize(mul(input.normal, (float3x3)m_world));

    return output;
}
This is the vertex shader I am using.
Negate X in the vertex shader:
output.position.x = -output.position.x;
or invert the X coordinate of each vertex when loading and parsing your mesh:
v.x = -v.x;
or scaling by -1 on the X axis should do the trick:
SetScale(-1.0f, 1.0f, 1.0f);
Note that any mirror flip like this also reverses the triangle winding order, so you may need to flip the rasterizer cull mode (or reverse your index order) to keep back-face culling correct.
In the fragment shader, I defined two structures as follows:
struct DirLight {
    vec3 direction;

    vec3 ambient;
    vec3 diffuse;
    vec3 specular;
};

struct PointLight {
    vec3 position;

    vec3 ambient;
    vec3 diffuse;
    vec3 specular;

    float constant;
    float linear;
    float quadratic;
};
In the vertex shader, I defined the following variables, because I first want to do some transformations on these uniforms (such as matrix multiplications, which are not recommended in the fragment shader):
uniform DirLight dirLight; // only one directional light
uniform int pointLightCnt; // number of point light sources
uniform PointLight pointLight[MAX]; // point lights
What should I do to transfer these structures from the vertex shader to the fragment shader?
Can I use a method similar to C++, where the structures are defined in a header file that is included by both the vertex and fragment shaders, and then a corresponding out variable is defined in the vertex shader and a matching in variable in the fragment shader?
I was going to go into a long explanation of how to implement your lighting structure so it is generic to any light type, but that is a separate issue.
Your current issue is that the vertex shader shouldn't need to use the lighting uniforms at all; there's no lighting data to pass between the stages. The only things the vertex shader should be doing are converting the position from local space to clip space and saving the intermediate world-space position as a separate part of the fragment shader's input, so that the fragment shader can calculate the lighting properly.
All the lighting calculations can be done on the pixel/fragment shader and any dynamic lighting (positions, penumbra calculations, direction changes, etc) should be done on the CPU and just passed on to the GPU in the lighting buffer/uniform all at once.
This is in HLSL, but it's easily converted to GLSL:
//Uniform
cbuffer matrix_cb : register(b0) {
    float4x4 g_MODEL;
    float4x4 g_VIEW;
    float4x4 g_PROJECTION;
};

struct vs_in_t {
    float3 position : POSITION;
    float4 color : COLOR;
    float2 uv : UV;
    float4 normal : NORMAL;
};

struct ps_in_t {
    float4 position : SV_POSITION;
    float4 color : COLOR;
    float2 uv : UV;
    float4 normal : NORMAL;
    float3 world_position : WORLD;
};

ps_in_t VertexFunction(vs_in_t input_vertex) {
    ps_in_t output;

    float4 local = float4(input_vertex.position, 1.0f);
    // w == 0 so the normal is rotated, not translated, by the model matrix.
    float4 normal = float4(input_vertex.normal.xyz, 0.0f);

    float4 world = mul(local, g_MODEL);
    float4 view = mul(world, g_VIEW);
    float4 clip = mul(view, g_PROJECTION);

    output.position = clip;
    output.color = input_vertex.color;
    output.uv = input_vertex.uv;
    output.normal = mul(normal, g_MODEL); // into world space (assumes uniform scale)
    output.world_position = world.xyz;

    return output;
}
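To round out the sketch, the pixel side might look like the following; the light cbuffer layout here is my own assumption (a single directional light), not part of the original code. A point light would additionally use world_position for its direction and attenuation math.

// Hypothetical light uniform, filled out on the CPU once per frame.
cbuffer light_cb : register(b1) {
    float4 g_LIGHT_DIRECTION; // xyz = normalized world-space travel direction of the light
    float4 g_LIGHT_COLOR;
};

float4 PixelFunction(ps_in_t input) : SV_Target0 {
    // Re-normalize: interpolation across the triangle denormalizes the normal.
    float3 n = normalize(input.normal.xyz);
    float n_dot_l = saturate(dot(n, -g_LIGHT_DIRECTION.xyz)); // Lambert term
    float3 lit = input.color.rgb * g_LIGHT_COLOR.rgb * n_dot_l;
    return float4(lit, input.color.a);
}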
I am in the process of implementing lighting in my DirectX 11 project. The problem I have is that when I try to access a cbuffer value from the pixel shader function it just returns float3(0, 0, 0), while accessing the same value in the vertex shader function returns the correct value. Here is the shader:
/*********************************************\
VERTEX SHADER
\*********************************************/
//Constant buffers
cbuffer Object : register(cb0) {
    float4x4 WorldMatrix;
};

cbuffer Camera : register(cb1) {
    float4x4 ViewMatrix;
    float4x4 ProjectionMatrix;
};

cbuffer LightBuffer : register(cb2) {
    float3 AmbientColor;
}

//IO Structs
struct VS_INPUT {
    float3 Position : POSITION;
    float2 UV : TEXCOORD;
    float3 Normal : NORMAL;
};

struct VS_OUTPUT {
    float4 Position : SV_POSITION;
    float2 UV : TEXCOORD;
    float3 Normal : NORMAL;
};

VS_OUTPUT VS(VS_INPUT input) {
    VS_OUTPUT output;

    float4 Position;
    //Multiply position with AmbientColor (should be 1, 1, 1), position unchanged
    Position = mul(ViewMatrix, float4(input.Position * AmbientColor, 1));
    Position = mul(ProjectionMatrix, Position);
    Position = mul(WorldMatrix, Position);

    output.Position = Position;
    output.UV = input.UV;
    output.Normal = mul(WorldMatrix, input.Normal);

    return output;
}

/*********************************************\
    PIXEL SHADER
\*********************************************/

SamplerState TextureState;
Texture2D<float4> Texture;

float4 PS(VS_OUTPUT input) : SV_TARGET {
    float4 MaterialColor = Texture.Sample(TextureState, input.UV);
    //Multiply color with AmbientColor (should be 1, 1, 1), returns black
    float3 FinalColor = MaterialColor.xyz * AmbientColor;
    return float4(FinalColor, MaterialColor.a);
}
Here is the value I'm sending (C++):
_LightsUniform.AmbientColor = XMFLOAT3(1, 1, 1);
DeviceContext->UpdateSubresource(_LightBuffer, 0, NULL, &_LightsUniform, 0, 0);
DeviceContext->VSSetConstantBuffers(2, 1, &_LightBuffer);
DeviceContext->PSSetConstantBuffers(2, 1, &_LightBuffer);
Here is the result:
http://i.gyazo.com/357f1ed3ea33e6569ad2346b368cd975.png
And result without multiplying color: http://gyazo.com/b60b385daa94d3373e9552a523928e3f
I can't see what is wrong. Has anybody else had the same issue?
I found the problem: it turns out that the registers for my cbuffers were wrong. I used cb# where b# should be used. (I misunderstood what was written here: https://msdn.microsoft.com/en-us/library/windows/desktop/hh447212(v=vs.85).aspx)
Wrong code:
cbuffer LightBuffer : register(cb2) {
Changed to:
cbuffer LightBuffer : register(b2) {
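For reference, all three buffers from the shader above then become (in Shader Model 4/5, b# is for constant buffers, t# for textures, s# for samplers, and u# for UAVs):

cbuffer Object : register(b0) {
    float4x4 WorldMatrix;
};

cbuffer Camera : register(b1) {
    float4x4 ViewMatrix;
    float4x4 ProjectionMatrix;
};

cbuffer LightBuffer : register(b2) {
    float3 AmbientColor;
}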
I am trying to implement deferred shading in DirectX 11 (C++). I have managed to create the G-buffer and render my scene to it (checked with GPU PerfStudio). I am having difficulty with the final lighting stage: I am not able to read from the textures (Diffuse, Normal, Specular) using the coordinates returned by SV_Position.
This is the pixel shader used to render the lights as shapes:
Texture2D<float4> Diffuse : register(t0);
Texture2D<float4> Normal : register(t1);
Texture2D<float4> Position : register(t2);

cbuffer MaterialBuffer : register(b1)
{
    float4 ambient;
    float4 diffuse;
    float4 specular;
}

//--------------------------------------------------------------------------------------
struct VS_OUTPUT
{
    float4 Pos : SV_POSITION;
    float4 PosVS : POSITION;
    float4 Color : COLOR0;
    float4 normal : NORMAL;
};

float4 main(VS_OUTPUT input) : SV_TARGET
{
    //return Diffuse[screenPosition.xy]+Normal[screenPosition.xy]+Position[screenPosition.xy];
    //return float4(1.0f, 1.0f, 1.0f, 1.0f);

    //--------------------------------------------------------------------------------------
    //Problematic line
    float4 b = Diffuse.Load(int3(input.Pos.xy, 0));
    //--------------------------------------------------------------------------------------

    return b;
}
I have checked with GPU PerfStudio that the input textures are properly bound.
The above code returns the color I used to clear the texture. (From my debugging, I have found that it's returning the value at pixel location 0,0.)
If I replace the problematic line with:
float4 b = Diffuse.Load(int3(350, 300, 0));
then it renders the value at pixel location 350,300, with the proper light shape.
Thanks
Have you tried creating the device with the debug flag D3D11_CREATE_DEVICE_DEBUG and looking at the output log? You may be experiencing a signature mismatch between the vertex and pixel stages, which would explain why the SV_Position semantic does not behave correctly.
I solved the problem: I was using the same z-buffer for rendering the light geometry that I had previously used for the G-buffer pass.
Thank you for your response.