I'm trying to extend a renderer (for TressFX) with a Geometry Shader, and therefore I'm taking baby steps in order to see that everything works as it should. I've created a simple pass-through Geometry Shader, which works for triangles, but for some reason does not work for lines or points (transforming the triangle to lines and points), since I can't see the output. Result link
I would be glad if someone could point out why it doesn't work for lines and points.
code
IASetPrimitiveTopology is set to D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST
Vertex Shader
// Interpolants passed from the hair vertex shader to the pixel shader.
struct PS_INPUT_HAIR_AA
{
float4 Position : SV_POSITION; // clip-space position
float4 Tangent : Tangent;      // xyz = hair tangent, w = ratio (see VS)
float4 p0p1 : TEXCOORD0;       // xy = this vertex position, zw = next hair vertex position (used for AA)
};
// Hair vertex shader: fetches per-vertex data via SV_VertexID (no vertex
// buffer input) and outputs clip-space position, tangent, and the segment
// endpoints (p0p1) consumed by the antialiasing pixel shader.
PS_INPUT_HAIR_AA VS_RenderHair( uint vertexId : SV_VertexID )
{
    PS_INPUT_HAIR_AA Output = (PS_INPUT_HAIR_AA)0;
    // Calculate position etc... (computation of v, t and ratio elided in the
    // original post -- it must stay commented out or the shader won't compile)
    Output.Position = mul(float4(v, 1), g_mViewProj);   // world -> clip space
    Output.Tangent = float4(t, ratio);                  // xyz tangent, w = ratio
    Output.p0p1 = float4( v.xy, g_HairVertexPositions[vertexId+1].xy ); // this vertex + next vertex
    return Output;
}
Geometry Shader for triangles (which works)
// Pass-through geometry shader: re-emits each input triangle unchanged.
// maxvertexcount should match what is actually emitted: declaring the
// minimum (3) lets the driver size the GS output buffer optimally instead
// of reserving space for 64 vertices.
[maxvertexcount(3)]
void GS_RenderHairPassThrough( triangle PS_INPUT_HAIR_AA input[3], inout TriangleStream<PS_INPUT_HAIR_AA> OutputStream )
{
    PS_INPUT_HAIR_AA output = (PS_INPUT_HAIR_AA)0;
    for(uint i = 0; i < 3; ++i) {
        output.Position = input[i].Position;
        output.Tangent = input[i].Tangent;
        output.p0p1 = input[i].p0p1;
        OutputStream.Append( output );
    }
    OutputStream.RestartStrip(); // end the 3-vertex strip = one triangle
}
Geometry Shader for points and lines (which does not work)
// Converts each input triangle into three individual points.
// maxvertexcount matches the actual emission count (3).
[maxvertexcount(3)]
void GS_RenderHairPassThrough( triangle PS_INPUT_HAIR_AA input[3], inout PointStream<PS_INPUT_HAIR_AA> OutputStream )
{
    PS_INPUT_HAIR_AA output = (PS_INPUT_HAIR_AA)0;
    for(uint i = 0; i < 3; ++i) {
        output.Position = input[i].Position;
        output.Tangent = input[i].Tangent;
        output.p0p1 = input[i].p0p1;
        OutputStream.Append( output );
    }
    // No RestartStrip() needed: a PointStream has no strips, every Append()
    // already emits a complete point primitive.
}
// Converts each input triangle into one line strip through its three
// vertices (two segments). maxvertexcount matches the emission count (3).
[maxvertexcount(3)]
void GS_RenderHairPassThrough( triangle PS_INPUT_HAIR_AA input[3], inout LineStream<PS_INPUT_HAIR_AA> OutputStream )
{
    PS_INPUT_HAIR_AA output = (PS_INPUT_HAIR_AA)0;
    for(uint i = 0; i < 3; ++i) {
        output.Position = input[i].Position;
        output.Tangent = input[i].Tangent;
        output.p0p1 = input[i].p0p1;
        OutputStream.Append( output );
    }
    OutputStream.RestartStrip(); // terminate the 3-vertex line strip
}
However, if I hard code the output.Position in the Point/Line pass-through Geometry Shader I can manage to get an output which I can see on the screen.
I don't think you can change the primitive topology in geometry shader. to render the mesh as lines/points, you might want to set that in your render function as below.
g_pd3dDevice->IASetPrimitiveTopology(D3D10_PRIMITIVE_TOPOLOGY_LINELIST);
or
g_pd3dDevice->IASetPrimitiveTopology(D3D10_PRIMITIVE_TOPOLOGY_POINTLIST);
another thing is if you want to change the primitive type in geometry shader, you should use
void GS_RenderHairPassThrough( line PS_INPUT_HAIR_AA input[3], inout PointStream<PS_INPUT_HAIR_AA> OutputStream )
or
void GS_RenderHairPassThrough( point PS_INPUT_HAIR_AA input[3], inout PointStream<PS_INPUT_HAIR_AA> OutputStream )
I see you use triangle for both of them as the first argument of GS_RenderHairPassThrough
Related
I am trying to add a Geometry Shader to my DirectX 11 project in C++
There are no examples of this anywhere I look. There are millions of tutorials on OpenGL but nothing on geometry shaders in DirectX
I just wrote a basic shader below, but I get the following error when trying to build it
error X3514: 'LightGeometryShader' must have a max vertex count
Can anyone please advise on what this shader is missing to be able to compile?
////////////////////////////////////////////////////////////////////////////////
// Filename: light.gs
////////////////////////////////////////////////////////////////////////////////
//////////////
// TYPEDEFS //
//////////////
// Per-vertex input to the geometry shader (as produced by the vertex shader).
struct GeometryInputType
{
float4 position : POSITION;  // not yet SV_POSITION: GS still runs before rasterization
float2 tex : TEXCOORD0;
float3 normal : NORMAL;
};
// Per-vertex output of the geometry shader, consumed by the pixel shader.
struct PixelInputType
{
float4 position : SV_POSITION; // clip-space position for the rasterizer
float2 tex : TEXCOORD0;
float3 normal : NORMAL;
};
////////////////////////////////////////////////////////////////////////////////
// Geometry Shader
////////////////////////////////////////////////////////////////////////////////
// NOTE: this is the code the question is about and it does NOT compile
// (error X3514). A geometry shader is not a 1:1 function like a vertex
// shader: it must declare [maxvertexcount(N)], take a primitive as an
// input array, and emit vertices through an inout stream parameter.
PixelInputType LightGeometryShader(GeometryInputType input)
{
PixelInputType output;
output = input;
return output;
}
GeometryShader is not necessarily a 1:1 function, which is why you have to provide a max vertex count. See Microsoft Docs.
// Pass-through geometry shader: one triangle in, the same triangle out.
// [maxvertexcount] is mandatory because a GS may emit a variable number of
// vertices; this one emits exactly the three it receives.
[maxvertexcount(3)]
void LightGeometryShader( triangle GeometryInputType input[3],
                          inout TriangleStream<PixelInputType> outStream )
{
    PixelInputType vertex;
    for( int i = 0; i < 3; ++i )
    {
        vertex.position = input[i].position;
        vertex.tex      = input[i].tex;
        vertex.normal   = input[i].normal;
        outStream.Append( vertex );
    }
}
Geometry Shader was introduced with Direct3D 10, so the bulk of the developer samples were in the legacy DirectX SDK at the time. You can find the latest copy of these samples, buildable without the legacy DirectX SDK, on GitHub.
Images with examples of the problem: http://imgur.com/gallery/vmMyk
Hi,
I need some help with rendering 2D objects in 3D scene with 3D camera. I think I managed to solve 2D coordinates with LH world coordinates. However, my rendered 2D objects are in a correct place, only when camera is at [0.0f, 0.0f, 0.0f] coordinates. In every other position, the location of 2D objects on scene is malformed. I think my matrices are screwed up, but don't know where to look further. I'd appreciate good ideas, please comment if something's missing for you, I'll edit the main post to provide you more information.
I'm using simple 3D color HLSL (VS and PS ver: 4.0) shader with alpha blending for the bigger triangle:
// Per-object transform constants, bound to slot b0.
cbuffer ConstantBuffer : register( b0 )
{
matrix World;      // object -> world
matrix View;       // world -> view
matrix Projection; // view -> clip
}
// Vertex shader input: position + color (matches the C++ Vertex struct).
struct VS_INPUT
{
float4 Pos : POSITION;
float4 Color : COLOR;
};
// Interpolants handed to the pixel shader.
struct PS_INPUT
{
float4 Pos : SV_POSITION;
float4 Color : COLOR;
};
// Vertex shader: classic World * View * Projection transform with the
// vertex color passed straight through.
PS_INPUT VS ( VS_INPUT input )
{
    PS_INPUT result = (PS_INPUT)0;
    input.Pos.w = 1.0f;                      // force a proper point (w = 1)
    float4 pos = mul ( input.Pos, World );   // object -> world
    pos = mul ( pos, View );                 // world -> view
    pos = mul ( pos, Projection );           // view -> clip
    result.Pos = pos;
    result.Color = input.Color;
    return result;
}
// Pixel shader: output the interpolated vertex color unchanged.
float4 PS ( PS_INPUT input ) : SV_Target
{
return input.Color;
}
That's my Vertex data struct:
// Simple position + color vertex, matching the shader's VS_INPUT layout.
struct Vertex
{
    DirectX::XMFLOAT3 position; // object-space position (POSITION semantic)
    DirectX::XMFLOAT4 color;    // RGBA color (COLOR semantic)

    Vertex() = default; // trivially default-constructible, as before
    Vertex(DirectX::XMFLOAT3 aPosition, DirectX::XMFLOAT4 aColor)
        : position(aPosition)
        , color(aColor)
    {}
};
Render call for object:
// Binds this mesh's vertex/index buffers and topology on the immediate
// context. NOTE(review): despite its name, no draw call is issued here --
// presumably the shader's Render() performs the actual draw; confirm.
bool PrimitiveMesh::Draw()
{
unsigned int stride = sizeof(Vertex);
unsigned int offset = 0;
D3DSystem::GetD3DDeviceContext()->IASetVertexBuffers(0, 1, &iVertexBuffer, &stride, &offset);
D3DSystem::GetD3DDeviceContext()->IASetIndexBuffer(iIndexBuffer, DXGI_FORMAT_R32_UINT, 0);
D3DSystem::GetD3DDeviceContext()->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
return true; // unconditionally reports success
}
Draw call with initialization:
// Lazily create a single screen-sized triangle mesh on first use
// (coordinates appear to be in pixels: 800x600 -- TODO confirm).
static PrimitiveMesh* mesh;
if (mesh == 0)
{
std::vector<PrimitiveMesh::Vertex> vertices;
mesh = new PrimitiveMesh();
DirectX::XMFLOAT4 color = { 186 / 256.0f, 186 / 256.0f, 186 / 256.0f, 0.8f };
vertices.push_back({ DirectX::XMFLOAT3(0.0f, 0.0f, 0.0f), color });
vertices.push_back({ DirectX::XMFLOAT3(0.0f, 600.0f, 0.0f), color });
vertices.push_back({ DirectX::XMFLOAT3(800.0f, 600.0f, 0.0f), color });
mesh->SetVerticesAndIndices(vertices);
}
// Getting clean matrices here:
D3D::Matrices(world, view, projection, ortho);
// Draw the translucent overlay with depth testing off and blending on.
iGI->TurnZBufferOff();
iGI->TurnOnAlphaBlending();
mesh->Draw();
XMMATRIX view2D = Camera::View2D();
// Render with the 2D view and the orthographic projection (3 vertices).
iColorShader->Render(iGI->GetContext(), 3, &world, &view2D, &ortho);
iGI->TurnZBufferOn();
These are my 2D calculations for camera:
// Build a look-at view matrix for the "2D" camera: rotate the up and
// forward vectors by the camera rotation (identity here), then look from
// `position` toward position + forward.
up = DirectX::XMVectorSet(0.0f, 1.0f, 0.0f, 0.0f);
lookAt = DirectX::XMVectorSet(0.0f, 0.0f, 1.0f, 0.0f);
rotationMatrix = DirectX::XMMatrixRotationRollPitchYaw(0.0f, 0.0f, 0.0f); // (pitch, yaw, roll);
up = DirectX::XMVector3TransformCoord(up, rotationMatrix);
lookAt = DirectX::XMVector3TransformCoord(lookAt, rotationMatrix) + position;
view2D = DirectX::XMMatrixLookAtLH(position, lookAt, up);
I'll appreciate any help.
Kind regards.
With Shaders, you are not forced to use matrices, you have the flexibility to simplify the problem.
Let's say you render 2D objects using coordinates in pixels; the only requirement is to scale and offset them back into the normalized projective space.
A vertex shader could be as short as that :
// Reciprocal of the render-target size. NOTE: on the CPU side the bound
// buffer must be a multiple of 16 bytes, so pad the C++ struct accordingly.
cbuffer ConstantBuffer : register( b0 ) {
float2 rcpDim; // 1 / renderTargetSize
}
// Maps pixel coordinates straight to clip space: scale by 2/renderTargetSize
// into [0..2], shift to [-1..1], and flip Y because the pixel origin is
// top-left while the clip-space origin is bottom-left.
PS_INPUT VS ( VS_INPUT input ) {
    PS_INPUT output;
    float2 ndc = input.Pos.xy * rcpDim * 2; // pixels -> [0..2]
    ndc -= 1;                               // -> [-1..1]
    ndc.y *= -1;                            // flip Y for projective space
    output.Pos.xy = ndc;
    output.Pos.zw = float2(0,1);
    output.Color = input.Color;
    return output;
}
You can of course build a set of matrices achieving the same result with your original shader: just set World and View to identity and Projection to an ortho projection with XMMatrixOrthographicOffCenterLH(0,width,0,height,0,1). But as you are beginning with 3D programming, you will soon have to learn to deal with multiple shaders anyway, so take it as an exercise.
Well, I fixed my problem. For some weird reason, DirectXMath was generating a wrong XMMATRIX. My XMMatrixOrthographicLH() was completely incorrect for good parameters. I solved my problem with the classic definition of the orthographic matrix, found in this article (definition in Fig. 10).
// Hand-built orthographic projection (per Fig. 10 of the cited article),
// written directly into the XMMATRIX rows; `screenDepth`/`screenNear` are
// the far/near plane distances (defined elsewhere).
auto orthoMatrix = DirectX::XMMatrixIdentity();
orthoMatrix.r[0].m128_f32[0] = 2.0f / Engine::VideoSettings::Current()->WindowWidth();
orthoMatrix.r[1].m128_f32[1] = 2.0f / Engine::VideoSettings::Current()->WindowHeight();
orthoMatrix.r[2].m128_f32[2] = -(2.0f / (screenDepth - screenNear));
orthoMatrix.r[2].m128_f32[3] = -(screenDepth + screenNear) / (screenDepth - screenNear);
galop1n give a good solution but on my system
// Same constant buffer as in the answer above, quoted for reference.
cbuffer ConstantBuffer : register( b0 ) {
float2 rcpDim; // 1 / renderTargetSize
}
the buffer needs to be a multiple of 16 bytes in size, which can be achieved like this:
// CPU-side mirror of the shader constant buffer; rcpDim2 is unused padding
// that rounds the size up to 16 bytes (two XMFLOAT2 = 16).
struct VS_CONSTANT_BUFFER
{
DirectX::XMFLOAT2 rcpDim;
DirectX::XMFLOAT2 rcpDim2;
};
// Supply the vertex shader constant data.
VS_CONSTANT_BUFFER VsConstData;
VsConstData.rcpDim = { 2.0f / w,2.0f / h}; // NOTE(review): stores 2/size, not 1/size -- the *2 from the shader appears to be folded in here
// Fill in a buffer description.
D3D11_BUFFER_DESC cbDesc;
ZeroMemory(&cbDesc, sizeof(cbDesc));
cbDesc.ByteWidth = sizeof(VS_CONSTANT_BUFFER); // 16 bytes thanks to the padding member
cbDesc.Usage = D3D11_USAGE_DYNAMIC;            // CPU-writable via Map/Unmap
cbDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
cbDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
cbDesc.MiscFlags = 0;
cbDesc.StructureByteStride = 0;
// Fill in the subresource data.
D3D11_SUBRESOURCE_DATA InitData;
ZeroMemory(&InitData, sizeof(InitData));
InitData.pSysMem = &VsConstData;
InitData.SysMemPitch = 0;
InitData.SysMemSlicePitch = 0;
// Create the buffer.
HRESULT hr = pDevice->CreateBuffer(&cbDesc, &InitData,
&pConstantBuffer11);
or aligned
// Alternative: let the compiler pad the struct to 16 bytes via alignment
// instead of adding a dummy member.
__declspec(align(16))
struct VS_CONSTANT_BUFFER
{
DirectX::XMFLOAT2 rcpDim;
};
Hi I am just learning directx11 and have come across a problem with PSSetShaderResources
if I change textures before the call to swapchain->Present only the first texture is displayed.
if I change textures between Present calls both are displayed but on consecutive frames.
Is there anyway of changing textures with PSSetShaderResources so I can use both (or more) on a single frame?
I know I can use texture arrays but it appears to me that you must have same size textures?
also I could upload two textures (or more) at a time but I would then have to have conditional statements within shader.
Below is the drawing loop I am using. And the simple shader that I am using
any help would be appreciated.
Paul
24OCT2014
// Draw all objects for texture 0, then all objects for texture 1, binding
// the corresponding SRV before each batch and presenting once at the end
// so both textures appear in the same frame.
tempBool = false;
for (int j = 0; j < 2; j++) //change 2 to texCount
{
devcon->UpdateSubresource(pConstantBuffer, 0, NULL, &cb, 0, 0);
devcon->VSSetConstantBuffers(0, 1, &pConstantBuffer);
devcon->PSSetConstantBuffers(0, 1, &pConstantBuffer);
// NOTE(review): the toggle selects pTex[0] when j==0 and pTex[1] when
// j==1, so this whole if/else is equivalent to binding pTex[j] directly.
tempBool = !tempBool;
if (tempBool)
{
devcon->PSSetShaderResources(0, 1, &pTex[0]);
}
else
{
devcon->PSSetShaderResources(0, 1, &pTex[1]);
}
// One draw per object range assigned to this texture.
for (int i = 0; i < texRun[j]; i++)
{
devcon->Draw(obLens[curPos+i], obStarts[curPos+i]);
}
curPos += texRun[j];
}
swapchain->Present(0, 0); // present the completed frame once
// Shader resources: diffuse texture (t0), its sampler (s0), and the
// per-frame constants including a directional light (b0).
Texture2D txDiffuse : register( t0 );
SamplerState samLinear : register( s0 );
cbuffer ConstantBuffer : register( b0 )
{
matrix World;
matrix View;
matrix Projection;
float4 vLightDir;   // directional light direction
float4 vLightColor; // light color (unused in PShader0 below)
};
// Vertex shader output / pixel shader input.
struct VOut
{
float4 Pos : SV_POSITION;
float3 Norm : NORMAL;  // world-space normal
float2 Tex : TEXCOORD;
};
// Vertex shader: transforms the position through World/View/Projection and
// rotates the normal into world space.
VOut VShader(float4 position : POSITION, float3 Norm : NORMAL, float2 Tex : TEXCOORD)
{
    VOut output = (VOut)0;
    output.Pos = mul( position, World );
    output.Pos = mul( output.Pos, View );
    output.Pos = mul( output.Pos, Projection );
    // Explicit float3x3 cast: applies only the rotation/scale part of World
    // (the implicit mul(float3, float4x4) relied on matrix truncation).
    // NOTE(review): with non-uniform scale this should use the inverse
    // transpose and renormalize -- confirm World is rotation/uniform scale.
    output.Norm = mul(Norm, (float3x3)World);
    output.Tex = Tex;
    return output;
}
// Pixel shader: textured surface lit by a single directional light.
// The final color is the raw texture sample plus a clamped diffuse term;
// alpha is forced to fully opaque.
float4 PShader0(float4 position : SV_POSITION, float3 Norm : NORMAL,
    float2 Tex : TEXCOORD ) : SV_TARGET
{
    float4 albedo = txDiffuse.Sample( samLinear, Tex );
    float4 result = albedo; // unlit base (the * 0.2 ambient factor is disabled)
    result += saturate( dot((float3)vLightDir,Norm) * albedo ); // diffuse term
    result.a = 1.0;
    return result;
}
Finally tracked down the error it was in my object loading code and nothing to do width direct3d
I was Loading second lot of objects over the first lot
for first lot of objects I am reading into verts which is my vertex data pointer
for the second lot I was also reading into verts instead of &verts[firstVertsCount]
anyway thanks for the help
Paul
I am trying to create a simple diffuse shader to paint primitive objects in DirectX 9 and faced following problem. When I used a DirectX primitive object like a Torus or Teapot, some faces in the foreground part of the mesh is invisible. I don't think this is the same thing as faces being invisible as I cannot reproduce this behavior for primitive objects like Sphere or Box where no two quads have the same normal. Following are some screenshots in fill and wire-frame modes.
torus fill-mode
Following is my vertex declaration code.
// Build the D3D9 vertex declaration: a float3 position at offset 0
// followed by a float3 normal at offset 12, both in stream 0.
// vertex position...
D3DVERTEXELEMENT9 element;
element.Stream = 0;
element.Offset = 0;
element.Type = D3DDECLTYPE_FLOAT3;
element.Method = D3DDECLMETHOD_DEFAULT;
element.Usage = D3DDECLUSAGE_POSITION;
element.UsageIndex = 0;
m_vertexElement.push_back(element);
// vertex normal
element.Stream = 0;
element.Offset = 12; //3 floats * 4 bytes per float
element.Type = D3DDECLTYPE_FLOAT3;
element.Method = D3DDECLMETHOD_DEFAULT;
element.Usage = D3DDECLUSAGE_NORMAL;
element.UsageIndex = 0;
m_vertexElement.push_back(element);
And shader code in development.
// Vertex-shader constants: world (c0..c3) and view-projection (c4..c7).
float4x4 MatWorld : register(c0);
float4x4 MatViewProj : register(c4);
// Pixel-shader constant: material color. This c0 is a *pixel-shader*
// register, so it does not clash with MatWorld's vertex-shader c0.
float4 matColor : register(c0);
// Vertex input: matches the D3DVERTEXELEMENT9 declaration above.
struct VS_INPUT
{
float4 Position : POSITION;
float3 Normal : NORMAL;
};
// Vertex output: clip-space position plus the world-space normal, passed
// to the pixel shader through TEXCOORD0 (D3D9 has no generic semantics).
struct VS_OUTPUT
{
float4 Position : POSITION;
float3 Normal : TEXCOORD0;
};
// Pixel output: single render-target color.
struct PS_OUTPUT
{
float4 Color : COLOR0;
};
// Vertex shader: world then view-projection transform; the normal is
// rotated by the upper 3x3 of the world matrix and renormalized.
VS_OUTPUT vsmain(in VS_INPUT In)
{
    VS_OUTPUT Out;
    float4 wpos = mul(In.Position, MatWorld);
    Out.Position = mul(wpos, MatViewProj);
    // Explicit float3x3 cast: the original normalize(mul(float3, float4x4))
    // normalized a float4 (including the spurious w component) before
    // truncating to float3 -- only correct by accident for affine matrices.
    Out.Normal = normalize(mul(In.Normal, (float3x3)MatWorld));
    return Out;
};
// Pixel shader: material color lit by a hard-coded +X directional light,
// plus a dark red ambient term.
PS_OUTPUT psmain(in VS_OUT In)
{
PS_OUTPUT Out;
float4 ambient = {0.1, 0.0, 0.0, 1.0};
float3 light = {1, 0, 0};
// Lambert term clamped to [0..1]; In.Normal is interpolated so it may no
// longer be exactly unit length here.
Out.Color = ambient + matColor * saturate(dot(light, In.Normal));
return Out;
};
I have also tried setting different render states for Depth-Stencil but wasn't successful.
project files
I figure it out! this is a Depth Buffer(Z-Buffer) issue, you can enable Z-Buffer in your code, either by fixed pipeline or in the shader.
To enable z-buffer in fixed pipeline:
First add the following code when creating D3D deivce
d3dpp.EnableAutoDepthStencil = TRUE ;
d3dpp.AutoDepthStencilFormat = D3DFMT_D16 ;
Then enable z-buffer before drawing
device->SetRenderState(D3DRS_ZENABLE, TRUE) ;
At last, clear z-buffer in render function
device->Clear( 0, NULL, D3DCLEAR_TARGET | D3DCLEAR_ZBUFFER, D3DCOLOR_XRGB(0,0,0), 1.0f, 0 );
I am setting an HLSL effect variable in the following way in a number of places.
// Look up the effect's "Colour" variable once and set it from a line's
// D3DXCOLOR (implicitly convertible to a float[4]).
extern ID3D10EffectVectorVariable* pColour;
pColour = pEffect->GetVariableByName("Colour")->AsVector();
pColour->SetFloatVector(temporaryLines[i].colour);
In one of the places it is set in a loop, each line in the vector temporaryLines has a D3DXCOLOR variable associated with it. The most annoying thing about this problem is that it actually works on rare occasions, but most of the time it doesn't. Are there any known issues with this kind of code?
Here it works:
// Draws one indexed mesh: binds layout and buffers, sets the effect's
// colour/transform/rotation variables, applies the pass, and issues the
// draw. (This is the case where SetFloatVector works reliably.)
void GameObject::Draw(D3DMATRIX matView, D3DMATRIX matProjection)
{
device->IASetInputLayout(pVertexLayout);
mesh.SetTopology();//TODO should not be done multiple times
// select which vertex buffer and index buffer to display
UINT stride = sizeof(VERTEX);
UINT offset = 0;
device->IASetVertexBuffers(0, 1, mesh.PBuffer(), &stride, &offset);
device->IASetIndexBuffer(mesh.IBuffer(), DXGI_FORMAT_R32_UINT, 0);
pColour->SetFloatVector(colour); // set before Apply() so it reaches the GPU
// create a scale matrix
D3DXMatrixScaling(&matScale, scale.x, scale.y, scale.z);
// create a rotation matrix
D3DXMatrixRotationYawPitchRoll(&matRotate, rotation.y, rotation.x, rotation.z);
// create a position matrix
D3DXMatrixTranslation(&matTranslation, position.x, position.y, position.z);
// combine the matrices and render
matFinal =
matScale *
matRotate *
matTranslation *
matView * matProjection;
pTransform->SetMatrix(&matFinal._11);
pRotation->SetMatrix(&matRotate._11); // set the rotation matrix in the effect
pPass->Apply(0); // commits all effect-variable changes for this draw
device->DrawIndexed(mesh.Indices(), 0, 0); //input specific
}
Here is occasionally works:
// Draws every queued line with its own colour and transform, then clears
// the queue. (This is the case where SetFloatVector only works sometimes;
// note that pPass->Apply(0) is called per line, which is what actually
// commits each SetFloatVector to the GPU.)
void BatchLineRenderer::RenderLines(D3DXMATRIX matView, D3DXMATRIX matProjection)
{
device->IASetInputLayout(pVertexLayout);
device->IASetPrimitiveTopology(D3D10_PRIMITIVE_TOPOLOGY_LINESTRIP);
// select which vertex buffer and index buffer to display
UINT stride = sizeof(LINE);
UINT offset = 0;
device->IASetVertexBuffers(0, 1, &pBuffer, &stride, &offset);
device->IASetIndexBuffer(iBuffer, DXGI_FORMAT_R32_UINT, 0);
allLines = temporaryLines.size();
for(int i = 0; i < allLines; i++)
{
pColour->SetFloatVector(temporaryLines[i].colour); // in the line loop too?
// combine the matrices and render
D3DXMATRIX matFinal =
temporaryLines[i].scale *
temporaryLines[i].rotation *
temporaryLines[i].position *
matView * matProjection;
pTransform->SetMatrix(&matFinal._11);
pRotation->SetMatrix(&temporaryLines[i].rotation._11); // set the rotation matrix in the effect
pPass->Apply(0); // must re-apply per line so the new colour takes effect
device->DrawIndexed(2, 0, 0); // each line = 2 indices
}
temporaryLines.clear(); // queue is consumed every frame
}
the effect file:
// Effect-level globals, set from C++ via GetVariableByName().
float4x4 Transform; // a matrix to store the combined world-view-projection transform
float4x4 Rotation; // a matrix to store the rotation transform (for normals)
float4 LightVec = {0.612f, 0.3535f, 0.612f, 0.0f}; // the light's vector
float4 LightCol = {1.0f, 1.0f, 1.0f, 1.0f}; // the light's color
float4 AmbientCol = {0.3f, 0.3f, 0.3f, 1.0f}; // the ambient light's color
float4 Colour; // per-object/per-line tint, set before each draw
// a struct for the vertex shader return value
struct VSOut
{
float4 Col : COLOR; // vertex normal
float4 Pos : SV_POSITION; // vertex screen coordinates
};
// the vertex shader
VSOut VS(float4 Norm : NORMAL, float4 Pos : POSITION)
{
VSOut Output;
Output.Pos = mul(Pos, Transform); // transform the vertex from 3D to 2D
Output.Col = AmbientCol; // set the vertex color to the input's color
float4 Normal = mul(Norm, Rotation);
Output.Col += saturate(dot(Normal, LightVec)) * LightCol * Colour; // add the diffuse and passed in light
return Output; // send the modified vertex data to the Rasterizer Stage
}
// the pixel shader
float4 PS(float4 Col : COLOR) : SV_TARGET
{
return Col; // set the pixel color to the color passed in by the Rasterizer Stage
}
// the primary technique
technique10 Technique_0
{
// the primary pass
pass Pass_0
{
SetVertexShader(CompileShader(vs_4_0, VS()));
SetGeometryShader(NULL);
SetPixelShader(CompileShader(ps_4_0, PS()));
}
}
So the Colour HLSL variable has not been defined inside a ConstantBuffer, just a normal shader variable.
Perhaps the variable should rather be defined in a Constant buffer, updateblae per frame? Similar to how the world and view matrices should be defined in. At least then the GPU knows you want to update the colour variable each time you render. (As you are updating the value before you draw).
// Constants updated every frame.
cbuffer cbChangesEveryFrame
{
// World and view matrices plus the per-draw colour.
// NOTE(review): despite the surrounding text calling these "the MVP
// matrices", Projection is not a member here -- confirm it lives in a
// separate, less frequently updated buffer.
matrix World;
matrix View;
float4 Colour;
}
Another point I would consider is to get the pointer to the technique desc everytime before the draw call (or pass through loop),
and not reuse it, seems to also make a difference.
// Re-query the technique description each time, then apply and draw every
// pass. NOTE(review): "device->device->" suggests a wrapper object holding
// the raw D3D device -- verify against the surrounding class.
//Initiate the pass through loop for the shader effect.
technique->GetDesc(&desc);
for (UINT p=0; p<desc.Passes; p++)
{
//Apply this pass through.
technique->GetPassByIndex(p)->Apply(0);
//draw indexed, instanced.
device->device->DrawIndexedInstanced(indicesCount, (UINT) instanceCount, 0, 0, 0);
}