Passing normal data to shader - C++

I have written some simple code to render a few objects with DirectX 11.
The position is passed to the shader correctly. However, the normals seem to get lost somewhere along the way. I changed the shader to output the normals' value as a color (just for debugging) and I get a black box ((0,0,0) normals for every vertex?), in the right position:
Note that on the right bar I can see my NORMAL values (they are right!), but in the "locals" only the position is set and the rest of the values are NaN. Why?
The shader:
... //some constants
struct VertexInputType
{
float4 position : POSITION;
float2 tex : TEXCOORD;
float3 normal : NORMAL;
float3 tangent : TANGENT;
//float3 binormal : BINORMAL;
};
struct VS_OUTPUT
{
float4 Pos : SV_POSITION;
float4 worldPos : POSITION;
float2 TexCoord : TEXCOORD;
float3 normal : NORMAL;
float3 tangent : TANGENT;
};
//VS_OUTPUT VS(float4 inPos : POSITION, float2 inTexCoord : TEXCOORD, float3 inNormal : NORMAL, float3 tangent : TANGENT)
VS_OUTPUT VS(VertexInputType input)
{
VS_OUTPUT output;
output.Pos = mul(input.position, WVP);
output.worldPos = mul(input.position, World);
output.normal = input.normal;
return output;
}
float4 PS(VS_OUTPUT input) : SV_TARGET
{
return float4(input.normal*100, 1);
}
technique10 RENDER
{
pass P0
{
SetVertexShader( CompileShader( vs_4_0, VS() ) );
// SetGeometryShader( CompileShader( gs_4_0, GS() ) );
SetPixelShader( CompileShader( ps_4_0, PS() ) );
SetBlendState( SrcAlphaBlendingAdd, float4( 0.0f, 0.0f, 0.0f, 0.0f ), 0xFFFFFFFF );
}
}
During rendering I use:
UINT stride = sizeof(Vertex);
UINT offset = 0;
context->IASetVertexBuffers(0, 1, &buffers->vertexBuffer, &stride, &offset); //set vertex buffer
context->IASetIndexBuffer(buffers->indexBuffer, DXGI_FORMAT_R16_UINT, 0); //set index buffer
for(int i=0; i<structure.subsets.size(); i++){
//set matrices
DirectX::XMFLOAT4X4 view = camera->getView();
DirectX::XMMATRIX camView = XMLoadFloat4x4(&view);
DirectX::XMFLOAT4X4 projection = camera->getProjection();
DirectX::XMMATRIX camProjection = XMLoadFloat4x4(&projection);
DirectX::XMMATRIX worldViewProjectionMatrix = objectWorldMatrix * camView * camProjection;
//set the constants per object
ConstantBufferStructure constantsPerObject;
constantsPerObject.worldViewProjection = XMMatrixTranspose(worldViewProjectionMatrix);
constantsPerObject.world = XMMatrixTranspose(objectWorldMatrix);
//bind constants per object to constant buffer and send it to vertex and pixel shaders
context->UpdateSubresource(constantBuffer, 0, NULL, &constantsPerObject, 0, 0);
context->VSSetConstantBuffers(0, 1, &constantBuffer);
context->PSSetConstantBuffers(0, 1, &constantBuffer);
//context->PSSetSamplers(0,1,&m_sampleState);
context->RSSetState(RSCullDefault);
int start = structure.subsets[i]->getVertexIndexStart();
int count = structure.subsets[i]->getVertexIndexAmmount();
context->DrawIndexed(count, start, 0);
}
And for the shader initialization:
// Create the vertex shader
hr = device->CreateVertexShader( pVSBlob->GetBufferPointer(), pVSBlob->GetBufferSize(), NULL, &vertexShader );
//create the input layout
VertexLayoutDescirption layoutDescription; //gives us the data corresponding to the Vertex structure
hr = device->CreateInputLayout(layoutDescription.layout, layoutDescription.entriesCount, pVSBlob->GetBufferPointer(), pVSBlob->GetBufferSize(), &*vertexLayout );
pVSBlob->Release();
context->IASetInputLayout( *vertexLayout );
//compile the pixel shader
ID3DBlob* pPSBlob = NULL;
CompileShaderFromFile( C::toWChar(C::toString(pixelShaderFileName)), "PS", "ps_4_0", &pPSBlob );
// Create the pixel shader
hr = device->CreatePixelShader( pPSBlob->GetBufferPointer(), pPSBlob->GetBufferSize(), NULL, &pixelShader );
Where:
struct Vertex{//vertex structure
Vertex() : weightCount(0){}
Vertex(float x, float y, float z, float u, float v, float nx, float ny, float nz, float tx, float ty, float tz)
: position(x, y, z), textureCoordinates(u, v), normals(nx, ny, nz), tangents(tx, ty, tz), weightCount(0){}
Vertex(DirectX::XMFLOAT3 position, DirectX::XMFLOAT2 textureCoordinates, DirectX::XMFLOAT3 normals, DirectX::XMFLOAT3 biTangents)
: position(position), textureCoordinates(textureCoordinates), normals(normals), tangents(tangents), weightCount(0){}
DirectX::XMFLOAT3 position;
DirectX::XMFLOAT2 textureCoordinates;
DirectX::XMFLOAT3 normals;
DirectX::XMFLOAT3 tangents;
DirectX::XMFLOAT3 biTangents;
//will not be sent to shader (and used only by skinned models)
int startWeightIndex; //index in Subset::weights (from 0 to X for each subset separately)
int weightCount; //=0 means that it's not skinned vertex
};
/* will be used by Shader; should correspond to Vertex (the data that we want to transfer to the shader) */
struct VertexLayoutDescirption{
D3D11_INPUT_ELEMENT_DESC layout[4]; //the input layout
UINT entriesCount; //the number of elements in layout[], will also be used by Shader
VertexLayoutDescirption(){
entriesCount = 4;
for(UINT i=0; i<entriesCount; i++){
layout[i].SemanticIndex = 0;
layout[i].Format = DXGI_FORMAT_R32G32B32_FLOAT;
layout[i].InputSlot = 0;
layout[i].InputSlotClass = D3D11_INPUT_PER_VERTEX_DATA;
layout[i].InstanceDataStepRate = 0;
}
layout[0].SemanticName ="POSITION";
layout[0].AlignedByteOffset = 0; //(not D3D11_APPEND_ALIGNED_ELEMENT)
layout[1].SemanticName ="TEXCOORD";
layout[1].AlignedByteOffset = 12; //or D3D11_APPEND_ALIGNED_ELEMENT
layout[2].SemanticName ="NORMAL";
layout[2].AlignedByteOffset = 20; //or D3D11_APPEND_ALIGNED_ELEMENT
layout[3].SemanticName ="TANGENT";
layout[3].AlignedByteOffset = 32; //or D3D11_APPEND_ALIGNED_ELEMENT
}
};
The box model:
/*top vertices*/
structure.vertices[0] = Vertex(/*pos*/ -1.0f, +1.0f, -1.0f, /*uv*/ 1.0f, 1.0f, /*normals*/ 0.0f, 1.0f, -1.0f, /*tan*/ +1.0f, -1.0f, 1.0f);
structure.vertices[1] = Vertex(/*pos*/ +1.0f, +1.0f, -1.0f, /*uv*/ 0.0f, 1.0f, /*normals*/ 0.0f, 1.0f, +1.0f, /*tan*/ +1.0f, -1.0f, 1.0f);
structure.vertices[2] = Vertex(/*pos*/ +1.0f, +1.0f, +1.0f, /*uv*/ 1.0f, 0.0f, /*normals*/ 0.0f, 1.0f, +1.0f, /*tan*/ +1.0f, +1.0f, 1.0f);
structure.vertices[3] = Vertex(/*pos*/ -1.0f, +1.0f, +1.0f, /*uv*/ 0.0f, 0.0f, /*normals*/ 0.0f, 1.0f, -1.0f, /*tan*/ +1.0f, +1.0f, 1.0f);
/*bottom vertices*/
structure.vertices[4] = Vertex(/*pos*/ -1.0f, -1.0f, -1.0f, /*uv*/ 1.0f, 0.0f, /*normals*/ 0.0f, 1.0f, -1.0f, /*tan*/ -1.0f, -1.0f, 1.0f);
structure.vertices[5] = Vertex(/*pos*/ +1.0f, -1.0f, -1.0f, /*uv*/ 0.0f, 0.0f, /*normals*/ 0.0f, 1.0f, +1.0f, /*tan*/ -1.0f, -1.0f, 1.0f);
structure.vertices[6] = Vertex(/*pos*/ +1.0f, -1.0f, +1.0f, /*uv*/ 1.0f, 1.0f, /*normals*/ 0.0f, 1.0f, +1.0f, /*tan*/ -1.0f, +1.0f, 1.0f);
structure.vertices[7] = Vertex(/*pos*/ -1.0f, -1.0f, +1.0f, /*uv*/ 0.0f, 1.0f, /*normals*/ 0.0f, 1.0f, -1.0f, /*tan*/ -1.0f, +1.0f, 1.0f);
buffers = new Buffers();
D3D11_BUFFER_DESC bd;
ZeroMemory(&bd, sizeof(bd));
bd.Usage = D3D11_USAGE_DEFAULT; //D3D11_USAGE_DYNAMIC
bd.ByteWidth = sizeof(Vertex) * structure.getVerticesCount();
bd.BindFlags = D3D11_BIND_VERTEX_BUFFER;
bd.CPUAccessFlags = 0;
D3D11_SUBRESOURCE_DATA InitData;
ZeroMemory(&InitData, sizeof(InitData));
InitData.pSysMem = structure.vertices;
if(device->CreateBuffer(&bd, &InitData, &buffers->vertexBuffer) != S_OK){
return false;
}
... //index buffer
Why have the normals not been passed to the shader while the position was? What did I miss?

In the shader file, try using float3 normal : TEXCOORD1; or float3 normal : TEXCOORD2; (or any TEXCOORD semantic with any index) instead of float3 normal : NORMAL; in the VS_OUTPUT structure.
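For illustration, a minimal sketch of the suggested change (only the output semantics are renamed; the rest of the shader stays as it is):
struct VS_OUTPUT
{
float4 Pos : SV_POSITION;
float4 worldPos : POSITION;
float2 TexCoord : TEXCOORD0;
float3 normal : TEXCOORD1; //was NORMAL
float3 tangent : TEXCOORD2; //was TANGENT
};
The values passed from the vertex shader to the pixel shader are matched by semantic, so moving the normal onto a plain TEXCOORDn interpolant sidesteps whatever goes wrong with the NORMAL semantic here.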

Related

Directx-11 Two vertex buffers and two input slots

I have a problem with setting two separate vertex buffers (and input slots): one buffer must contain the vertex positions, the second one the color data. I found this problem here:
Direct3D multiple vertex buffers, non interleaved elements
So I followed these instructions but got an error message box:
Error Code: E_INVALIDARG (0x80070057)
Calling: md3dDevice->CreateBuffer(&vbd2, &initData2, &mBoxVB2)
Here's the code:
//layout array
D3D11_INPUT_ELEMENT_DESC vertexDesc3[] =
{
{"POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0},
{"COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 1, 0, D3D11_INPUT_PER_VERTEX_DATA, 0}
};
D3DX11_PASS_DESC passDesc;
mTech->GetPassByIndex(0)->GetDesc(&passDesc);
HR(md3dDevice->CreateInputLayout(vertexDesc3, 2, passDesc.pIAInputSignature,
passDesc.IAInputSignatureSize, &mInputLayout));
// buffers
ID3D11Buffer* mBoxVB;
ID3D11Buffer* mBoxVB2;
ID3D11Buffer* buffers[2];
buffers[0] = mBoxVB;
buffers[1] = mBoxVB2;
XMFLOAT3 vertex[] =
{
XMFLOAT3(-1.0f, -1.0f, -1.0f) ,
XMFLOAT3(-1.0f, +1.0f, -1.0f) ,
XMFLOAT3(+1.0f, +1.0f, -1.0f) ,
XMFLOAT3(+1.0f, -1.0f, -1.0f) ,
XMFLOAT3(-1.0f, -1.0f, +1.0f) ,
XMFLOAT3(-1.0f, +1.0f, +1.0f) ,
XMFLOAT3(+1.0f, +1.0f, +1.0f) ,
XMFLOAT3(+1.0f, -1.0f, +1.0f)
};
// vertex buffer
D3D11_BUFFER_DESC vbd;
vbd.Usage = D3D11_USAGE_IMMUTABLE;
vbd.ByteWidth = sizeof(XMFLOAT3) * 8;
vbd.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vbd.CPUAccessFlags = 0;
vbd.MiscFlags = 0;
vbd.StructureByteStride = 0;
D3D11_SUBRESOURCE_DATA vinitData;
vinitData.pSysMem = vertex;
HR(md3dDevice->CreateBuffer(&vbd, &vinitData, &mBoxVB));
XMFLOAT4 color[] =
{
(const float*)&Colors::White ,
(const float*)&Colors::Black ,
(const float*)&Colors::Red ,
(const float*)&Colors::Green ,
(const float*)&Colors::Blue ,
(const float*)&Colors::Yellow ,
(const float*)&Colors::Cyan ,
(const float*)&Colors::Magenta
};
// where the namespace Colors is defined like
namespace Colors
{
XMGLOBALCONST XMVECTORF32 White = {1.0f, 1.0f, 1.0f, 1.0f};
XMGLOBALCONST XMVECTORF32 Black = {0.0f, 0.0f, 0.0f, 1.0f};
XMGLOBALCONST XMVECTORF32 Red = {1.0f, 0.0f, 0.0f, 1.0f};
XMGLOBALCONST XMVECTORF32 Green = {0.0f, 1.0f, 0.0f, 1.0f};
XMGLOBALCONST XMVECTORF32 Blue = {0.0f, 0.0f, 1.0f, 1.0f};
XMGLOBALCONST XMVECTORF32 Yellow = {1.0f, 1.0f, 0.0f, 1.0f};
XMGLOBALCONST XMVECTORF32 Cyan = {0.0f, 1.0f, 1.0f, 1.0f};
XMGLOBALCONST XMVECTORF32 Magenta = {1.0f, 0.0f, 1.0f, 1.0f};
};
// color buffer
D3D11_BUFFER_DESC vbd2;
vbd2.Usage = D3D11_USAGE_IMMUTABLE;
vbd2.ByteWidth = sizeof(XMFLOAT4) * 8;
vbd2.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vbd2.CPUAccessFlags = 0;
vbd2.MiscFlags = 0;
vbd2.StructureByteStride = 0;
D3D11_SUBRESOURCE_DATA initData2;
initData2.pSysMem = color;
// here is our problem:
HR(md3dDevice->CreateBuffer(&vbd2, &initData2, &mBoxVB2));
// inside DrawScene():
UINT stride[] = {sizeof(XMFLOAT3), sizeof(XMFLOAT4)};
UINT offset[] = {0,0};
md3dImmediateContext->IASetVertexBuffers(0, 2, buffers, stride, offset);
md3dImmediateContext->IASetIndexBuffer(mBoxIB, DXGI_FORMAT_R32_UINT, 0);
// and the shaders
cbuffer cbPerObject
{
float4x4 gWorldViewProj;
};
struct VertexIn
{
float3 PosL : POSITION;
float4 Color : COLOR;
};
struct VertexOut
{
float4 PosH : SV_POSITION;
float4 Color : COLOR;
};
VertexOut VS(VertexIn vin)
{
VertexOut vout;
vout.PosH = mul(float4(vin.PosL, 1.0f), gWorldViewProj);
vout.Color = vin.Color;
return vout;
}
float4 PS(VertexOut pin) : SV_Target
{
return pin.Color;
}
technique11 ColorTech
{
pass P0
{
SetVertexShader( CompileShader( vs_5_0, VS() ) );
SetGeometryShader( NULL );
SetPixelShader( CompileShader( ps_5_0, PS() ) );
}
}
What am I doing wrong?
BuildFX():
DWORD shaderFlags = 0;
#if defined( DEBUG ) || defined( _DEBUG )
shaderFlags |= D3D10_SHADER_DEBUG;
shaderFlags |= D3D10_SHADER_SKIP_OPTIMIZATION;
#endif
ID3D10Blob* compiledShader = 0;
ID3D10Blob* compilationMsgs = 0;
HRESULT hr = D3DX11CompileFromFile(L"FX/color.fx", 0, 0, 0, "fx_5_0", shaderFlags,
0, 0, &compiledShader, &compilationMsgs, 0);
// compilationMsgs can store errors or warnings.
if( compilationMsgs != 0 )
{
MessageBoxA(0, (char*)compilationMsgs->GetBufferPointer(), 0, 0);
ReleaseCOM(compilationMsgs);
}
// Even if there are no compilationMsgs, check to make sure there were no other errors.
if(FAILED(hr))
{
DXTrace(__FILE__, (DWORD)__LINE__, hr, L"D3DX11CompileFromFile", true);
}
HR(D3DX11CreateEffectFromMemory(compiledShader->GetBufferPointer(), compiledShader->GetBufferSize(),
0, md3dDevice, &mFX));
// Done with compiled shader.
ReleaseCOM(compiledShader);
mTech = mFX->GetTechniqueByName("ColorTech");
mfxWorldViewProj = mFX->GetVariableByName("gWorldViewProj")->AsMatrix();
vbd2 is uninitialised: you've copy/pasted the code from above and not changed vbd to vbd2, so CreateBuffer receives a garbage descriptor and fails with E_INVALIDARG.
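A defensive pattern that avoids this whole class of E_INVALIDARG failures is to zero the descriptor before filling it in, so every field starts from a known state; a minimal sketch using the names above:
D3D11_BUFFER_DESC vbd2;
ZeroMemory(&vbd2, sizeof(vbd2)); //all fields start at 0
vbd2.Usage = D3D11_USAGE_IMMUTABLE;
vbd2.ByteWidth = sizeof(XMFLOAT4) * 8;
vbd2.BindFlags = D3D11_BIND_VERTEX_BUFFER;
//CPUAccessFlags, MiscFlags and StructureByteStride stay 0
D3D11_SUBRESOURCE_DATA initData2;
ZeroMemory(&initData2, sizeof(initData2));
initData2.pSysMem = color;
HR(md3dDevice->CreateBuffer(&vbd2, &initData2, &mBoxVB2));
Note also that buffers[0] and buffers[1] are copied before CreateBuffer has filled mBoxVB and mBoxVB2, so the array holds stale pointers; assign them only after both buffers have been created.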

OpenGL, two objects moving independently

I need to move two objects in OpenGL independently.
This is vertex positions array:
const float vertexPositions[] = {
0.25f, 0.25f, 0.0f, 1.0f,
0.25f, -0.25f, 0.0f, 1.0f,
-0.25f, -0.25f, 0.0f, 1.0f,
0.75f, 0.45f, 0.0f, 1.0f,
0.45f, -0.45f, 0.0f, 1.0f,
-0.65f, -0.95f, 0.0f, 1.0f,
};
These go into a buffer, and then the following command draws the triangles:
glDrawArrays(GL_TRIANGLES, 0, 3*2);
Below is the shader part responsible for the motion:
#version 330
layout(location = 0) in vec4 position;
uniform float loopDuration;
uniform float time;
void main()
{
float timeScale = 3.14159f * 2.0f / loopDuration;
float currTime = mod(time, loopDuration);
vec4 totalOffset = vec4(
cos(currTime * timeScale) * 0.5f,
sin(currTime * timeScale) * 0.5f,
0.0f,
0.0f);
gl_Position = position + totalOffset;
}
Unfortunately it does not work as I expected: instead of moving independently, the triangles seem to be joined together, as if they were on a plate of glass. What can I do to translate and rotate them independently? How can I do it on the GPU, not the CPU?
I think you're following this tutorial. In that case I suggest removing the last 12 floats from vertexPositions, so you get the following:
const float vertexPositions[] = {
0.25f, 0.25f, 0.0f, 1.0f,
0.25f, -0.25f, 0.0f, 1.0f,
-0.25f, -0.25f, 0.0f, 1.0f,
};
Then change glDrawArrays(GL_TRIANGLES, 0, 3*2); to glDrawArrays(GL_TRIANGLES, 0, 3);. In your display() function, paste the following after the first call to glDrawArrays():
glUniform1f(loopDurationUnf, 2.5f);
glDrawArrays(GL_TRIANGLES, 0, 3);
This should draw the same triangle again, moving at twice the speed.
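Put together, the drawing part of display() might look like the sketch below (loopDurationUnf, timeUnf and theProgram are assumed to match the uniform and program handles from the tutorial):
glUseProgram(theProgram);
glUniform1f(timeUnf, glutGet(GLUT_ELAPSED_TIME) / 1000.0f); //shared time uniform
glUniform1f(loopDurationUnf, 5.0f); //first triangle: one loop every 5 s
glDrawArrays(GL_TRIANGLES, 0, 3);
glUniform1f(loopDurationUnf, 2.5f); //second triangle: same geometry, twice the speed
glDrawArrays(GL_TRIANGLES, 0, 3);
glUseProgram(0);
Each draw call reuses the same three vertices; only the uniforms differ between the calls, so the two triangles move independently even though all the work is done on the GPU.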

DirectX11 Shader Compilation Issue

I'm working on a simple DirectX application to display a couple of triangles together as a tetrahedron, but it keeps crashing at start. I checked with the VS2012 debugger: the error occurs at the stage where the shader is supposed to be compiled from an .fx file, so I assume it's got something to do with the shader. I have no idea what I did wrong. Below is the code of the shader I'm using. Assistance required.
struct Light
{
float3 pos;
float4 ambient;
float4 diffuse;
};
cbuffer cbPerFrame
{
Light light;
};
cbuffer cbPerObject
{
float4x4 WVP;
float4x4 World;
};
struct VS_OUTPUT
{
float4 Pos : SV_POSITION;
float4 worldPos : POSITION;
float4 color : COLOR;
float3 normal : NORMAL;
};
VS_OUTPUT VS(float4 inPos : POSITION, float4 inColor : COLOR, float3 normal : NORMAL)
{
VS_OUTPUT output;
output.Pos = mul(inPos, WVP);
output.worldPos = mul(inPos, World);
output.normal = mul(normal, World);
output.color = inColor;
return output;
}
float4 PS(VS_OUTPUT input) : SV_TARGET
{
input.normal = normalize(input.normal);
float4 diffuse = input.color;
float3 finalColor = float3(0.0f, 0.0f, 0.0f);
//Create the vector between light position and pixels position
float3 lightToPixelVec = light.pos - input.worldPos;
//Add the ambient light
float3 finalAmbient = diffuse * light.ambient;
//Turn lightToPixelVec into a unit length vector describing
//the pixels direction from the lights position
lightToPixelVec /= d;
//Calculate how much light the pixel gets by the angle
//in which the light strikes the pixels surface
float howMuchLight = dot(lightToPixelVec, input.normal);
//If light is striking the front side of the pixel
if( howMuchLight > 0.0f )
{
//Add light to the finalColor of the pixel
finalColor += diffuse * light.diffuse;
}
//make sure the values are between 1 and 0, and add the ambient
finalColor = saturate(finalColor + finalAmbient);
//Return Final Color
return float4(finalColor, diffuse.a);
}
Here's the part where the compilation is supposed to happen:
bool InitScene()
{
//Compile Shaders from shader file
hr = D3DX11CompileFromFile(L"Effects.fx", 0, 0, "VS", "vs_4_0", 0, 0, 0,
&VS_Buffer, 0, 0);
if(FAILED(hr))
{
MessageBox(0, L"Shader Compilation - Failed",
L"Error", MB_OK);
return false;
}
hr = D3DX11CompileFromFile(L"Effects.fx", 0, 0, "PS", "ps_4_0", 0, 0, 0,
&PS_Buffer, 0, 0);
//Create the Shader Objects
hr = d3d11Device->CreateVertexShader(VS_Buffer->GetBufferPointer(),
VS_Buffer->GetBufferSize(), NULL, &VS);
hr = d3d11Device->CreatePixelShader(PS_Buffer->GetBufferPointer(),
PS_Buffer->GetBufferSize(), NULL, &PS);
//Set Vertex and Pixel Shaders
d3d11DevCon->VSSetShader(VS, 0, 0);
d3d11DevCon->PSSetShader(PS, 0, 0);
light.pos = XMFLOAT3(0.25f, 0.5f, -1.0f);
light.ambient = XMFLOAT4(0.2f, 0.2f, 0.2f, 1.0f);
light.diffuse = XMFLOAT4(1.0f, 1.0f, 1.0f, 1.0f);
//X,Y,Z,R,G,B,A,NX,NY,NZ
//Create the vertex buffer
Vertex v[] =
{
Vertex( 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f),
Vertex( -0.5f, -0.5f, 0.5f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f),
Vertex( 0.5f, -0.5f, 0.5f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f),
Vertex( 0.0f, -0.5f, 0.5f, 1.0f, 0.0f, 0.0f, 1.0f,0.0f, 1.0f, 0.0f)
};
DWORD indices[] = {
//Front
0, 1, 2,
//Left
0, 1, 3,
//Right
0, 2, 3,
//Bottom
1, 2, 3
};
D3D11_BUFFER_DESC indexBufferDesc;
ZeroMemory( &indexBufferDesc, sizeof(indexBufferDesc) );
indexBufferDesc.Usage = D3D11_USAGE_DEFAULT;
indexBufferDesc.ByteWidth = sizeof(DWORD) * 4 * 3;
indexBufferDesc.BindFlags = D3D11_BIND_INDEX_BUFFER;
indexBufferDesc.CPUAccessFlags = 0;
indexBufferDesc.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA iinitData;
iinitData.pSysMem = indices;
d3d11Device->CreateBuffer(&indexBufferDesc, &iinitData, &IndexBuffer);
//d3d11DevCon->IASetIndexBuffer( squareIndexBuffer, DXGI_FORMAT_R32_UINT, 0);
D3D11_BUFFER_DESC vertexBufferDesc;
ZeroMemory( &vertexBufferDesc, sizeof(vertexBufferDesc) );
vertexBufferDesc.Usage = D3D11_USAGE_DEFAULT;
vertexBufferDesc.ByteWidth = sizeof( Vertex ) * 4;
vertexBufferDesc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vertexBufferDesc.CPUAccessFlags = 0;
vertexBufferDesc.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA vertexBufferData;
ZeroMemory( &vertexBufferData, sizeof(vertexBufferData) );
vertexBufferData.pSysMem = v;
hr = d3d11Device->CreateBuffer( &vertexBufferDesc, &vertexBufferData, &VertBuffer);
//Set the vertex buffer
UINT stride = sizeof( Vertex );
UINT offset = 0;
//d3d11DevCon->IASetVertexBuffers( 0, 1, &squareVertBuffer, &stride, &offset );
//Create the Input Layout
hr = d3d11Device->CreateInputLayout( layout,
numElements,VS_Buffer->GetBufferPointer(),
VS_Buffer->GetBufferSize(), &vertLayout );
//Set the Input Layout
d3d11DevCon->IASetInputLayout( vertLayout );
//Set Primitive Topology
d3d11DevCon->IASetPrimitiveTopology( D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST );
//Create the Viewport
D3D11_VIEWPORT viewport;
ZeroMemory(&viewport, sizeof(D3D11_VIEWPORT));
viewport.TopLeftX = 0;
viewport.TopLeftY = 0;
viewport.Width = width;
viewport.Height = height;
viewport.MinDepth = 0.0f;
viewport.MaxDepth = 2.0f;
//Set the Viewport
d3d11DevCon->RSSetViewports(1, &viewport);
//Create the buffer to send to the cbuffer in effect file
D3D11_BUFFER_DESC cbbd;
ZeroMemory(&cbbd, sizeof(D3D11_BUFFER_DESC));
cbbd.Usage = D3D11_USAGE_DEFAULT;
cbbd.ByteWidth = sizeof(cbPerObject);
cbbd.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
cbbd.CPUAccessFlags = 0;
cbbd.MiscFlags = 0;
hr = d3d11Device->CreateBuffer(&cbbd, NULL, &cbPerObjectBuffer);
ZeroMemory(&cbbd, sizeof(D3D11_BUFFER_DESC));
cbbd.Usage = D3D11_USAGE_DEFAULT;
cbbd.ByteWidth = sizeof(cbPerFrame);
cbbd.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
cbbd.CPUAccessFlags = 0;
cbbd.MiscFlags = 0;
hr = d3d11Device->CreateBuffer(&cbbd, NULL, &cbPerFrameBuffer);
//Camera information
camPosition = XMVectorSet( -5.0f, 5.0f, 8.0f, 0.0f );
camTarget = XMVectorSet( 0.0f, 0.0f, 0.0f, 0.0f );
camUp = XMVectorSet( 0.0f, 1.0f, 0.0f, 0.0f );
//Set the View matrix
camView = XMMatrixLookAtLH( camPosition, camTarget, camUp );
//Set the Projection matrix
camProjection = XMMatrixPerspectiveFovLH( 0.4f*3.14f, width/height, 1.0f, 1000.0f);
return true;
}
Your Vertex shader compiles, but your Pixel Shader doesn't:
lightToPixelVec /= d;
d is undefined
Since your code only checks the VS compilation result, it makes sense that it crashes when trying to create the pixel shader (you pass it an invalid pointer).
As mentioned in the comments, it's also important to check the feature level; if you develop for desktop/laptop, pretty much any device should be at least feature level 10.1.
For phones you should use one of these profiles (whichever matches best):
ps_4_0_level_9_1, ps_4_0_level_9_2, ps_4_0_level_9_3
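For the crash itself, the immediate HLSL fix is to define d before using it (e.g. float d = length(lightToPixelVec); just before the division). Independently of that, the PS compile result should be checked the same way as the VS one; a sketch, with a hypothetical errorMsgs blob added so the actual compiler message gets shown:
ID3D10Blob* errorMsgs = 0; //receives the compiler output, if any
hr = D3DX11CompileFromFile(L"Effects.fx", 0, 0, "PS", "ps_4_0", 0, 0, 0,
&PS_Buffer, &errorMsgs, 0);
if(FAILED(hr))
{
if(errorMsgs != 0) //show the real error instead of a generic box
MessageBoxA(0, (char*)errorMsgs->GetBufferPointer(), "PS Compilation - Failed", MB_OK);
else
MessageBox(0, L"Shader Compilation - Failed", L"Error", MB_OK);
return false;
}
if(errorMsgs != 0) errorMsgs->Release();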

D3D11_BUFFER_DESC bytewidth "not working"

I'm having some issues with Direct3D, namely with the vertex buffer and its ByteWidth member.
I want to draw two quads, so I create my vertex buffer like so:
struct Vertex
{
XMFLOAT3 pos;
XMFLOAT3 normal;
XMFLOAT2 texCoord;
};
....
void GameWindow::CreateVerticesAndBuffer()
{
Vertex vertices[] =
{
{ XMFLOAT3(-1.0f, -1.0f, -1.0f), XMFLOAT3(0.0f, 0.0f, -1.0f), XMFLOAT2(0.0f, 1.0f) },
{ XMFLOAT3(-1.0f, 1.0f, -1.0f), XMFLOAT3(0.0f, 0.0f, -1.0f), XMFLOAT2(0.0f, 0.0f) },
{ XMFLOAT3(1.0f, 1.0f, -1.0f), XMFLOAT3(0.0f, 0.0f, -1.0f), XMFLOAT2(1.0f, 0.0f) },
{ XMFLOAT3(1.0f, -1.0f, -1.0f), XMFLOAT3(0.0f, 0.0f, -1.0f), XMFLOAT2(1.0f, 1.0f) },
{ XMFLOAT3(-1.0f, -1.0f, 1.0f), XMFLOAT3(0.0f, 0.0f, -1.0f), XMFLOAT2(0.0f, 1.0f) },
{ XMFLOAT3(-1.0f, 1.0f, 1.0f), XMFLOAT3(0.0f, 0.0f, -1.0f), XMFLOAT2(0.0f, 0.0f) },
{ XMFLOAT3(1.0f, 1.0f, 1.0f), XMFLOAT3(0.0f, 0.0f, -1.0f), XMFLOAT2(1.0f, 0.0f) },
{ XMFLOAT3(1.0f, -1.0f, 1.0f), XMFLOAT3(0.0f, 0.0f, -1.0f), XMFLOAT2(1.0f, 1.0f) }
};
D3D11_BUFFER_DESC desc;
ZeroMemory(&desc, sizeof(desc));
desc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
desc.CPUAccessFlags = 0;
desc.Usage = D3D11_USAGE_DEFAULT; //Will not ever change after creation (??)
desc.MiscFlags = 0;
desc.ByteWidth = sizeof(Vertex) * 8;
desc.StructureByteStride = 0;
D3D11_SUBRESOURCE_DATA data;
ZeroMemory(&data, sizeof(data));
data.pSysMem = vertices;
HR(device->CreateBuffer(
&desc,
&data,
&vertexBuffer));
UINT stride = sizeof(Vertex);
UINT offset = 0;
deviceContext->IASetVertexBuffers(0, 1, &vertexBuffer, &stride, &offset);
}
This code produces some weird results as seen here. The back face is mirrored for some reason.
But if I change
desc.ByteWidth = sizeof(Vertex) * 8
to
desc.ByteWidth = sizeof(Vertex) * 9
It is drawn correctly.
Does anyone have any idea why this happens?
EDIT: Here is my CreateInputLayout:
D3D11_INPUT_ELEMENT_DESC inputDesc[] = {
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "NORMAL", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "TEX", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 24, D3D11_INPUT_PER_VERTEX_DATA, 0 }
};
hRes = device->CreateInputLayout(
inputDesc,
ARRAYSIZE(inputDesc),
vertexShaderSource->GetBufferPointer(),
vertexShaderSource->GetBufferSize(),
&vertexInputLayout);
You have specified DXGI_FORMAT_R32G32B32_FLOAT (float3) for your TEX member. Change it to DXGI_FORMAT_R32G32_FLOAT and it should work.
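That is, only the TEX format needs to change; a sketch of the corrected layout (matching the XMFLOAT2 texCoord member at offset 24):
D3D11_INPUT_ELEMENT_DESC inputDesc[] = {
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "NORMAL", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "TEX", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 24, D3D11_INPUT_PER_VERTEX_DATA, 0 } //float2 = 8 bytes
};
With the float3 format, the last vertex's TEX fetch reads 4 bytes past the end of the buffer, which is presumably why padding ByteWidth to 9 vertices appeared to fix it.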

DirectX : Nothing is drawn on screen

I'm trying to develop a program using DirectX 10 to display things on screen.
The thing is, it displays nothing but the color I use to clear the back buffer.
(I apologize for the quite large chunks of code that follow.)
Here is my rendering function:
void DXEngine::renderOneFrame()
{
//First, we clear the back buffer
m_device->ClearRenderTargetView(m_renderTargetView,D3DXCOLOR(0.0f, 0.125f, 0.3f, 1.0f));
//Then, we clear the depth buffer
m_device->ClearDepthStencilView(m_depthStencilView,D3D10_CLEAR_DEPTH,1.0f, 0);
//Update variables
m_worldVariable->SetMatrix((float*)&m_world);
m_viewVariable->SetMatrix((float*)&m_view);
m_projectionVariable->SetMatrix((float*)&m_projection);
//Render the cube
D3D10_TECHNIQUE_DESC techDesc;
m_technique->GetDesc(&techDesc);
for(UINT pass = 0; pass < techDesc.Passes ; pass++){
m_technique->GetPassByIndex(pass)->Apply(0);
m_device->DrawIndexed(36,0,0);
}
m_swapChain->Present(0,0);
}
It is exactly the same as the 5th tutorial on DirectX10 in the DirectX SDK (June 2010) under the "Samples" folder, except it's encapsulated in an object.
My scene is initialized as follows:
HRESULT DXEngine::initStaticScene()
{
HRESULT hr;
//Vertex buffer creation and initialization
Vertex1Pos1Col vertices [] =
{
{ D3DXVECTOR3( -1.0f, 1.0f, -1.0f ), D3DXVECTOR4( 0.0f, 0.0f, 1.0f, 1.0f ) },
{ D3DXVECTOR3( 1.0f, 1.0f, -1.0f ), D3DXVECTOR4( 0.0f, 1.0f, 0.0f, 1.0f ) },
{ D3DXVECTOR3( 1.0f, 1.0f, 1.0f ), D3DXVECTOR4( 0.0f, 1.0f, 1.0f, 1.0f ) },
{ D3DXVECTOR3( -1.0f, 1.0f, 1.0f ), D3DXVECTOR4( 1.0f, 0.0f, 0.0f, 1.0f ) },
{ D3DXVECTOR3( -1.0f, -1.0f, -1.0f ), D3DXVECTOR4( 1.0f, 0.0f, 1.0f, 1.0f ) },
{ D3DXVECTOR3( 1.0f, -1.0f, -1.0f ), D3DXVECTOR4( 1.0f, 1.0f, 0.0f, 1.0f ) },
{ D3DXVECTOR3( 1.0f, -1.0f, 1.0f ), D3DXVECTOR4( 1.0f, 1.0f, 1.0f, 1.0f ) },
{ D3DXVECTOR3( -1.0f, -1.0f, 1.0f ), D3DXVECTOR4( 0.0f, 0.0f, 0.0f, 1.0f ) },
};
D3D10_BUFFER_DESC desc;
desc.Usage = D3D10_USAGE_DEFAULT;
desc.ByteWidth = sizeof(Vertex1Pos1Col) * 8;
desc.BindFlags = D3D10_BIND_VERTEX_BUFFER;
desc.CPUAccessFlags = 0;
desc.MiscFlags = 0;
D3D10_SUBRESOURCE_DATA data;
data.pSysMem = vertices;
hr = m_device->CreateBuffer(&desc,&data,&m_vertexBuffer);
if(FAILED(hr)){
MessageBox(NULL,TEXT("Vertex buffer creation failed"), TEXT("Error"),MB_OK);
return hr;
}
UINT stride = sizeof(Vertex1Pos1Col);
UINT offset = 0;
m_device->IASetVertexBuffers(0,1,&m_vertexBuffer,&stride,&offset);
//Index buffer creation and initialization
DWORD indices[] =
{
3,1,0,
2,1,3,
0,5,4,
1,5,0,
3,4,7,
0,4,3,
1,6,5,
2,6,1,
2,7,6,
3,7,2,
6,4,5,
7,4,6,
};
desc.Usage = D3D10_USAGE_DEFAULT;
desc.ByteWidth = sizeof(DWORD) * 36;
desc.BindFlags = D3D10_BIND_INDEX_BUFFER;
desc.CPUAccessFlags = 0;
desc.MiscFlags = 0;
data.pSysMem = vertices;
hr = m_device->CreateBuffer(&desc,&data,&m_indexBuffer);
if(FAILED(hr)){
MessageBox(NULL,TEXT("Index buffer creation failed"), TEXT("Error"),MB_OK);
return hr;
}
m_device->IASetIndexBuffer(m_indexBuffer,DXGI_FORMAT_R32_FLOAT,0);
//Set the primitive topology, i.e. how indices should be interpreted (here, as a triangle list)
m_device->IASetPrimitiveTopology(D3D10_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
D3DXMatrixIdentity(&m_world);
D3DXVECTOR3 eye(0.0f, 1.0f, -10.0f);
D3DXVECTOR3 at(0.0f, 1.0f, 0.0f);
D3DXVECTOR3 up(0.0f, 1.0f, 0.0f);
D3DXMatrixLookAtLH(&m_view, &eye, &at, &up);
D3DXMatrixPerspectiveFovLH(&m_projection, (float)D3DX_PI * 0.25f, m_width/(FLOAT)m_height, 0.1f, 100.0f);
return hr;
}
Once again, it's the exact same code (but encapsulated) as the tutorial I mentioned earlier.
When I open the tutorial's Visual Studio solution in my IDE, it works and displays nicely what is described in the scene, but when I try to run my "encapsulated" version of this code, nothing shows up but the background color...
Note: my Windows message pump works fine, I can even handle user input the way I want. My application performs the engine initialization correctly (I check every single returned error code and there's nothing but S_OK).
I have no clue where to search now. I've checked my code time and time again and it's exactly the same as the tutorial's; I've checked that everything I encapsulate is set and accessed correctly, etc., but I still can't display anything other than the background color...
I was wondering if anyone here has an idea of what could possibly cause this, or at least a hint on where to look...
EDIT: Effect file used:
//--------------------------------------------------------------------------------------
// File: Tutorial05.fx
//
// Copyright (c) Microsoft Corporation. All rights reserved.
//--------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------
// Constant Buffer Variables
//--------------------------------------------------------------------------------------
matrix World;
matrix View;
matrix Projection;
//--------------------------------------------------------------------------------------
struct VS_INPUT
{
float4 Pos : POSITION;
float4 Color : COLOR;
};
struct PS_INPUT
{
float4 Pos : SV_POSITION;
float4 Color : COLOR;
};
//--------------------------------------------------------------------------------------
// Vertex Shader
//--------------------------------------------------------------------------------------
PS_INPUT VS( VS_INPUT input )
{
PS_INPUT output = (PS_INPUT)0;
output.Pos = mul( input.Pos, World );
output.Pos = mul( output.Pos, View );
output.Pos = mul( output.Pos, Projection );
output.Color = input.Color;
return output;
}
//--------------------------------------------------------------------------------------
// Pixel Shader
//--------------------------------------------------------------------------------------
float4 PS( PS_INPUT input) : SV_Target
{
return input.Color;
}
//--------------------------------------------------------------------------------------
technique10 Render
{
pass P0
{
SetVertexShader( CompileShader( vs_4_0, VS() ) );
SetGeometryShader( NULL );
SetPixelShader( CompileShader( ps_4_0, PS() ) );
}
}
I think this can be the error: the input assembler stage of the D3D (10 and 11) pipeline always expects a DXGI_FORMAT_*_UINT format for index buffers. MSDN confirms this:
"A DXGI_FORMAT that specifies the format of the data in the index buffer. The only formats allowed for index buffer data are 16-bit (DXGI_FORMAT_R16_UINT) and 32-bit (DXGI_FORMAT_R32_UINT) integers."
Then look at your code that binds your buffer to IA:
m_device->IASetIndexBuffer(m_indexBuffer, DXGI_FORMAT_R32_FLOAT, 0);
I think you should use DXGI_FORMAT_R32_UINT for your case, like this:
m_device->IASetIndexBuffer(m_indexBuffer, DXGI_FORMAT_R32_UINT, 0);
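As a rule of thumb, the format passed to IASetIndexBuffer must match the element type the index data was written with; a minimal sketch (m_indexBuffer16 is a hypothetical second buffer created from 16-bit WORD indices):
//indices declared as DWORD (32-bit) -> 32-bit format
m_device->IASetIndexBuffer(m_indexBuffer, DXGI_FORMAT_R32_UINT, 0);
//indices declared as WORD (16-bit) -> 16-bit format
m_device->IASetIndexBuffer(m_indexBuffer16, DXGI_FORMAT_R16_UINT, 0);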