DirectX incorrect texture - C++

My DirectX application does not render the texture correctly. Result:
Expected from VS editor:
As you can see, the cat texture is not drawn completely.
I'm using WaveFrontReader to load the .OBJ and .MTL files and WICTextureLoader to load the PNG/JPG textures.
My HLSL:
cbuffer constants : register(b0)
{
row_major float4x4 transform;
row_major float4x4 projection;
float3 lightvector;
}
struct vs_in
{
float3 position : POS;
float3 normal : NOR;
float2 texcoord : TEX;
float4 color : COL;
};
struct vs_out
{
float4 position : SV_POSITION;
float2 texcoord : TEX;
float4 color : COL;
};
Texture2D mytexture : register(t0);
SamplerState mysampler : register(s0);
vs_out vs_main(vs_in input)
{
float light = clamp(dot(normalize(mul(float4(input.normal, 0.0f), transform).xyz), normalize(-lightvector)), 0.0f, 1.0f) * 0.8f + 0.2f;
vs_out output;
output.position = mul(float4(input.position, 1.0f), mul(transform, projection));
output.texcoord = input.texcoord;
output.color = float4(input.color.rgb * light, input.color.a);
return output;
}
float4 ps_main(vs_out input) : SV_TARGET
{
return mytexture.Sample(mysampler, input.texcoord) * input.color;
}
My preparation:
void Config3DWindow()
{
const wchar_t* tf = L"1.hlsl";
d2d.m_swapChain1->GetBuffer(0, __uuidof(ID3D11Texture2D), reinterpret_cast<void**>(&frameBuffer));
d2d.device->CreateRenderTargetView(frameBuffer, nullptr, &frameBufferView);
frameBuffer->GetDesc(&depthBufferDesc); // based on the framebuffer properties
depthBufferDesc.Format = DXGI_FORMAT_D24_UNORM_S8_UINT;
depthBufferDesc.BindFlags = D3D11_BIND_DEPTH_STENCIL;
CComPtr<ID3DBlob> vsBlob;
D3DCompileFromFile(tf, nullptr, nullptr, "vs_main", "vs_5_0", 0, 0, &vsBlob, nullptr);
d2d.device->CreateVertexShader(vsBlob->GetBufferPointer(), vsBlob->GetBufferSize(), nullptr, &vertexShader);
D3D11_INPUT_ELEMENT_DESC inputElementDesc[] =
{
{ "POS", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "NOR", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "TEX", 0, DXGI_FORMAT_R32G32_FLOAT, 0, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "COL", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_PER_VERTEX_DATA, 0 },
};
d2d.device->CreateInputLayout(inputElementDesc, ARRAYSIZE(inputElementDesc), vsBlob->GetBufferPointer(), vsBlob->GetBufferSize(), &inputLayout);
///////////////////////////////////////////////////////////////////////////////////////////////
CComPtr<ID3DBlob> psBlob;
D3DCompileFromFile(tf, nullptr, nullptr, "ps_main", "ps_5_0", 0, 0, &psBlob, nullptr);
d2d.device->CreatePixelShader(psBlob->GetBufferPointer(), psBlob->GetBufferSize(), nullptr, &pixelShader);
D3D11_BUFFER_DESC constantBufferDesc = {};
constantBufferDesc.ByteWidth = sizeof(Constants) + 0xf & 0xfffffff0;
constantBufferDesc.Usage = D3D11_USAGE_DYNAMIC;
constantBufferDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
constantBufferDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
d2d.device->CreateBuffer(&constantBufferDesc, nullptr, &constantBuffer);
}
Loading the obj:
WaveFrontReader<UINT> wfr;
wfr.Load(L"12221_Cat_v1_l3.oobj");
wfr.LoadMTL(L"12221_Cat_v1_l3.mtl");
obj.CreateDirect3D2(wfr);
CreateDirect3D2() function:
std::vector<float> Vertices;
// float VertexDataX[] = // float3 position, float3 normal, float2 texcoord, float4 color
auto numV = wf.vertices.size();
Vertices.resize(numV * 12);
for (size_t i = 0; i < numV; i++)
{
auto& v = wf.vertices[i];
float* i2 = Vertices.data() + (i * 12);
// position
i2[0] = v.position.x;
i2[1] = v.position.y;
i2[2] = v.position.z;
// normal
i2[3] = v.normal.x;
i2[4] = v.normal.y;
i2[5] = v.normal.z;
// tx
i2[6] = v.textureCoordinate.x;
i2[7] = v.textureCoordinate.y;
// Colors
i2[8] = 1.0f;
i2[9] = 1.0f;
i2[10] = 1.0f;
i2[11] = 1.0f;
}
D3D11_BUFFER_DESC vertexBufferDesc = {};
vertexBufferDesc.ByteWidth = Vertices.size() * sizeof(float);
vertexBufferDesc.Usage = D3D11_USAGE_IMMUTABLE;
vertexBufferDesc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
D3D11_SUBRESOURCE_DATA vertexData = { Vertices.data() }; // in data.h
vertexBuffer = 0;
d2d.device->CreateBuffer(&vertexBufferDesc, &vertexData, &vertexBuffer);
// Indices
std::vector<UINT>& Indices = wf.indices;
D3D11_BUFFER_DESC indexBufferDesc = {};
IndicesSize = Indices.size() * sizeof(UINT);
indexBufferDesc.ByteWidth = IndicesSize;
indexBufferDesc.Usage = D3D11_USAGE_IMMUTABLE;
indexBufferDesc.BindFlags = D3D11_BIND_INDEX_BUFFER;
D3D11_SUBRESOURCE_DATA indexData = { Indices.data() }; // in data.h
indexBuffer = 0;
d2d.device->CreateBuffer(&indexBufferDesc, &indexData, &indexBuffer);
for (auto& ma : wf.materials)
{
CComPtr<ID3D11Resource> tex;
CComPtr<ID3D11ShaderResourceView> texv;
CreateWICTextureFromFile(d2d.device, d2d.context, ma.strTexture, &tex, &texv,0);
if (tex && texv)
{
OBJFT ot;
ot.texture = tex;
ot.textureView = texv;
textures.push_back(ot);
}
tex = 0;
texv = 0;
}
The drawing function:
void Present(OBJF& o, int Count, _3DP& _3, D2D1_COLOR_F bcol)
{
float w = static_cast<float>(depthBufferDesc.Width); // width
float h = static_cast<float>(depthBufferDesc.Height); // height
float n = 1000.0f; // near
float f = 1000000.0f; // far
matrix rotateX = { 1, 0, 0, 0, 0, static_cast<float>(cos(_3.rotation[0])), -static_cast<float>(sin(_3.rotation[0])), 0, 0, static_cast<float>(sin(_3.rotation[0])), static_cast<float>(cos(_3.rotation[0])), 0, 0, 0, 0, 1 };
matrix rotateY = { static_cast<float>(cos(_3.rotation[1])), 0, static_cast<float>(sin(_3.rotation[1])), 0, 0, 1, 0, 0, -static_cast<float>(sin(_3.rotation[1])), 0, static_cast<float>(cos(_3.rotation[1])), 0, 0, 0, 0, 1 };
matrix rotateZ = { static_cast<float>(cos(_3.rotation[2])), -static_cast<float>(sin(_3.rotation[2])), 0, 0, static_cast<float>(sin(_3.rotation[2])), static_cast<float>(cos(_3.rotation[2])), 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 };
matrix scale = { _3.scale[0], 0, 0, 0, 0, _3.scale[1], 0, 0, 0, 0, _3.scale[2], 0, 0, 0, 0, 1 };
matrix translate = { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, _3.translation[0], _3.translation[1], _3.translation[2], 1 };
///////////////////////////////////////////////////////////////////////////////////////////
D3D11_MAPPED_SUBRESOURCE mappedSubresource = {};
d2d.context->Map(constantBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedSubresource);
Constants* constants = reinterpret_cast<Constants*>(mappedSubresource.pData);
constants->Transform = rotateX * rotateY * rotateZ * scale * translate;
constants->Projection = { 2 * n / w, 0, 0, 0, 0, 2 * n / h, 0, 0, 0, 0, f / (f - n), 1, 0, 0, n * f / (n - f), 0 };
constants->LightVector = { 1.0f, 1.0f, 1.0f };
d2d.context->Unmap(constantBuffer, 0);
///////////////////////////////////////////////////////////////////////////////////////////
FLOAT backgroundColor[4] = { 0.00f, 0.00f, 0.00f, 1.0f };
if (bcol.a > 0)
{
backgroundColor[0] = bcol.r;
backgroundColor[1] = bcol.g;
backgroundColor[2] = bcol.b;
backgroundColor[3] = bcol.a;
}
UINT stride = 12 * 4; // vertex size (12 floats: float3 position, float3 normal, float2 texcoord, float4 color)
UINT offset = 0;
D3D11_VIEWPORT viewport = { 0.0f, 0.0f, w, h, 0.0f, 1.0f };
///////////////////////////////////////////////////////////////////////////////////////////
auto deviceContext = d2d.context;
deviceContext->ClearRenderTargetView(frameBufferView, backgroundColor);
deviceContext->ClearDepthStencilView(depthBufferView, D3D11_CLEAR_DEPTH, 1.0f, 0);
deviceContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
deviceContext->IASetInputLayout(inputLayout);
deviceContext->IASetVertexBuffers(0, 1, &o.vertexBuffer.p, &stride, &offset);
deviceContext->IASetIndexBuffer(o.indexBuffer, DXGI_FORMAT_R32_UINT, 0);
deviceContext->VSSetShader(vertexShader, nullptr, 0);
deviceContext->VSSetConstantBuffers(0, 1, &constantBuffer.p);
deviceContext->RSSetViewports(1, &viewport);
deviceContext->PSSetShader(pixelShader, nullptr, 0);
std::vector<ID3D11ShaderResourceView*> rsx;
for (auto& t : o.textures)
rsx.push_back(t.textureView);
ID3D11ShaderResourceView** rr = rsx.data();
deviceContext->PSSetShaderResources(0, rsx.size(), rr);
deviceContext->PSSetSamplers(0, 1, &samplerState.p);
deviceContext->OMSetRenderTargets(1, &frameBufferView.p, depthBufferView);
deviceContext->OMSetDepthStencilState(depthStencilState, 0);
///////////////////////////////////////////////////////////////////////////////////////////
DXGI_RGBA ra = { 1,1,1,1 };
deviceContext->DrawIndexed(o.IndicesSize, 0, 0);
d2d.m_swapChain1->Present(1, 0);
}
Entire project here: https://drive.google.com/open?id=1BbW3DUd20bAwei4KjnkUPwgm5Ia1aRxl

This is what I got after I was able to reproduce the OP's issue on my side:
My only change was that I excluded lighting in the shader code:
vs_out vs_main(vs_in input)
{
float light = 1.0f;
//float light = clamp(dot(normalize(mul(float4(input.normal, 0.0f), transform).xyz), normalize(-lightvector)), 0.0f, 1.0f) * 0.8f + 0.2f;
vs_out output;
output.position = mul(float4(input.position, 1.0f), mul(transform, projection));
output.texcoord = input.texcoord;
output.color = float4(input.color.rgb * light, input.color.a);
return output;
}
Then I noticed the cat's eye appearing on the cat's tail.
That reminded me that a lot of image formats store the image from top to bottom.
OpenGL textures (and probably Direct3D as well) usually have their origin in the lower-left corner. Hence, it's not unusual for texture images to be mirrored vertically (during or after loading the image from file and before sending it to the GPU).
To prove my suspicion, I mirrored the image manually (in GIMP) and then (without re-compiling) got this:
It looks like my suspicion was right.
Something is wrong with the image or texture loading in the OP's loader.
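If the loaded image really is just upside-down relative to the UVs, one possible workaround (a minimal sketch only, not necessarily the right fix for the root cause inside the OP's loader) is to flip the V texture coordinate while filling the vertex buffer in CreateDirect3D2():
// Hypothetical workaround in CreateDirect3D2(): flip V so the texture is sampled bottom-up
i2[6] = v.textureCoordinate.x;
i2[7] = 1.0f - v.textureCoordinate.y; // instead of i2[7] = v.textureCoordinate.y;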

Related

Draw a rectangle with DX11

I need to draw a simple rectangle (not a filled box) with DirectX 11.
I have found this code:
const float x = 0.1;
const float y = 0.1;
const float height = 0.9;
const float width = 0.9;
VERTEX OurVertices[] =
{
{ x, y, 0, col },
{ x + width, y, 0, col },
{ x, y + height, 0, col },
{ x + width, y, 0, col },
{ x + width, y + height, 0 , col },
{ x, y + height, 0, col }
};
static const XMVECTORF32 col = { 1.f, 2.f, 3.f, 4.f };
// this is the function used to render a single frame
void RenderFrameTest(void)
{
// float color[4] = { 0.0f, 0.2f, 0.4f, 1.0f };
float color[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
// clear the back buffer to a deep blue
devcon->ClearRenderTargetView(backbuffer, color);
// select which vertex buffer to display
UINT stride = sizeof(VERTEX);
UINT offset = 0;
devcon->IASetVertexBuffers(0, 1, &pVBuffer, &stride, &offset);
// select which primitive type we are using
// draw the vertex buffer to the back buffer
devcon->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP);
devcon->Draw(sizeof(OurVertices) / sizeof(VERTEX), 0);
swapchain->Present(0, 0);
}
void DrawRectangle(float x, float y, float width, float height, XMVECTORF32 col)
{
D3D11_BUFFER_DESC bd;
ZeroMemory(&bd, sizeof(bd));
bd.Usage = D3D11_USAGE_DYNAMIC;
bd.ByteWidth = sizeof(OurVertices);
bd.BindFlags = D3D11_BIND_VERTEX_BUFFER;
bd.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
HRESULT val = dev->CreateBuffer(&bd, NULL, &pVBuffer); // create the buffer
D3D11_MAPPED_SUBRESOURCE ms;
val = devcon->Map(pVBuffer, NULL, D3D11_MAP_WRITE_DISCARD, NULL, &ms); // map the buffer
memcpy(ms.pData, OurVertices, sizeof(OurVertices)); // copy the data
devcon->Unmap(pVBuffer, NULL);
}
but the result is not what I expected:
I suspect the problem is the OurVertices array and D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP, but I don't have experience with DirectX in general.
Can you help me, please?
You have 6 vertices defined for a rectangle, which means you want to use TriangleList topology and not TriangleStrip topology.
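For example, keeping the six vertices from the question, the render function would change like this (a minimal sketch; alternatively the rectangle could be reduced to four vertices and kept as a strip):
// two triangles, three vertices each
devcon->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
devcon->Draw(sizeof(OurVertices) / sizeof(VERTEX), 0); // 6 vertices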

DirectX 11 Tessellation Shader Not Working

I have a problem with my tessellation shader. It renders when I don't use a Hull Shader or Domain Shader; plain vertex and pixel shaders work fine. Here are my VS and PS shaders:
VOut VShader(float4 position : POSITION, float4 color : COLOR)
{
VOut output;
output.position = mul(world, position);
output.color = color;
return output;
}
float4 PShader(float4 position : SV_POSITION, float4 color : COLOR) : SV_TARGET
{
return color;
}
I am using an orthographic projection to map pixels to their original positions. Here is the problematic shader, which takes float3 vertices and float4 colors as input:
cbuffer cbPerFrame : register(b0)
{
matrix world;
};
struct VS_CONTROL_POINT_INPUT
{
float3 vPosition : POSITION;
};
struct VS_CONTROL_POINT_OUTPUT
{
float3 vPosition : POSITION;
};
struct HS_CONSTANT_DATA_OUTPUT
{
float Edges[3] : SV_TessFactor;
float Inside : SV_InsideTessFactor;
};
struct HS_OUTPUT
{
float3 vPosition : POSITION;
};
HS_CONSTANT_DATA_OUTPUT ConstantHS(InputPatch<VS_CONTROL_POINT_OUTPUT, 3> ip, uint PatchID : SV_PrimitiveID)
{
HS_CONSTANT_DATA_OUTPUT Output;
Output.Edges[0] = Output.Edges[1] = Output.Edges[2] = 4;
Output.Inside = 4;
return Output;
}
[domain("tri")]
[partitioning("integer")]
[outputtopology("triangle_cw")]
[outputcontrolpoints(3)]
[patchconstantfunc("ConstantHS")]
HS_OUTPUT HShader(InputPatch<VS_CONTROL_POINT_OUTPUT, 3> p, uint i : SV_OutputControlPointID, uint PatchID : SV_PrimitiveID)
{
HS_OUTPUT Output;
Output.vPosition = p[i].vPosition;
return Output;
}
struct DS_OUTPUT
{
float4 vPosition : SV_POSITION;
};
[domain("tri")]
DS_OUTPUT DShader(HS_CONSTANT_DATA_OUTPUT input, float3 UVW : SV_DomainLocation, const OutputPatch<HS_OUTPUT, 3> quad)
{
DS_OUTPUT Output;
float3 finalPos = UVW.x * quad[0].vPosition + UVW.y * quad[1].vPosition + UVW.z * quad[2].vPosition;
Output.vPosition = mul(world, float4(finalPos, 1));
return Output;
}
VS_CONTROL_POINT_OUTPUT VShader(VS_CONTROL_POINT_INPUT Input)
{
VS_CONTROL_POINT_OUTPUT Output;
Output.vPosition = Input.vPosition;
return Output;
}
float4 PShader(DS_OUTPUT Input) : SV_TARGET
{
return float4(1, 0, 0, 1);
}
My shader init. code:
D3DCompileFromFile(L"shader.hlsl", NULL, NULL, "VShader", "vs_5_0", 0, 0, &VS, &ERR);
D3DCompileFromFile(L"shader.hlsl", NULL, NULL, "PShader", "ps_5_0", 0, 0, &PS, &ERR);
D3DCompileFromFile(L"shader.hlsl", NULL, NULL, "HShader", "hs_5_0", 0, 0, &HS, &ERR);
D3DCompileFromFile(L"shader.hlsl", NULL, NULL, "DShader", "ds_5_0", 0, 0, &DS, &ERR);
dev->CreateVertexShader(VS->GetBufferPointer(), VS->GetBufferSize(), NULL, &pVS);
dev->CreatePixelShader(PS->GetBufferPointer(), PS->GetBufferSize(), NULL, &pPS);
dev->CreateHullShader(HS->GetBufferPointer(), HS->GetBufferSize(), NULL, &pHS);
dev->CreateDomainShader(DS->GetBufferPointer(), DS->GetBufferSize(), NULL, &pDS);
devcon->VSSetShader(pVS, 0, 0);
devcon->HSSetShader(pHS, 0, 0);
devcon->DSSetShader(pDS, 0, 0);
devcon->PSSetShader(pPS, 0, 0);
Input descriptor:
D3D11_INPUT_ELEMENT_DESC ied[] =
{
{"POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0},
{"COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0},
};
dev->CreateInputLayout(ied, 2, VS->GetBufferPointer(), VS->GetBufferSize(), &pLayout);
devcon->IASetInputLayout(pLayout);
Rasterizer:
D3D11_RASTERIZER_DESC RasterDesc = {};
RasterDesc.FillMode = D3D11_FILL_SOLID;
RasterDesc.CullMode = D3D11_CULL_NONE;
RasterDesc.DepthClipEnable = TRUE;
ID3D11RasterizerState* WireFrame=NULL;
dev->CreateRasterizerState(&RasterDesc, &WireFrame);
devcon->RSSetState(WireFrame);
Input Vertices:
OurVertices = (VERTEX*)malloc(PointCount * sizeof(VERTEX));
for (int i = 0; i < PointCount; i++)
{
OurVertices[i] = { RandOm() * i,RandOm() * i ,RandOm() ,{abs(RandOm()),abs(RandOm()),abs(RandOm()),1.0f} };
}
CBuffer:
ID3D11Buffer* g_pConstantBuffer11 = NULL;
cbuff.world = XMMatrixOrthographicOffCenterLH(SceneY - (ViewPortWidth / 2) * SceneZoom, SceneY + (ViewPortWidth / 2) * SceneZoom,
SceneX - (ViewPortHeight / 2) * SceneZoom, SceneX + (ViewPortHeight / 2) * SceneZoom,-10000.0f, 10000.0f);
D3D11_BUFFER_DESC cbDesc;
cbDesc.ByteWidth = sizeof(CBUFFER);
cbDesc.Usage = D3D11_USAGE_DYNAMIC;
cbDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
cbDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
cbDesc.MiscFlags = 0;
cbDesc.StructureByteStride = 0;
D3D11_SUBRESOURCE_DATA InitData;
InitData.pSysMem = &cbuff;
InitData.SysMemPitch = 0;
InitData.SysMemSlicePitch = 0;
dev->CreateBuffer(&cbDesc, &InitData,&g_pConstantBuffer11);
devcon->VSSetConstantBuffers(0, 1, &g_pConstantBuffer11);
On Render:
devcon->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_3_CONTROL_POINT_PATCHLIST);
All possible errors are handled in this code; the checks have been removed here just to keep it clean, and there are no warnings or exceptions during the Init and Render stages.
I can't even debug it because nothing is being drawn to the output. Also, is there any way to see output values from shaders?
cbuffer cbPerFrame : register(b0)
{
matrix world;
};
is not used by the vertex shader anymore but by the domain shader; however, it does not seem you are attaching it there, so you should have:
devcon->VSSetConstantBuffers(0, 1, &g_pConstantBuffer11); //Vertex
devcon->DSSetConstantBuffers(0, 1, &g_pConstantBuffer11); //Domain
(Please note that you normally don't need to bind it to the vertex stage anymore if you use tessellation only.)
Otherwise the pipeline will read a zero matrix.
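Put together with the shader bindings already shown in the question's init code, the sketch below shows where the extra call fits (variable names taken from the question):
devcon->VSSetShader(pVS, 0, 0);
devcon->HSSetShader(pHS, 0, 0);
devcon->DSSetShader(pDS, 0, 0);
devcon->PSSetShader(pPS, 0, 0);
devcon->VSSetConstantBuffers(0, 1, &g_pConstantBuffer11); // unused by this VS, but harmless
devcon->DSSetConstantBuffers(0, 1, &g_pConstantBuffer11); // required: DShader reads 'world'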

C++ Depth Buffer Oculus Rift DK2 VR HMD

This is a question for anybody experienced with the Oculus Rift C++ SDK.
SDK Version: ovr_sdk_win_1.20.0_public
IDE: VS2013
I am trying to resolve a depth buffer problem in my code for a simplified 3D engine for the Oculus Rift DK2.
The OculusRoomTiny example supplied with the SDK is very complex, as it is designed with versatility in mind. So I've taken the SuperMinimal sample code and used that as a basis for my engine. The SuperMinimal sample did not support multi-coloured vertices or depth buffers, so my code includes both.
I've had no problem creating depth buffers with the Microsoft DirectX 11 libraries in PC apps, so I think it's an Oculus-specific challenge. Note: the OculusRoomTiny example's depth buffer works fine but is encapsulated in classes too complex for my code.
In my code, the cube renders as expected, but including the depth buffer only renders the world background colour.
I ripped the depth buffer code from the OculusRoomTiny_Advanced demo and am sure I have integrated it religiously.
I've posted my code for comment. Please note, it is super minimal and does not clean up the COM objects.
Any advice would be welcome as I've been playing with the code now for 6 weeks!
main.cpp
#include "d3d11.h"
#include "d3dcompiler.h"
#include "OVR_CAPI_D3D.h"
#include "DirectXMath.h"
#include "models.h"
using namespace DirectX;
#pragma comment(lib, "d3dcompiler.lib")
#pragma comment(lib, "dxgi.lib")
#pragma comment(lib, "d3d11.lib")
void CreateSampleModel(ID3D11Device * Device, ID3D11DeviceContext * Context);
void RenderSampleModel(XMMATRIX * viewProj, ID3D11Device * Device, ID3D11DeviceContext * Context);
ID3D11Buffer * IndexBuffer;
ID3D11RenderTargetView * eyeRenderTexRtv[2][3];
ID3D11DepthStencilView *zbuffer[2]; // containing one for each eye
ovrLayerEyeFov ld = { { ovrLayerType_EyeFov } }; // Only using one layer. Update this to an array if using several.
ovrSession session;
ID3D11Device * Device;
ID3D11DeviceContext * Context;
void CreateDepthBufferForBothEyes(int eye)
{
// Create the Depth Buffer Texture
D3D11_TEXTURE2D_DESC texd;
ZeroMemory(&texd, sizeof(texd));
texd.Width = ld.Viewport[eye].Size.w;
texd.Height = ld.Viewport[eye].Size.h;
texd.ArraySize = 1;
texd.MipLevels = 1;
texd.SampleDesc.Count = 1; // This matches the RTV
texd.Format = DXGI_FORMAT_D32_FLOAT;
texd.BindFlags = D3D11_BIND_DEPTH_STENCIL;
ID3D11Texture2D *pDepthBuffer;
Device->CreateTexture2D(&texd, NULL, &pDepthBuffer);
// Describe specific properties of the Depth Stencil Buffer
D3D11_DEPTH_STENCIL_VIEW_DESC dsvd;
ZeroMemory(&dsvd, sizeof(dsvd));
dsvd.Format = DXGI_FORMAT_D32_FLOAT; // Make the same as the texture format
dsvd.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2D;
Device->CreateDepthStencilView(pDepthBuffer, &dsvd, &zbuffer[eye]);
pDepthBuffer->Release();
}
//-------------------------------------------------------------------------------------------------
int WINAPI wWinMain(HINSTANCE hInstance, HINSTANCE, LPWSTR, int)
{
// Init Rift and device
ovr_Initialize(0);
ovrGraphicsLuid luid;
ovr_Create(&session, &luid);
IDXGIFactory * DXGIFactory; CreateDXGIFactory1(__uuidof(IDXGIFactory), (void**)(&DXGIFactory));
IDXGIAdapter * DXGIAdapter; DXGIFactory->EnumAdapters(0, &DXGIAdapter);
D3D11CreateDevice(DXGIAdapter, D3D_DRIVER_TYPE_UNKNOWN, 0, 0, 0, 0, D3D11_SDK_VERSION, &Device, 0, &Context);
// Create eye render buffers
for (int eye = 0; eye < 2; eye++)
{
ld.Fov[eye] = ovr_GetHmdDesc(session).DefaultEyeFov[eye];
ld.Viewport[eye].Size = ovr_GetFovTextureSize(session, (ovrEyeType)eye, ld.Fov[eye], 1.0f);
ovrTextureSwapChainDesc desc = {};
desc.Type = ovrTexture_2D;
desc.ArraySize = 1;
desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
desc.Width = ld.Viewport[eye].Size.w;
desc.Height = ld.Viewport[eye].Size.h;
desc.MipLevels = 1;
desc.SampleCount = 1;
desc.StaticImage = ovrFalse;
desc.MiscFlags = ovrTextureMisc_DX_Typeless;
desc.BindFlags = ovrTextureBind_DX_RenderTarget;
ovr_CreateTextureSwapChainDX(session, Device, &desc, &ld.ColorTexture[eye]);
int textureCount = 0; ovr_GetTextureSwapChainLength(session, ld.ColorTexture[eye], &textureCount);
for (int j = 0; j < textureCount; j++) // Creates 3 backbuffers for each eye
{
ID3D11Texture2D* tex; ovr_GetTextureSwapChainBufferDX(session, ld.ColorTexture[eye], j, IID_PPV_ARGS(&tex));
D3D11_RENDER_TARGET_VIEW_DESC rtvd = { DXGI_FORMAT_R8G8B8A8_UNORM, D3D11_RTV_DIMENSION_TEXTURE2D };
Device->CreateRenderTargetView(tex, &rtvd, &eyeRenderTexRtv[eye][j]);
}
CreateDepthBufferForBothEyes(eye);
}
// Create sample model to be rendered in VR
CreateSampleModel(Device,Context);
// Loop for some frames, then terminate
float camX = 0.0f;
float camY = 0.0f;
float camZ = 0.0f;
for (long long frameIndex = 0; frameIndex < 1000;)
{
if (GetKeyState(VK_LEFT) & 0x8000)
camX -= 0.001f;
if (GetKeyState(VK_RIGHT) & 0x8000)
camX += 0.001f;
if (GetKeyState(VK_UP) & 0x8000)
camY += 0.001f;
if (GetKeyState(VK_DOWN) & 0x8000)
camY -= 0.001f;
if (GetKeyState(VK_NEXT) & 0x8000)
camZ += 0.001f;
if (GetKeyState(VK_PRIOR) & 0x8000)
camZ -= 0.001f;
// Get pose using a default IPD
ovrPosef HmdToEyePose[2] = {{{0,0,0,1}, {-0.032f, 0, 0}},
{{0,0,0,1}, {+0.032f, 0, 0}}};
ovrPosef pose[2]; ovr_GetEyePoses(session, 0, ovrTrue, HmdToEyePose, pose, &ld.SensorSampleTime);
//for (int eye = 0; eye < 2; eye++)
// ld.RenderPose[eye] = pose[eye]; // Update the Layer description with the new head position for each eye
// Render to each eye
for (int eye = 0; eye < 2; eye++)
{
ld.RenderPose[eye] = pose[eye]; // Update the Layer description with the new head position for each eye
// Set and clear current render target, and set viewport
int index = 0; ovr_GetTextureSwapChainCurrentIndex(session, ld.ColorTexture[eye], &index);
Context->OMSetRenderTargets(1, &eyeRenderTexRtv[eye][index], zbuffer[eye]); // zbuffer[eye]
Context->ClearRenderTargetView(eyeRenderTexRtv[eye][index], new float[]{ 0.1f, 0.0f, 0.0f, 0.0f });
Context->ClearDepthStencilView(zbuffer[eye], D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1.0, 0);
D3D11_VIEWPORT D3Dvp; // = { 0, 0, (float)ld.Viewport[eye].Size.w, (float)ld.Viewport[eye].Size.h };
D3Dvp.TopLeftX = 0.0f;
D3Dvp.TopLeftY = 0.0f;
D3Dvp.Width = (float)ld.Viewport[eye].Size.w;
D3Dvp.Height = (float)ld.Viewport[eye].Size.h;
D3Dvp.MinDepth = 0;
D3Dvp.MaxDepth = 1;
Context->RSSetViewports(1, &D3Dvp);
pose[eye].Position.z = 2.0f + camZ; // Move camera 2m from cube
pose[eye].Position.x = camX;
pose[eye].Position.y = camY;
// Calculate view and projection matrices using pose and SDK
XMVECTOR rot = XMLoadFloat4((XMFLOAT4 *)&pose[eye].Orientation);
XMVECTOR pos = XMLoadFloat3((XMFLOAT3 *)&pose[eye].Position);
XMVECTOR up = XMVector3Rotate(XMVectorSet(0, 1, 0, 0), rot);
XMVECTOR forward = XMVector3Rotate(XMVectorSet(0, 0, 1, 0), rot);
XMMATRIX view = XMMatrixLookAtLH(pos, XMVectorAdd(pos, forward), up);
ovrMatrix4f p = ovrMatrix4f_Projection(ld.Fov[eye], 0, 1, ovrProjection_None);
XMMATRIX proj = XMMatrixTranspose(XMLoadFloat4x4((XMFLOAT4X4 *)&p));
// Render model and commit frame
RenderSampleModel(&XMMatrixMultiply(view, proj), Device, Context);
ovr_CommitTextureSwapChain(session, ld.ColorTexture[eye]);
}
// Send rendered eye buffers to HMD, and increment the frame if we're visible
ovrLayerHeader* layers[1] = { &ld.Header };
if (ovrSuccess == ovr_SubmitFrame(session, 0, nullptr, layers, 1))
frameIndex; // was frameIndex++; but changed to loop forever
}
ovr_Shutdown();
}
//---------------------------------------------------------------------------------------
// THIS CODE IS NOT SPECIFIC TO VR OR THE SDK, JUST USED TO DRAW SOMETHING IN VR
//---------------------------------------------------------------------------------------
void CreateSampleModel(ID3D11Device * Device, ID3D11DeviceContext * Context)
{
// Create Vertex Buffer
//#define V(n) (n&1?+1.0f:-1.0f), (n&2?-1.0f:+1.0f), (n&4?+1.0f:-1.0f)
//float vertices[] = { V(0), V(3), V(2), V(6), V(3), V(7), V(4), V(2), V(6), V(1), V(5), V(3), V(4), V(1), V(0), V(5), V(4), V(7) };
D3D11_BUFFER_DESC vbDesc = { sizeof(VERTEX) * NUM_OF_VERTICES, D3D11_USAGE_DEFAULT, D3D11_BIND_VERTEX_BUFFER };
D3D11_SUBRESOURCE_DATA initData = { tList };
ID3D11Buffer* VertexBuffer;
Device->CreateBuffer(&vbDesc, &initData, &VertexBuffer);
//create the index buffer
D3D11_BUFFER_DESC bd;
bd.Usage = D3D11_USAGE_DYNAMIC;
bd.ByteWidth = sizeof(short) * NUM_OF_INDICES; // 3 per triangle, 12 triangles
bd.BindFlags = D3D11_BIND_INDEX_BUFFER;
bd.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
bd.MiscFlags = 0;
Device->CreateBuffer(&bd, NULL, &IndexBuffer);
D3D11_MAPPED_SUBRESOURCE ms;
Context->Map(IndexBuffer, NULL, D3D11_MAP_WRITE_DISCARD, NULL, &ms); // map the buffer
memcpy(ms.pData, indexes, NUM_OF_INDICES * sizeof(short)); // copy the data
Context->Unmap(IndexBuffer, NULL);
// Create Vertex Shader
char* vShader = "float4x4 m;"
"struct VOut { "
" float4 position : SV_POSITION;"
" float4 color : COLOR;"
"}; "
"VOut VS(float4 p1 : POSITION, float4 colour: COLOR)"
"{"
" VOut output;"
" output.position = mul(m, p1);"
" output.color = colour;"
" return output;"
"}";
ID3D10Blob * pBlob; D3DCompile(vShader, strlen(vShader), "VS", 0, 0, "VS", "vs_4_0", 0, 0, &pBlob, 0);
ID3D11VertexShader * VertexShader;
Device->CreateVertexShader(pBlob->GetBufferPointer(), pBlob->GetBufferSize(), 0, &VertexShader);
// Create Input Layout
D3D11_INPUT_ELEMENT_DESC elements[] = {
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "COLOR", 0, DXGI_FORMAT_B8G8R8A8_UNORM, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 },
};
ID3D11InputLayout * InputLayout;
Device->CreateInputLayout(elements, 2, pBlob->GetBufferPointer(), pBlob->GetBufferSize(), &InputLayout);
// Create Pixel Shader
char* pShader = "float4 PS(float4 position : POSITION, float4 colour : COLOR) : SV_Target { return colour; }";
D3DCompile(pShader, strlen(pShader), "PS", 0, 0, "PS", "ps_4_0", 0, 0, &pBlob, 0);
ID3D11PixelShader * PixelShader;
Device->CreatePixelShader(pBlob->GetBufferPointer(), pBlob->GetBufferSize(), 0, &PixelShader);
Context->IASetInputLayout(InputLayout);
Context->IASetIndexBuffer(IndexBuffer, DXGI_FORMAT_R16_UINT, 0);
UINT stride = sizeof(float) * 4U; // 7U
UINT offset = 0;
Context->IASetVertexBuffers(0, 1, &VertexBuffer, &stride, &offset);
Context->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
Context->VSSetShader(VertexShader, 0, 0);
Context->PSSetShader(PixelShader, 0, 0);
}
//------------------------------------------------------
void RenderSampleModel(XMMATRIX * viewProj, ID3D11Device * Device, ID3D11DeviceContext * Context)
{
D3D11_BUFFER_DESC desc = { sizeof(XMMATRIX), D3D11_USAGE_DYNAMIC, D3D11_BIND_CONSTANT_BUFFER, D3D11_CPU_ACCESS_WRITE };
D3D11_SUBRESOURCE_DATA initData = { viewProj };
ID3D11Buffer * ConstantBuffer; Device->CreateBuffer(&desc, &initData, &ConstantBuffer);
Context->VSSetConstantBuffers(0, 1, &ConstantBuffer);
Context->DrawIndexed(NUM_OF_INDICES, 0, 0);
}
models.h
#ifndef MODELS_H
#define MODELS_H
#include "DirectXMath.h"
using namespace DirectX;
#define NUM_OF_MODELS 1
struct VERTEX{
float x;
float y;
float z;
uint32_t C; // Colour
};
#define NUM_OF_VERTICES 24
#define NUM_OF_INDICES 36
extern VERTEX tList[];
extern short indexes[];
#endif
models.cpp
#include "models.h"
VERTEX tList[] = {
{ -0.1f, 0.1f, 0.2f, 0xFF0000FF }, // Cube Vertex Index 0
{ 0.1f, 0.1f, 0.2f, 0xFF0000FF }, // 1
{ -0.1f, -0.1f, 0.2f, 0xFF0000FF }, // 2
{ 0.1f, -0.1f, 0.2f, 0xFF0000FF }, // 3
{ -0.1f, 0.1f, -0.0f, 0x00FF00FF }, // 4
{ 0.1f, 0.1f, -0.0f, 0x00FF00FF }, // 5
{ -0.1f, -0.1f, -0.0f, 0x00FF00FF }, // 6
{ 0.1f, -0.1f, -0.0f, 0x00FF00FF }, // 7
{ 0.1f, 0.1f, 0.2f, 0x0000FFFF }, // 8
{ 0.1f, 0.1f, -0.0f, 0x0000FFFF }, // 9
{ 0.08f, -0.1f, 0.2f, 0x0000FFFF }, // 10 Distorted in prep to test Depth
{ 0.08f, -0.1f, -0.0f, 0x0000FFFF }, // 11 Distorted in prep to test Depth
{ -0.1f, 0.1f, 0.2f, 0xFF0000FF }, // 12
{ -0.1f, 0.1f, -0.0f, 0xFF0000FF }, // 13
{ -0.1f, -0.1f, 0.2f, 0xFF0000FF }, // 14
{ -0.1f, -0.1f, -0.0f, 0xFF0000FF }, // 15
{ -0.1f, 0.1f, -0.0f, 0x00FF00FF }, // 16
{ 0.1f, 0.1f, -0.0f, 0x00FF00FF }, // 17
{ -0.1f, 0.1f, 0.2f, 0x00FF00FF }, // 18
{ 0.1f, 0.1f, 0.2f, 0x00FF00FF }, // 19
{ -0.1f, -0.1f, -0.0f, 0x00FFFFFF }, // 20
{ 0.1f, -0.1f, -0.0f, 0x00FFFFFF }, // 21
{ -0.1f, -0.1f, 0.2f, 0x00FFFFFF }, // 22
{ 0.1f, -0.1f, 0.2f, 0x00FFFFFF }, // 23
};
short indexes[] = {
0, 1, 2, // FRONT QUAD
2, 1, 3,
5, 4, 6, // BACK QUAD
5, 6, 7,
8, 9, 11, // RIGHT QUAD
8, 11, 10,
13, 12,14, // LEFT QUAD
13, 14, 15,
18,16, 17, // TOP QUAD
18, 17, 19,
22, 23, 21, // BOTTOM QUAD
22, 21, 20
};
After much research I've found my own solution:
The line
ovrMatrix4f p = ovrMatrix4f_Projection(ld.Fov[eye], 0, 1, ovrProjection_None);
creates the projection matrix.
It was simply a case of setting the projection depth range correctly:
float zNear = 0.1f;
float zFar = 1000.0f;
ovrMatrix4f p = ovrMatrix4f_Projection(ld.Fov[eye], zNear, zFar, ovrProjection_None);
Amazing how 7 weeks of investigation revealed the flaw to be a simple omission!

DirectX 11.2 triangle shape is distorted by normal vectors

I am new to DirectX, and I have a somewhat silly question.
I am using Windows 8.1 with DirectX 11.2, and I am using a right-handed coordinate system.
I tried to apply a texture to an equilateral triangle that lies in the x-y plane and is centred at (0,0,0).
But the output shape is distorted (it doesn't look like an equilateral triangle at all!). In theory, when looking along the x-axis I should see nothing, because the triangle lies in the x-y plane, yet it turns out that I can see the triangle. In addition, if I change the value of the normal vectors, the output shape changes too! I do not understand why; please help!
Here is the view matrix configuration:
static const XMVECTORF32 eye = { 0.0f, 0.0f, 1.5f, 0.0f };
static const XMVECTORF32 at = { 0.0f, 0.0f, 0.0f, 0.0f };
static const XMVECTORF32 up = { 0.0f, 1.0f, 0.0f, 0.0f };
Here is the vertexshader:
cbuffer ModelViewProjectionConstantBuffer : register(b0)
{
matrix model;
matrix view;
matrix projection;
};
struct VertexShaderInput
{
float3 pos : POSITION;
float3 norm : NORMAL;
float2 tex : TEXCOORD0;
};
struct PixelShaderInput
{
float4 pos : SV_POSITION;
float3 norm : NORMAL;
float2 tex : TEXCOORD0;
};
PixelShaderInput main(VertexShaderInput input)
{
PixelShaderInput vertexShaderOutput;
float4 pos = float4(input.pos, 1.0f);
pos = mul(pos, model);
pos = mul(pos, view);
pos = mul(pos, projection);
vertexShaderOutput.pos = pos;
vertexShaderOutput.tex = input.tex;
vertexShaderOutput.norm = mul(float4(normalize(input.norm), 0.0f), model).xyz;
return vertexShaderOutput;
}
Here is the pixelshader:
Texture2D Texture : register(t0);
SamplerState Sampler : register(s0);
struct PixelShaderInput
{
float4 pos : SV_POSITION;
float3 norm : NORMAL;
float2 tex : TEXCOORD0;
};
float4 main(PixelShaderInput input) : SV_TARGET
{
float3 lightDirection = normalize(float3(0, 0, -1));
return Texture.Sample(Sampler, input.tex); //* (0.8f * saturate(dot(normalize(input.norm), -lightDirection)) + 0.2f);
}
Here are the coordinates:
VertexPositionTexture vertexPositionTexture[] =
{
{ XMFLOAT3(-1.5, -0.5*sqrtf(3), 0.0f), XMFLOAT3(0.0f, 0.0f, 0.4f), XMFLOAT2(0.0, 1.0) },
{ XMFLOAT3(0.0f, sqrtf(3), 0.0f), XMFLOAT3(0.0f, 0.0f, 0.4f), XMFLOAT2(0.5, 0.0) },
{ XMFLOAT3(1.5, -0.5*sqrtf(3), 0.0f), XMFLOAT3(0.0f, 0.0f, 0.4f), XMFLOAT2(1.0, 1.0) },
}
The index array would simply be {0,1,2} in clockwise order.
So if I change the normal vector value in vertexPositionTexture from XMFLOAT3(0.0, 0.0, 0.4) to XMFLOAT3(0.0, 0.0, -1), the shape definitely changes. I don't know why.
Here is how I create the DeviceDependentResources:
void TextureSceneRenderer::CreateDeviceDependentResources()
{
// Load shaders asynchronously.
auto loadVSTask = DX::ReadDataAsync(L"TextureVertexShader.cso");
auto loadPSTask = DX::ReadDataAsync(L"TexturePixelShader.cso");
BasicLoader^ loader = ref new BasicLoader(m_deviceResources->GetD3DDevice());
loader->LoadTexture(
L"cat.dds",
&m_texture,
&m_textureSRV
);
// create the sampler
D3D11_SAMPLER_DESC samplerDesc;
ZeroMemory(&samplerDesc, sizeof(D3D11_SAMPLER_DESC));
samplerDesc.Filter = D3D11_FILTER_ANISOTROPIC;
samplerDesc.AddressU = D3D11_TEXTURE_ADDRESS_WRAP;
samplerDesc.AddressV = D3D11_TEXTURE_ADDRESS_WRAP;
samplerDesc.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
samplerDesc.MipLODBias = 0.0f;
// use 4x on feature level 9.2 and above, otherwise use only 2x
samplerDesc.MaxAnisotropy = 8;
samplerDesc.ComparisonFunc = D3D11_COMPARISON_NEVER;
samplerDesc.BorderColor[0] = 0.0f;
samplerDesc.BorderColor[1] = 0.0f;
samplerDesc.BorderColor[2] = 0.0f;
samplerDesc.BorderColor[3] = 0.0f;
// allow use of all mip levels
samplerDesc.MinLOD = 0;
samplerDesc.MaxLOD = D3D11_FLOAT32_MAX;
DX::ThrowIfFailed(
m_deviceResources->GetD3DDevice()->CreateSamplerState(
&samplerDesc,
&m_sampler)
);
// After the vertex shader file is loaded, create the shader and input layout.
auto createVSTask = loadVSTask.then([this](const std::vector<byte>& fileData) {
DX::ThrowIfFailed(
m_deviceResources->GetD3DDevice()->CreateVertexShader(
&fileData[0],
fileData.size(),
nullptr,
&m_vertexShader
)
);
static const D3D11_INPUT_ELEMENT_DESC vertexDesc[] =
{
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "NORMAL", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 24, D3D11_INPUT_PER_VERTEX_DATA, 0 },
};
DX::ThrowIfFailed(
m_deviceResources->GetD3DDevice()->CreateInputLayout(
vertexDesc,
ARRAYSIZE(vertexDesc),
&fileData[0],
fileData.size(),
&m_inputLayout
)
);
});
// After the pixel shader file is loaded, create the shader and constant buffer.
auto createPSTask = loadPSTask.then([this](const std::vector<byte>& fileData) {
DX::ThrowIfFailed(
m_deviceResources->GetD3DDevice()->CreatePixelShader(
&fileData[0],
fileData.size(),
nullptr,
&m_pixelShader
)
);
CD3D11_BUFFER_DESC constantBufferDesc(sizeof(ModelViewProjectionConstantBuffer), D3D11_BIND_CONSTANT_BUFFER);
DX::ThrowIfFailed(
m_deviceResources->GetD3DDevice()->CreateBuffer(
&constantBufferDesc,
nullptr,
&m_constantBuffer
)
);
});
// Once both shaders are loaded, create the mesh.
auto createCubeTask = (createPSTask && createVSTask).then([this]() {
D3D11_SUBRESOURCE_DATA vertexBufferData = { 0 };
vertexBufferData.pSysMem = this->model->getVertexPositionTexture();
vertexBufferData.SysMemPitch = 0;
vertexBufferData.SysMemSlicePitch = 0;
CD3D11_BUFFER_DESC vertexBufferDesc(sizeof(this->model->getVertexPositionTexture()[0])*this->model->n_texture_vertex, D3D11_BIND_VERTEX_BUFFER);
DX::ThrowIfFailed(
m_deviceResources->GetD3DDevice()->CreateBuffer(
&vertexBufferDesc,
&vertexBufferData,
&m_vertexBuffer
)
);
m_indexCount = this->model->n_mesh;//ARRAYSIZE(cubeIndices);
D3D11_SUBRESOURCE_DATA indexBufferData = { 0 };
indexBufferData.pSysMem = this->model->getMeshTextureIndex();
indexBufferData.SysMemPitch = 0;
indexBufferData.SysMemSlicePitch = 0;
CD3D11_BUFFER_DESC indexBufferDesc(sizeof(this->model->getMeshTextureIndex()[0])*m_indexCount, D3D11_BIND_INDEX_BUFFER);
DX::ThrowIfFailed(
m_deviceResources->GetD3DDevice()->CreateBuffer(
&indexBufferDesc,
&indexBufferData,
&m_indexBuffer
)
);
});
// Once the cube is loaded, the object is ready to be rendered.
createCubeTask.then([this]() {
m_loadingComplete = true;
});
}
Here is the render function:
void TextureSceneRenderer::Render()
{
// Loading is asynchronous. Only draw geometry after it's loaded.
if (!m_loadingComplete)
{
return;
}
auto context = m_deviceResources->GetD3DDeviceContext();
// Set render targets to the screen.
ID3D11RenderTargetView *const targets[1] = { m_deviceResources->GetBackBufferRenderTargetView() };
context->OMSetRenderTargets(1, targets, m_deviceResources->GetDepthStencilView());
// Prepare the constant buffer to send it to the graphics device.
context->UpdateSubresource(
m_constantBuffer.Get(),
0,
NULL,
&m_constantBufferData,
0,
0
);
// Each vertex is one instance of the VertexPositionColor struct.
UINT stride = sizeof(VertexPositionColor);
UINT offset = 0;
context->IASetVertexBuffers(
0,
1,
m_vertexBuffer.GetAddressOf(),
&stride,
&offset
);
context->IASetIndexBuffer(
m_indexBuffer.Get(),
DXGI_FORMAT_R16_UINT, // Each index is one 16-bit unsigned integer (short).
0
);
context->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
context->IASetInputLayout(m_inputLayout.Get());
// Attach our vertex shader.
context->VSSetShader(
m_vertexShader.Get(),
nullptr,
0
);
// Send the constant buffer to the graphics device.
context->VSSetConstantBuffers(
0,
1,
m_constantBuffer.GetAddressOf()
);
// Attach our pixel shader.
context->PSSetShader(
m_pixelShader.Get(),
nullptr,
0
);
context->PSSetShaderResources(
0,
1,
m_textureSRV.GetAddressOf()
);
context->PSSetSamplers(
0, // starting at the first sampler slot
1, // set one sampler binding
m_sampler.GetAddressOf()
);
// Draw the objects.
context->DrawIndexed(
m_indexCount,
0,
0
);
}

DirectX Index Buffer Index order

I'm having some trouble drawing with vertex and index buffers in DirectX.
Here is a function that defines a cube, but I've commented out everything but the front face:
void GeometryGenerator::CreateCube(MeshData& meshData)
{
meshData.Vertices.resize(8);
meshData.Indices.resize(6);
float width;
float height;
float depth;
width = 45;
height = 45;
depth = 45;
XMFLOAT3 pos[8] =
{
XMFLOAT3(-0.5f * width, -0.5f * height, -0.5f * depth),
XMFLOAT3(-0.5f * width, -0.5f * height, 0.5f * depth),
XMFLOAT3(-0.5f * width, 0.5f * height, -0.5f * depth),
XMFLOAT3(-0.5f * width, 0.5f * height, 0.5f * depth),
XMFLOAT3( 0.5f * width, -0.5f * height, -0.5f * depth),
XMFLOAT3( 0.5f * width, -0.5f * height, 0.5f * depth),
XMFLOAT3( 0.5f * width, 0.5f * height, -0.5f * depth),
XMFLOAT3( 0.5f * width, 0.5f * height, 0.5f * depth)
};
unsigned short k[6] =
{
////1,0,2, // -x
////2,1,0,
////6,5,1, // +x
////6,7,5,
////
////0,1,5, // -y
////0,5,4,
////
////2,6,7, // +y
////2,7,3,
////4,6,0, // -z
////6,2,0,
7,3,1, //+z
5,7,1,
};
for(size_t i = 0; i < 8; ++i)
meshData.Vertices[i].Position = pos[i];
for(size_t i = 0; i < 6; ++i)
meshData.Indices[i] = k[i];
}
It draws one triangle and a line, neither of which goes to the intended vertices. The triangle looks like it covers vertices 7, 3, 0, and there is a line that covers 1, 0. I have attached images below at 90-degree rotations:
I am happy to post whatever code is necessary, but I doubt you want me to post my entire project. A few things I think might be useful are:
My Vertex Struct:
struct Vertex
{
DirectX::XMFLOAT3 pos;
DirectX::XMFLOAT3 color;
};
My Input layout:
const D3D11_INPUT_ELEMENT_DESC vertexDesc[] =
{
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "COLOR", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 },
};
My Vertex Shader:
cbuffer ModelViewProjectionConstantBuffer : register(b0)
{
matrix model;
matrix view;
matrix projection;
};
struct VertexShaderInput
{
float3 pos : POSITION;
float3 color : COLOR0;
};
struct VertexShaderOutput
{
float4 pos : SV_POSITION;
float3 color : COLOR0;
};
VertexShaderOutput main(VertexShaderInput input)
{
VertexShaderOutput output;
float4 pos = float4(input.pos, 1.0f);
// Transform the vertex position into projected space.
pos = mul(pos, model);
pos = mul(pos, view);
pos = mul(pos, projection);
output.pos = pos;
// Pass through the color without modification.
output.color = input.color;
return output;
}
Primitive Topology:
m_d3dContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
Index Buffer Format:
m_d3dContext->IASetIndexBuffer(
m_indexBuffer.Get(),
DXGI_FORMAT_R16_UINT,
0);
Vertex Stride and Offset:
UINT stride = sizeof(Vertex);
UINT offset = 0;
m_d3dContext->IASetVertexBuffers(
0,
1,
m_vertexBuffer.GetAddressOf(),
&stride,
&offset);
Here is the DrawIndexed call:
m_d3dContext->DrawIndexed(
m_Indices.size(),
0,
0);
The code that gets the data and creates the Index Buffer:
GeometryGenerator generator;
GeometryGenerator::MeshData cubeData;
generator.CreateCube(cubeData);
//m_Indices is a std::Vector<Vertex>
m_Indices.resize(cubeData.Indices.size());
for(size_t i = 0; i < cubeData.Indices.size(); i++)
m_Indices[i] = cubeData.Indices[i];
D3D11_SUBRESOURCE_DATA indexBufferData = {0};
indexBufferData.pSysMem = &m_Indices[0];
indexBufferData.SysMemPitch = 0;
indexBufferData.SysMemSlicePitch = 0;
const UINT indexBufferWidth = m_Indices.size() * sizeof(UINT);
CD3D11_BUFFER_DESC indexBufferDesc(indexBufferWidth, D3D11_BIND_INDEX_BUFFER);
DX::ThrowIfFailed(
m_d3dDevice->CreateBuffer(
&indexBufferDesc,
&indexBufferData,
&m_indexBuffer));
Thank you for the help! I have been working with "Introduction to 3D Game Programming with DirectX 11" by Frank Luna, but I can't find any discussion of issues like these.
You are most likely making a mistake in one of the following. A mistake in any could easily cause the issue you are observing.
// Input layout defined correctly:
D3D11_INPUT_ELEMENT_DESC vertexDesc[] = { { "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 } };
// Vertex shader input matches input layout:
VS_OUT main(float3 pos : POSITION)
// Primitive topology set correctly:
context->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
// Index buffer format set correctly:
context->IASetIndexBuffer(indexBuffer, DXGI_FORMAT_R16_UINT, 0);
// Vertex stride and offset set correctly:
UINT stride = sizeof(XMFLOAT3);
UINT offset = 0;
context->IASetVertexBuffers(0, 1, vertexBuffer, &stride, &offset);
// Draw arguments are correct:
context->DrawIndexed(6, 0, 0);