Direct3D11 renders only lines, and in weird order - c++

I have reduced a previous rendering problem to a minimal core where I am stuck.
I have a vertex buffer, consisting of 4 vertices, arranged in a plane (labeled 0 to 3):
1. .2
0. .3
and a corresponding index buffer {0,1,2,3,0}.
Now, when I render with D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP, I achieve the expected image:
__
| |
|__|
However, when I render with D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP the result is:
| /|
|/ |
Note that no filling of triangles is performed.
Even more confusing, when using D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST the result is:
|
|
If I change the index buffer to {0,1,2,0,2,3} it renders:
| /
|/
That is, just a single one-pixel line between the first two vertices is being drawn.
I have reduced my shaders to the most primitive examples:
Vertex shader:
struct VertexInputType
{
float4 position : POSITION;
};
struct PixelInputType
{
float4 position : SV_POSITION;
};
PixelInputType VertexShader(VertexInputType input)
{
PixelInputType output;
input.position.w = 1.0f;
output.position = input.position;
return output;
}
Pixel shader:
struct PixelInputType
{
float4 position : SV_POSITION;
};
float4 PixelShader(PixelInputType input) : SV_TARGET
{
float4 color;
color.r = 0;
color.g = 0;
color.b = 0;
color.a = 1;
return color;
}
As vertices I'm using DirectX::XMFLOAT3:
D3D11_INPUT_ELEMENT_DESC polygon_layout[1];
polygon_layout[0].SemanticName = "POSITION";
polygon_layout[0].SemanticIndex = 0;
polygon_layout[0].Format = DXGI_FORMAT_R32G32B32_FLOAT;
polygon_layout[0].InputSlot = 0;
polygon_layout[0].AlignedByteOffset = 0;
polygon_layout[0].InputSlotClass = D3D11_INPUT_PER_VERTEX_DATA;
polygon_layout[0].InstanceDataStepRate = 0;
d3d11_device->CreateInputLayout(polygon_layout, 1, compiled_vshader_buffer->GetBufferPointer(), compiled_vshader_buffer->GetBufferSize(), &input_layout);
D3D11_BUFFER_DESC vertex_buffer_desc;
vertex_buffer_desc.Usage = D3D11_USAGE_DEFAULT;
vertex_buffer_desc.ByteWidth = sizeof(DirectX::XMFLOAT3) * 4;
vertex_buffer_desc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vertex_buffer_desc.CPUAccessFlags = 0;
vertex_buffer_desc.MiscFlags = 0;
vertex_buffer_desc.StructureByteStride = 0;
DirectX::XMFLOAT3 vertices[4];
vertices[0].x = -0.5; vertices[0].y = -0.5; vertices[0].z = 0;
vertices[1].x = -0.5; vertices[1].y = 0.5; vertices[1].z = 0;
vertices[2].x = 0.5; vertices[2].y = 0.5; vertices[2].z = 0;
vertices[3].x = 0.5; vertices[3].y = -0.5; vertices[3].z = 0;
D3D11_SUBRESOURCE_DATA vertex_buffer_data;
vertex_buffer_data.pSysMem = vertices;
vertex_buffer_data.SysMemPitch = 0;
vertex_buffer_data.SysMemSlicePitch = 0;
hr = d3d11_device->CreateBuffer(&vertex_buffer_desc, &vertex_buffer_data, &vertex_buffer);
D3D11_BUFFER_DESC index_buffer_desc;
index_buffer_desc.Usage = D3D11_USAGE_DEFAULT;
index_buffer_desc.ByteWidth = sizeof(int32_t) * 6;
index_buffer_desc.BindFlags = D3D11_BIND_INDEX_BUFFER;
index_buffer_desc.CPUAccessFlags = 0;
index_buffer_desc.MiscFlags = 0;
index_buffer_desc.StructureByteStride = 0;
int32_t indices[6];
indices[0] = 0;
indices[1] = 1;
indices[2] = 2;
indices[3] = 2;
indices[4] = 3;
indices[5] = 0;
D3D11_SUBRESOURCE_DATA index_buffer_data;
index_buffer_data.pSysMem = indices;
index_buffer_data.SysMemPitch = 0;
index_buffer_data.SysMemSlicePitch = 0;
hr = d3d11_device->CreateBuffer(&index_buffer_desc, &index_buffer_data, &index_buffer);
// during rendering I set:
unsigned int stride = sizeof(DirectX::XMFLOAT3);
unsigned int offset = 0;
d3d11_context->IASetVertexBuffers(0, 1, &vertex_buffer, &stride, &offset);
d3d11_context->IASetIndexBuffer(index_buffer, DXGI_FORMAT_R32_UINT, 0);
d3d11_context->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
d3d11_context->RSSetState(rasterizer_state);
d3d11_context->IASetInputLayout(input_layout);
d3d11_context->VSSetShader(vertex_shader, NULL, 0);
d3d11_context->PSSetShader(pixel_shader, NULL, 0);
// and render with:
d3d11_context->DrawIndexed(6, 0, 0);
When I inspect the shaders with ID3D11ShaderReflection::GetGSInputPrimitive(), I receive D3D_PRIMITIVE_UNDEFINED for both the vertex shader and the pixel shader.
I am setting the rasterizer stage with D3D11_FILL_SOLID and D3D11_CULL_NONE.
Is there any setting or state in the D3D11 context that could explain such a behavior?
I'm happy for any ideas where to look.
Thanks in advance!

Firstly, a triangle strip draws exactly what you'd expect: a sequence of triangles. Each index in the index buffer is combined with the two previous indices to form a triangle.
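As an illustration, here is a sketch (not the poster's code) of how the strip index buffer {0,1,2,3,0} from the question expands into triangles under that rule:
// Sketch: how D3D11 expands the strip index buffer {0,1,2,3,0} into triangles.
#include <cstdio>

int main()
{
    const unsigned indices[] = { 0, 1, 2, 3, 0 };
    const int indexCount = 5;

    // Triangle k is built from indices k, k+1 and k+2; the winding of every
    // odd-numbered triangle is reversed so all triangles face the same way.
    for (int k = 0; k + 2 < indexCount; ++k)
    {
        unsigned a = indices[k], b = indices[k + 1], c = indices[k + 2];
        if (k & 1) { unsigned t = b; b = c; c = t; } // reverse winding on odd triangles
        std::printf("triangle %d: %u %u %u\n", k, a, b, c);
    }
    // Prints: (0,1,2), (1,3,2), (2,3,0) -- the three triangles the strip produces.
}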
I'd suggest that, as your triangle list's index count is not divisible by 3, DirectX may be rendering incorrectly (remember that, being a high-throughput system, it skips checks and balances wherever it can for the sake of speed).
Try drawing your expected result on paper after reviewing the logic behind each of the draw modes (list, strip, fan, etc.) to be sure that you are using the correct vertex ordering and draw mode.
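For example, with the layout from the question (0 bottom-left, 1 top-left, 2 top-right, 3 bottom-right), index buffers along these lines would be the usual choices for the two topologies (a sketch, not the poster's code):
#include <cstdint>

// Sketch: plausible index buffers for the quad described in the question
// (0 bottom-left, 1 top-left, 2 top-right, 3 bottom-right).

// TRIANGLELIST: two explicit triangles, six indices, count divisible by 3.
const int32_t quadListIndices[6]  = { 0, 1, 2,   0, 2, 3 };

// TRIANGLESTRIP: four indices; triangle k is built from indices k, k+1, k+2.
const int32_t quadStripIndices[4] = { 0, 1, 3, 2 };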
Good luck!

It turns out that the code was not the problem. Somewhere earlier, something in the Direct3D state had been changed.
Calling context->ClearState(); solved the issue.
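For anyone hitting the same thing: ClearState resets everything the context holds (shaders, buffers, input layout, render targets, rasterizer/blend/depth state, viewports), so the pipeline has to be re-bound afterwards. A rough sketch, reusing the names from the question and assuming a render target view, depth-stencil view and viewport already exist:
// Sketch: wipe whatever state earlier code left on the context, then
// re-bind everything this draw needs (ClearState also unbinds render
// targets and viewports, so those must be set again too).
d3d11_context->ClearState();

d3d11_context->OMSetRenderTargets(1, &render_target_view, depth_stencil_view); // assumed views
d3d11_context->RSSetViewports(1, &viewport);                                   // assumed viewport
d3d11_context->RSSetState(rasterizer_state);

d3d11_context->IASetInputLayout(input_layout);
d3d11_context->IASetVertexBuffers(0, 1, &vertex_buffer, &stride, &offset);
d3d11_context->IASetIndexBuffer(index_buffer, DXGI_FORMAT_R32_UINT, 0);
d3d11_context->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);

d3d11_context->VSSetShader(vertex_shader, NULL, 0);
d3d11_context->PSSetShader(pixel_shader, NULL, 0);

d3d11_context->DrawIndexed(6, 0, 0);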

Related

DirectX 11 Not Drawing Small Vertex Buffer

I have a problem with my DirectX app. It can't draw primitives like D3D11_PRIMITIVE_TOPOLOGY_POINTLIST or D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST when the vertex count is a relatively small value (3 to 100 points). Over a thousand vertices, it suddenly draws lots of primitives, but I'm not sure it draws them all.
My code looks like this.
Main Code:
Fill Vertex Array
OurVertices = (VERTEX*)malloc(PointCount * sizeof(VERTEX));
for (int i = 0; i < PointCount; i++)
{
OurVertices[i] = { RandOm() * i, RandOm() * i, 1.0f, { abs(RandOm()), abs(RandOm()), abs(RandOm()), 1.0f } };
}
RandOm(): a random value from 0.0f to 1.0f, multiplied by i to get some realistic world coordinates.
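A minimal sketch of a helper along those lines (the actual RandOm() in the question may differ, since it is also wrapped in abs()):
#include <random>

// Sketch of a RandOm()-style helper returning a uniform float in [0, 1].
static float RandOm()
{
    static std::mt19937 gen{ std::random_device{}() };
    static std::uniform_real_distribution<float> dist(0.0f, 1.0f);
    return dist(gen);
}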
Vertex Buffer
D3D11_BUFFER_DESC bd;
ZeroMemory(&bd, sizeof(bd));
bd.Usage = D3D11_USAGE_DYNAMIC;
bd.ByteWidth = sizeof(VERTEX)*PointCount;
bd.BindFlags = D3D11_BIND_VERTEX_BUFFER;
bd.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
dev->CreateBuffer(&bd, NULL, &pVBuffer);
VERTEX: struct VERTEX { FLOAT X, Y, Z; FLOAT Color[4]; };
Mapping
devcon->Map(pVBuffer, NULL, D3D11_MAP_WRITE_DISCARD, NULL, &ms);
memcpy(ms.pData, OurVertices, PointCount* sizeof(VERTEX));
devcon->Unmap(pVBuffer, NULL);
devcon: ID3D11DeviceContext
Render
float color[4] = { 0.0f, 0.0f, 0.0f, 1.0f };
devcon->ClearRenderTargetView(backbuffer,color);
UINT stride = sizeof(VERTEX);
UINT offset = 0;
devcon->IASetVertexBuffers(0, 1, &pVBuffer, &stride, &offset);
devcon->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
devcon->Draw(PointCount, 0);
swapchain->Present(0, 0);
Shader
VOut VShader(float4 position : POSITION, float4 color : COLOR)
{
    VOut output;
    output.position = mul(world, position);
    output.color = color;
    return output;
}
float4 PShader(float4 position : SV_POSITION, float4 color : COLOR) : SV_TARGET
{
    return color;
}
XMMatrixOrthographicOffCenterLH is calculated and applied using a constant buffer, as can be seen in the shader's world variable. I think the orthographic projection works fine from my visual observations. There are neither CPU nor GPU exceptions, and no warnings.
Rasterizer
D3D11_RASTERIZER_DESC RasterDesc = {};
RasterDesc.FillMode = D3D11_FILL_SOLID;
RasterDesc.CullMode = D3D11_CULL_NONE;
RasterDesc.DepthClipEnable = TRUE;
ID3D11RasterizerState* WireFrame=NULL;
dev->CreateRasterizerState(&RasterDesc, &WireFrame);
devcon->RSSetState(WireFrame);
Orthographic Projection
ID3D11Buffer* g_pConstantBuffer11 = NULL;
cbuff.world = XMMatrixOrthographicOffCenterLH(
    SceneY - (ViewPortWidth / 2) * SceneZoom,
    SceneY + (ViewPortWidth / 2) * SceneZoom,
    SceneX - (ViewPortHeight / 2) * SceneZoom,
    SceneX + (ViewPortHeight / 2) * SceneZoom,
    -10000.0f, 10000.0f);
D3D11_BUFFER_DESC cbDesc;
cbDesc.ByteWidth = sizeof(CBUFFER);
cbDesc.Usage = D3D11_USAGE_DYNAMIC;
cbDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
cbDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
cbDesc.MiscFlags = 0;
cbDesc.StructureByteStride = 0;
D3D11_SUBRESOURCE_DATA InitData;
InitData.pSysMem = &cbuff;
InitData.SysMemPitch = 0;
InitData.SysMemSlicePitch = 0;
dev->CreateBuffer(&cbDesc, &InitData,&g_pConstantBuffer11);
devcon->VSSetConstantBuffers(0, 1, &g_pConstantBuffer11);
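Since the constant buffer is D3D11_USAGE_DYNAMIC with D3D11_CPU_ACCESS_WRITE, per-frame updates would normally go through Map/Unmap with WRITE_DISCARD. A sketch of that (cbuff, CBUFFER, g_pConstantBuffer11 and devcon are the names used above; the rest is illustrative):
// Sketch: per-frame update of the dynamic constant buffer.
D3D11_MAPPED_SUBRESOURCE mapped = {};
HRESULT hr = devcon->Map(g_pConstantBuffer11, 0, D3D11_MAP_WRITE_DISCARD, 0, &mapped);
if (SUCCEEDED(hr))
{
    memcpy(mapped.pData, &cbuff, sizeof(CBUFFER));
    devcon->Unmap(g_pConstantBuffer11, 0);
}
devcon->VSSetConstantBuffers(0, 1, &g_pConstantBuffer11);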

DirectX11 Offscreen rendering: output image is flipped

I'm making my own graphics engine. I want to be able to write it in C++ but create the UI for an editor in C#. So with some defines I disable rendering to a window and instead do off-screen rendering to pass the data to C#. But I have a problem: I understand why it's happening (it's how DirectX creates and stores textures), but I have no clue how to fix it. So here are the results.
Imaged rendered to the window:
Image rendered to bmp (flipped):
In the first image everything looks good; in the second, as you can see, the Y coordinate (and maybe X, I'm not sure) is flipped. Possibly useful information: I represent normals as colors.
Here is my code
Vertex shader
cbuffer Transformation : register(b0) {
matrix transformation;
};
cbuffer ViewProjection : register(b1) {
matrix projection;
matrix view;
};
struct VS_OUT {
float2 texcoord : TextureCoordinate;
float3 normal : Normal;
float4 position : SV_Position;
};
VS_OUT main(float3 position : Position, float3 normal : Normal, float2 texcoord : TextureCoordinate) {
matrix tView = transpose(view);
matrix tProjection = transpose(projection);
matrix tTransformation = transpose(transformation);
matrix MVP = mul(tTransformation, mul(tView, tProjection));
VS_OUT result;
result.position = mul(float4(position, 1.0f), MVP);
result.texcoord = texcoord;
result.normal = normal;
return result;
}
Pixel shader
float4 main(float2 texcoord : TextureCoordinate, float3 normal : Normal) : SV_Target
{
float3 color = (normal + 1) * 0.5f;
return float4(color.rgb, 1.0f);
}
DirectX code for offscreen rendering initialization
D3D_FEATURE_LEVEL FeatureLevels[] = {
D3D_FEATURE_LEVEL_11_1,
D3D_FEATURE_LEVEL_11_0,
D3D_FEATURE_LEVEL_10_1,
D3D_FEATURE_LEVEL_10_0,
D3D_FEATURE_LEVEL_9_3,
D3D_FEATURE_LEVEL_9_2,
D3D_FEATURE_LEVEL_9_1
};
UINT deviceFlags = 0;
#if defined(DEBUG) || defined(_DEBUG)
deviceFlags |= D3D11_CREATE_DEVICE_DEBUG;
#endif
DirectX11Call(D3D11CreateDevice(
nullptr,
D3D_DRIVER_TYPE_HARDWARE,
nullptr,
deviceFlags,
FeatureLevels,
ARRAYSIZE(FeatureLevels),
D3D11_SDK_VERSION,
&m_Device,
&m_FeatureLevel,
&m_DeviceContext
))
D3D11_TEXTURE2D_DESC renderingDescription = {};
renderingDescription.Width = width;
renderingDescription.Height = height;
renderingDescription.ArraySize = 1;
renderingDescription.SampleDesc.Count = 1;
renderingDescription.Usage = D3D11_USAGE_DEFAULT;
renderingDescription.BindFlags = D3D11_BIND_RENDER_TARGET;
renderingDescription.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
m_Device->CreateTexture2D(&renderingDescription, nullptr, &m_Target);
renderingDescription.BindFlags = 0;
renderingDescription.Usage = D3D11_USAGE_STAGING;
renderingDescription.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
m_Device->CreateTexture2D(&renderingDescription, nullptr, &m_Output);
DirectX11Call(m_Device->CreateRenderTargetView(m_Target.Get(), nullptr, &m_RenderTargetView))
D3D11_DEPTH_STENCIL_DESC depthStencilStateDescription = {};
depthStencilStateDescription.DepthEnable = TRUE;
depthStencilStateDescription.DepthWriteMask = D3D11_DEPTH_WRITE_MASK_ALL;
depthStencilStateDescription.DepthFunc = D3D11_COMPARISON_LESS;
Microsoft::WRL::ComPtr<ID3D11DepthStencilState> depthStencilState;
DirectX11Call(m_Device->CreateDepthStencilState(&depthStencilStateDescription, &depthStencilState))
m_DeviceContext->OMSetDepthStencilState(depthStencilState.Get(), 0);
D3D11_TEXTURE2D_DESC depthStencilDescription = {};
depthStencilDescription.Width = width;
depthStencilDescription.Height = height;
depthStencilDescription.MipLevels = 1;
depthStencilDescription.ArraySize = 1;
depthStencilDescription.Format = DXGI_FORMAT_D32_FLOAT;
depthStencilDescription.SampleDesc.Count = 1;
depthStencilDescription.SampleDesc.Quality = 0;
depthStencilDescription.Usage = D3D11_USAGE_DEFAULT;
depthStencilDescription.BindFlags = D3D11_BIND_DEPTH_STENCIL;
depthStencilDescription.CPUAccessFlags = 0;
depthStencilDescription.MiscFlags = 0;
DirectX11Call(m_Device->CreateTexture2D(&depthStencilDescription, nullptr, &m_DepthStencilBuffer))
D3D11_DEPTH_STENCIL_VIEW_DESC depthStencilViewDescription = {};
depthStencilViewDescription.Format = DXGI_FORMAT_D32_FLOAT;
depthStencilViewDescription.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2D;
depthStencilViewDescription.Texture2D.MipSlice = 0;
DirectX11Call(m_Device->CreateDepthStencilView(m_DepthStencilBuffer.Get(), &depthStencilViewDescription, &m_DepthStencilView))
m_DeviceContext->OMSetRenderTargets(1, m_RenderTargetView.GetAddressOf(), m_DepthStencilView.Get());
m_DeviceContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
D3D11_VIEWPORT viewPort;
viewPort.TopLeftX = 0.0f;
viewPort.TopLeftY = 0.0f;
viewPort.Width = static_cast<float>(width);
viewPort.Height = static_cast<float>(height);
viewPort.MinDepth = 0.0f;
viewPort.MaxDepth = 1.0f;
m_DeviceContext->RSSetViewports(1, &viewPort);
D3D11_RASTERIZER_DESC rasterizerDescription = {};
rasterizerDescription.FillMode = D3D11_FILL_SOLID;
rasterizerDescription.CullMode = D3D11_CULL_FRONT;
Microsoft::WRL::ComPtr<ID3D11RasterizerState> rasterizerState;
DirectX11Call(m_Device->CreateRasterizerState(&rasterizerDescription, &rasterizerState))
m_DeviceContext->RSSetState(rasterizerState.Get());
Code for drawing to texture
m_DeviceContext->Flush();
m_DeviceContext->CopyResource(m_Output.Get(), m_Target.Get());
static const UINT resource_id = D3D11CalcSubresource(0, 0, 0);
m_DeviceContext->Map(m_Output.Get(), resource_id, D3D11_MAP_READ, 0, &m_OutputResource);
The only difference from rendering to a window is that there I also create a swap chain. So my question is: how can I fix this? (Flipping on the CPU is a bad solution, and it may cause problems with shaders, like in this example where the sphere gets a different color.)
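For reference, when reading the staging texture back, the mapped data has to be walked row by row using RowPitch, since the mapped pitch is generally larger than width * 4. A sketch under that assumption (m_Output, m_OutputResource, m_DeviceContext and resource_id are the names from the code above; pixels is illustrative):
#include <cstdint>
#include <cstring>
#include <vector>

// Sketch: copy the mapped staging texture into a tightly packed RGBA8 buffer.
std::vector<uint8_t> pixels(size_t(width) * height * 4);
const uint8_t* src = static_cast<const uint8_t*>(m_OutputResource.pData);
for (UINT row = 0; row < height; ++row)
{
    std::memcpy(&pixels[size_t(row) * width * 4],
                src + size_t(row) * m_OutputResource.RowPitch,
                size_t(width) * 4);
}
m_DeviceContext->Unmap(m_Output.Get(), resource_id);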

Texture shows as black

I am trying to cover a triangle with a texture instead of a plain color, but it looks like there is something wrong with the texture initialization...
Texture loading is fine; I tried it with OpenGL and it worked perfectly, however it isn't working in DirectX 11.
void DX11Texture2D::Create(Texture_Data Data, Texture_Desc Desc)
{
D3D11_TEXTURE2D_DESC texDesc;
ZeroMemory(&texDesc, sizeof(texDesc));
texDesc.Width = Data.width;
texDesc.Height = Data.height;
texDesc.MipLevels = 0;
texDesc.ArraySize = 1;
texDesc.Format = GetDXTextureFormat(Desc.Format);
texDesc.SampleDesc.Count = 1;
texDesc.SampleDesc.Quality = 0;
texDesc.Usage = D3D11_USAGE_DEFAULT;
texDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
texDesc.CPUAccessFlags = 0;
texDesc.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA subData;
subData.pSysMem = Data.databuf;
subData.SysMemPitch = Data.width * 4;
subData.SysMemSlicePitch = Data.width * Data.height * 4;
Core::Internals::DX11Renderer::GetDevice()->CreateTexture2D(&texDesc, &subData, &textureID);
//TODO: Add a way to disable and enable mip map
Core::Internals::DX11Renderer::GetDevice()->CreateShaderResourceView(textureID, 0, &resourceView);
D3D11_SAMPLER_DESC samplerDesc;
ZeroMemory(&samplerDesc, sizeof(D3D11_SAMPLER_DESC));
samplerDesc.AddressU = GetDXTextureWrap(Desc.Wrap);
samplerDesc.AddressV = GetDXTextureWrap(Desc.Wrap);
samplerDesc.AddressW = GetDXTextureWrap(Desc.Wrap);
samplerDesc.Filter = GetDXTextureFilter(Desc.Filter);
samplerDesc.ComparisonFunc = D3D11_COMPARISON_ALWAYS;
samplerDesc.MinLOD = 0;
samplerDesc.MaxLOD = D3D11_FLOAT32_MAX;
Core::Internals::DX11Renderer::GetDevice()->CreateSamplerState(&samplerDesc, &samplerState);
}
void DX11Texture2D::Bind(unsigned int index)
{
Core::Internals::DX11Renderer::GetContext()->PSSetShaderResources(index, 1, &resourceView);
Core::Internals::DX11Renderer::GetContext()->PSSetSamplers(index, 1, &samplerState);
}
DXGI_FORMAT DX11Texture2D::GetDXTextureFormat(API::TextureFormat format)
{
switch (format)
{
case API::TextureFormat::R8: return DXGI_FORMAT_R8_UNORM;
case API::TextureFormat::R8G8: return DXGI_FORMAT_R8G8_UNORM;
case API::TextureFormat::R8G8B8: return DXGI_FORMAT_R8G8B8A8_UNORM;
case API::TextureFormat::R8G8B8A8: return DXGI_FORMAT_R8G8B8A8_UNORM;
default: return DXGI_FORMAT_R8G8B8A8_UNORM;
}
}
D3D11_TEXTURE_ADDRESS_MODE DX11Texture2D::GetDXTextureWrap(API::TextureWrap textureWrap)
{
switch (textureWrap)
{
case API::TextureWrap::Repeat: return D3D11_TEXTURE_ADDRESS_WRAP;
case API::TextureWrap::MirroredReapeat: return D3D11_TEXTURE_ADDRESS_MIRROR;
case API::TextureWrap::ClampToEdge: return D3D11_TEXTURE_ADDRESS_CLAMP;
case API::TextureWrap::ClampToBorder: return D3D11_TEXTURE_ADDRESS_BORDER;
default: return D3D11_TEXTURE_ADDRESS_WRAP;
}
}
D3D11_FILTER DX11Texture2D::GetDXTextureFilter(API::TextureFilter textureFilter)
{
//TODO: Add more texture filter types to control both min and mag
switch (textureFilter)
{
case API::TextureFilter::Nearest: return D3D11_FILTER_MIN_MAG_MIP_POINT;
case API::TextureFilter::Linear: return D3D11_FILTER_MIN_MAG_MIP_LINEAR;
default: return D3D11_FILTER_MIN_MAG_MIP_POINT;
}
}
Also, here are my shaders:
Triangle.vs:
struct VertexInputType
{
float3 position : POSITION;
float3 color : COLOR;
float2 tex : TEXCOORD;
};
struct PixelInputType
{
float4 position : SV_POSITION;
float2 tex : TEXCOORD;
};
PixelInputType main(VertexInputType input)
{
PixelInputType output;
// Calculate the position of the vertex against the world, view, and projection matrices.
output.position = float4(input.position, 1);
// Store the input texture for the pixel shader to use.
output.tex = input.tex;
return output;
}
Triangle.ps
struct PixelInputType
{
float4 position : SV_POSITION;
float2 tex : TEXCOORD;
};
Texture2D shaderTexture : register(t0);
SamplerState SampleType : register(s0);
float4 main(PixelInputType input) : SV_TARGET
{
return shaderTexture.Sample(SampleType, input.tex);
}
Cheers,
Zlixine.
It seems you are creating a texture with a full mipmap chain, but looking at the initial data it looks like you provide data for only the first mip level (if your sampler tries to access another mip level, since no data was provided for it, it will be black).
In that case you should enforce a single mip level by using:
texDesc.MipLevels = 1;
The line:
subData.SysMemSlicePitch = Data.width * Data.height * 4;
is not needed, as this parameter is ignored for a 2D texture (you can leave it at 0 or any other value).
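Putting those two points together, a sketch of the corrected creation path (assuming tightly packed RGBA8 data, with Data, texDesc and textureID as in the question):
// Sketch: create the texture with exactly one mip level and one
// subresource of initial data.
texDesc.MipLevels = 1;

D3D11_SUBRESOURCE_DATA subData = {};
subData.pSysMem = Data.databuf;
subData.SysMemPitch = Data.width * 4; // bytes per row for tightly packed RGBA8
subData.SysMemSlicePitch = 0;         // ignored for 2D textures

Core::Internals::DX11Renderer::GetDevice()->CreateTexture2D(&texDesc, &subData, &textureID);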
Also, you should check the result codes when creating resources, e.g.:
HRESULT hr = Core::Internals::DX11Renderer::GetDevice()->CreateTexture2D(&texDesc, &subData, &textureID);
if (FAILED(hr))
{
//Handle issue if texture creation did fail
}
And make sure to use D3D11CreateDevice with D3D11_CREATE_DEVICE_DEBUG,
so that when debugging you get meaningful error messages in the Visual Studio output window (if creation fails, you will get an explanation instead of only an E_INVALIDARG HRESULT, which is not really useful).
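A minimal sketch of that (the debug flag is the point; the remaining parameters are illustrative):
// Sketch: request the debug layer in debug builds so D3D11 reports
// detailed errors in the Visual Studio output window.
UINT flags = 0;
#if defined(DEBUG) || defined(_DEBUG)
flags |= D3D11_CREATE_DEVICE_DEBUG;
#endif

ID3D11Device* device = nullptr;
ID3D11DeviceContext* context = nullptr;
HRESULT hr = D3D11CreateDevice(
    nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, flags,
    nullptr, 0,                     // default feature levels
    D3D11_SDK_VERSION, &device, nullptr, &context);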

DirectX 11: Model not rendering from Model class

Ok! Here goes. I've updated my code. However, after hours of debugging seemingly perfect code, I can't spot the problem. I've set up multiple breakpoints around the vertex and index buffer creation and around the class's draw call.
I've created a temporary vtest struct for testing purposes. It carries the definition:
struct vtest{
XMFLOAT3 Vertex;
XMFLOAT4 Color;
};
Linked to a proper IA abstraction:
D3D11_INPUT_ELEMENT_DESC InputElementDesc[] = {
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 },
};
(All of which returns a (HRESULT) S_OK)
D3D11_BUFFER_DESC BufferDescription;
ZeroMemory(&BufferDescription, sizeof(BufferDescription));
BufferDescription.Usage = D3D11_USAGE_DYNAMIC;
BufferDescription.ByteWidth = sizeof(vtest) * sz_vBuffer;
BufferDescription.BindFlags = D3D11_BIND_VERTEX_BUFFER;
BufferDescription.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
BufferDescription.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA SRData;
ZeroMemory(&SRData, sizeof(SRData));
SRData.pSysMem = test;
SRData.SysMemPitch = 0;
SRData.SysMemSlicePitch = 0;
hr = Device->CreateBuffer(&BufferDescription, &SRData, &g_vBuffer);
D3D11_MAPPED_SUBRESOURCE MappedResource;
ZeroMemory(&MappedResource, sizeof(MappedResource));
The vtest struct fills properly, and:
DeviceContext->Map(g_vBuffer, NULL, D3D11_MAP_WRITE_DISCARD, NULL, &MappedResource);
Succeeds, also with (HRESULT) S_OK.
Indices are initialized as follows (a one-dimensional DWORD array of indices):
D3D11_BUFFER_DESC iBufferDescription;
ZeroMemory(&iBufferDescription, sizeof(iBufferDescription));
iBufferDescription.Usage = D3D11_USAGE_DEFAULT;
iBufferDescription.ByteWidth = sizeof(DWORD)*sz_iBuffer;
iBufferDescription.BindFlags = D3D11_BIND_INDEX_BUFFER;
iBufferDescription.CPUAccessFlags = NULL;
iBufferDescription.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA iSRData;
iSRData.pSysMem = Indices;
hr = direct3D.Device->CreateBuffer(&iBufferDescription, &iSRData, &g_iBuffer);
The IA Set... calls are in the draw() call:
DeviceContext->IASetVertexBuffers(0, 1, &g_vBuffer, &stride, &Offset);
DeviceContext->IASetIndexBuffer(g_iBuffer, DXGI_FORMAT_R32_UINT, 0);
Other settings: (Edit: Corrected values to show configuration.)
D3D11_RASTERIZER_DESC DrawStyleState;
DrawStyleState.AntialiasedLineEnable = false;
DrawStyleState.CullMode = D3D11_CULL_NONE;
DrawStyleState.DepthBias = 0;
DrawStyleState.FillMode = D3D11_FILL_SOLID;
DrawStyleState.DepthClipEnable = false;
DrawStyleState.MultisampleEnable = true;
DrawStyleState.FrontCounterClockwise = false;
DrawStyleState.ScissorEnable = false;
My Depth Stencil code.
D3D11_TEXTURE2D_DESC DepthStenDescription;
ZeroMemory(&DepthStenDescription, sizeof(D3D11_TEXTURE2D_DESC));
DepthStenDescription.Width = cWidth;
DepthStenDescription.Height = cHeight;
DepthStenDescription.MipLevels = 0;
DepthStenDescription.ArraySize = 1;
DepthStenDescription.Format = DXGI_FORMAT_D24_UNORM_S8_UINT;
DepthStenDescription.SampleDesc.Count = 1;
DepthStenDescription.SampleDesc.Quality = 0;
DepthStenDescription.Usage = D3D11_USAGE_DEFAULT;
DepthStenDescription.BindFlags = D3D11_BIND_DEPTH_STENCIL;
DepthStenDescription.CPUAccessFlags = 0;
DepthStenDescription.MiscFlags = 0;
D3D11_DEPTH_STENCIL_VIEW_DESC DSVDesc;
ZeroMemory(&DSVDesc, sizeof(D3D11_DEPTH_STENCIL_VIEW_DESC));
DSVDesc.Format = DSVDesc.Format;
DSVDesc.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2D;
DSVDesc.Texture2D.MipSlice = 0;
And finally, my entity class draw() method:
void Entity::Draw(){
UINT stride = sizeof(vtest);
UINT Offset = 0;
ObjectSpace = XMMatrixIdentity();
m_Scale = Scale();
m_Rotation = Rotate();
m_Translate = Translate();
ObjectSpace = m_Scale*m_Rotation*m_Translate;
mWVP = ObjectSpace*direct3D.mView*direct3D.mProjection;
LocalWorld.mWorldVP = XMMatrixTranspose(wWVP);
DeviceContext->UpdateSubresource(direct3D.MatrixBuffer, 0, NULL, &LocalWorld, 0, 0);
DeviceContext->VSSetConstantBuffers(0, 1, &direct3D.MatrixBuffer);
DeviceContext->IASetVertexBuffers(0, 1, &g_vBuffer, &stride, &Offset);
DeviceContext->IASetIndexBuffer(g_iBuffer, DXGI_FORMAT_R32_UINT, 0);
DeviceContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
DeviceContext->DrawIndexed(e_Asset.sz_Index, 0, 0);
}
The code compiles and the back buffer presents correctly, but no model appears.
Initialization of the DirectX functions seems to be fine too...
Update: Following Banex's suggestion, using the Visual Studio DirectX debugging tools indicates that I may have gone wrong in my .hlsl file.
I also think I may have gone wrong at shader initialization, since my shader is really simple and essentially works as a vertex/pixel pass-through:
After examining the .hlsl file and doing further debugging: when I set output.position = position; rather than multiplying by the world matrix, the model was drawn on screen, implying a bad matrix calculation (causing an extreme warp) or null values stored in the constant buffer.
cbuffer ConstantBuffer:register(b0)
{
float4x4 WVP;
}
struct VOut
{
float4 position : SV_POSITION;
float4 color : COLOR;
};
VOut VShader(float4 position : POSITION, float4 color : COLOR)
{
VOut output;
output.position = position;// mul(position, WVP);
output.color = color;
return output;
}
float4 PShader(float4 position : SV_POSITION, float4 color : COLOR) : SV_TARGET
{
return color;
}
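For completeness, the C++ side has to feed that cbuffer with a struct of matching layout; a sketch of the kind of thing meant here (MatrixCB and cb are illustrative names, while ObjectSpace, direct3D.mView, direct3D.mProjection, direct3D.MatrixBuffer and DeviceContext are the names used in the question, and the XMMatrixTranspose mirrors the Draw() method above):
// Sketch: C++ struct matching "cbuffer ConstantBuffer : register(b0)" above.
struct MatrixCB
{
    DirectX::XMMATRIX WVP; // corresponds to float4x4 WVP in the shader
};

// Per-frame update, mirroring the Draw() call shown earlier:
MatrixCB cb;
cb.WVP = DirectX::XMMatrixTranspose(ObjectSpace * direct3D.mView * direct3D.mProjection);
DeviceContext->UpdateSubresource(direct3D.MatrixBuffer, 0, NULL, &cb, 0, 0);
DeviceContext->VSSetConstantBuffers(0, 1, &direct3D.MatrixBuffer);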

Using unsigned byte textures with DirectX 10 / 11

I am attempting to do some processing in the pixel shader on a texture. The data for the texture is coming from a memory chunk of 8 bit data. The problem I am facing is how to read the data in the shader.
Code to create the texture and ressource view:
In OnD3D11CreateDevice:
D3D11_TEXTURE2D_DESC tDesc;
tDesc.Height = 480;
tDesc.Width = 640;
tDesc.Usage = D3D11_USAGE_DYNAMIC;
tDesc.MipLevels = 1;
tDesc.ArraySize = 1;
tDesc.SampleDesc.Count = 1;
tDesc.SampleDesc.Quality = 0;
tDesc.Format = DXGI_FORMAT_R8_UINT;
tDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
tDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
tDesc.MiscFlags = 0;
V_RETURN(pd3dDevice->CreateTexture2D(&tDesc, NULL, &g_pCurrentImage));
D3D11_SHADER_RESOURCE_VIEW_DESC rvDesc;
g_pCurrentImage->GetDesc(&tDesc);
rvDesc.Format = DXGI_FORMAT_R8_UINT;
rvDesc.Texture2D.MipLevels = tDesc.MipLevels;
rvDesc.Texture2D.MostDetailedMip = tDesc.MipLevels - 1;
rvDesc.ViewDimension = D3D_SRV_DIMENSION_TEXTURE2D;
V_RETURN(pd3dDevice->CreateShaderResourceView(g_pCurrentImage, &rvDesc, &g_pImageRV));
in OnD3D11FrameRender:
HRESULT okay;
if( !g_updateDone ) {
D3D11_MAPPED_SUBRESOURCE resource;
resource.pData = mImage.GetData();
resource.RowPitch = 640;
resource.DepthPitch = 1;
okay = pd3dImmediateContext->Map(g_pCurrentImage, 0, D3D11_MAP_WRITE_DISCARD, 0, &resource);
g_updateDone = true;
}
pd3dImmediateContext->PSSetShaderResources(0, 1, &g_pImageRV);
This returns no errors so far, everything seems to work.
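For comparison, the usual pattern for a D3D11_USAGE_DYNAMIC texture is to call Map first and then copy into the pointer and RowPitch it returns; a sketch of that (mImage.GetData(), g_pCurrentImage, pd3dImmediateContext and the 640x480 R8 size are from the question, the rest is illustrative):
// Sketch: typical dynamic-texture update. Map() returns the GPU-visible
// pointer and row pitch; the source rows are copied into it one by one.
D3D11_MAPPED_SUBRESOURCE mapped = {};
HRESULT hr = pd3dImmediateContext->Map(g_pCurrentImage, 0, D3D11_MAP_WRITE_DISCARD, 0, &mapped);
if (SUCCEEDED(hr))
{
    const BYTE* src = mImage.GetData();          // 8-bit source data from the question
    BYTE* dst = static_cast<BYTE*>(mapped.pData);
    for (UINT row = 0; row < 480; ++row)
    {
        memcpy(dst + row * mapped.RowPitch, src + row * 640, 640); // one byte per pixel (R8)
    }
    pd3dImmediateContext->Unmap(g_pCurrentImage, 0);
}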
The HLSL Shader:
//-----
// Textures and Samplers
//-----
Texture2D <int> g_txDiffuse : register( t0 );
SamplerState g_samLinear : register( s0 );
//-----
// shader input/output structure
//-----
struct VS_INPUT
{
float4 Position : POSITION; // vertex position
float2 TextureUV : TEXCOORD0;// vertex texture coords
};
struct VS_OUTPUT
{
float4 Position : SV_POSITION; // vertex position
float2 TextureUV : TEXCOORD0; // vertex texture coords
};
//-----
// Vertex shader
//-----
VS_OUTPUT RenderSceneVS( VS_INPUT input )
{
VS_OUTPUT Output;
Output.Position = input.Position;
Output.TextureUV = input.TextureUV;
return Output;
}
//-----
// Pixel Shader
//-----
float4 RenderScenePS( VS_OUTPUT In ) : SV_TARGET
{
int3 loc;
loc.x = 0;
loc.y = 0;
loc.z = 1;
int r = g_txDiffuse.Load(loc);
//float fTest = (float) r;
return float4( In.TextureUV.x, In.TextureUV.y, In.TextureUV.x + In.TextureUV.y, 1);
}
The thing is, I can't even debug it in PIX to see what r results in, because even with Shader optimization disabled, the line int r = ... is never reached
I tested
float fTest = (float) r;
return float4( In.TextureUV.x, In.TextureUV.y, In.TextureUV.x + In.TextureUV.y, fTest);
but this would result in "cannot map expression to pixel shader instruction set", even though it's a float.
So how do I read and use 8-bit integers from a texture, ideally with no sampling at all?
Thanks for any feedback.
Oh my, this is a really old question; I thought it said 2012!
But anyway, as it's still open:
Due to GPUs being optimised for floating-point arithmetic, you probably won't get a great deal of performance advantage from using a Texture2D<int> over a Texture2D<float>.
You could attempt to use a Texture2D<float> and then try:
return float4( In.TextureUV.x, In.TextureUV.y, In.TextureUV.x + In.TextureUV.y, g_txDiffuse.Load(loc));
Also, the line loc.z = 1; should be loc.z = 0; here, because the texture's mip level count is 1 in your case, and mip levels start at 0 for the Load intrinsic in HLSL.