3D Texture-based Volume Rendering - C++

I've been trying to load a series of 2D images, all the same size and stored as raw data, into a 3D texture and then render that 3D texture on screen. As I'm still pretty new to DirectX, I'm not sure what I'm doing wrong. Here's the relevant code:
// Open the image and copy its contents into a buffer
std::ifstream image("head256x256x109", std::ifstream::binary);
int size;
char* buffer;
if (image.is_open()) {
    image.seekg(0, image.end);
    size = image.tellg();
    image.seekg(0, image.beg);
    buffer = new char[size];
    image.read(buffer, size);
    image.close();
} else {
    return false;
}
// Convert the data to RGBA data
// Here we are simply putting the same value to R, G, B and A channels.
char* RGBABuffer = new char[ size*4 ];
for( int nIndx = 0; nIndx < size; ++nIndx )
{
    RGBABuffer[nIndx*4]   = buffer[nIndx];
    RGBABuffer[nIndx*4+1] = buffer[nIndx];
    RGBABuffer[nIndx*4+2] = buffer[nIndx];
    RGBABuffer[nIndx*4+3] = buffer[nIndx];
}
// Create the texture
D3D11_TEXTURE3D_DESC texture_desc;
ZeroMemory(&texture_desc, sizeof(texture_desc));
texture_desc.Width = 256;
texture_desc.Height = 256;
texture_desc.Depth = 109;
texture_desc.MipLevels = 1;
texture_desc.Format = DXGI_FORMAT_R32G32B32A32_FLOAT;
texture_desc.Usage = D3D11_USAGE_DEFAULT;
texture_desc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
D3D11_SUBRESOURCE_DATA image_data;
ZeroMemory(&image_data, sizeof(image_data));
image_data.pSysMem = RGBABuffer;
image_data.SysMemPitch = 256 * 4;
image_data.SysMemSlicePitch = 256 * 256 * 4;
d3dResult = d3dDevice_->CreateTexture3D(&texture_desc, &image_data, &texture_);
// Create a sampler
D3D11_SAMPLER_DESC colorMapDesc;
ZeroMemory( &colorMapDesc, sizeof( colorMapDesc ) );
colorMapDesc.AddressU = D3D11_TEXTURE_ADDRESS_WRAP;
colorMapDesc.AddressV = D3D11_TEXTURE_ADDRESS_WRAP;
colorMapDesc.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
colorMapDesc.ComparisonFunc = D3D11_COMPARISON_NEVER;
colorMapDesc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
colorMapDesc.MaxLOD = D3D11_FLOAT32_MAX;
d3dResult = d3dDevice_->CreateSamplerState( &colorMapDesc, &colorMapSampler_ );
Shader code is as simple as possible:
Texture3D colorMap_ : register( t0 );
SamplerState colorSampler_ : register( s0 );

struct VS_Input
{
    float4 pos  : POSITION;
    float4 tex0 : TEXCOORD0;
};

struct PS_Input
{
    float4 pos  : SV_POSITION;
    float4 tex0 : TEXCOORD0;
};

PS_Input VS_Main( VS_Input vertex )
{
    PS_Input vsOut = ( PS_Input )0;
    vsOut.pos  = vertex.pos;
    vsOut.tex0 = vertex.tex0;
    return vsOut;
}

float4 PS_Main( PS_Input frag ) : SV_TARGET
{
    return colorMap_.Sample( colorSampler_, frag.tex0 );
}
Now I know that I would need to implement some sort of alpha blending for this to work properly, but I hoped I would at least see the first slice of the image with this. However, all I get is the background color that I set in my Render function:
float clearColor[4] = { 0.0f, 0.0f, 0.0f, 1.0f };
d3dContext_->ClearRenderTargetView( backBufferTarget_, clearColor );
Any help on what I might be doing wrong, as well as any further tips on achieving this goal (making a 3D texture out of a series of images and rendering it on screen), would be hugely helpful, as there is very little documentation about this on the web.
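One thing worth double-checking, offered as an observation rather than a confirmed fix: the description above declares DXGI_FORMAT_R32G32B32A32_FLOAT (16 bytes per texel), while RGBABuffer holds one byte per channel and the pitches are computed as width * 4 bytes, so the format and the initial data do not agree. Below is a minimal sketch of a description that matches the 8-bit data; names follow the question's code, and the shader resource view variable is hypothetical.

#include <d3d11.h>

// Hedged sketch: a 3D texture description that matches the 8-bit-per-channel
// RGBABuffer built above. Error handling and the rest of the setup are omitted.
D3D11_TEXTURE3D_DESC texture_desc = {};
texture_desc.Width     = 256;
texture_desc.Height    = 256;
texture_desc.Depth     = 109;
texture_desc.MipLevels = 1;
texture_desc.Format    = DXGI_FORMAT_R8G8B8A8_UNORM;  // 4 bytes per texel, like the source buffer
texture_desc.Usage     = D3D11_USAGE_DEFAULT;
texture_desc.BindFlags = D3D11_BIND_SHADER_RESOURCE;

D3D11_SUBRESOURCE_DATA image_data = {};
image_data.pSysMem          = RGBABuffer;
image_data.SysMemPitch      = 256 * 4;        // bytes per row
image_data.SysMemSlicePitch = 256 * 256 * 4;  // bytes per depth slice

HRESULT hr = d3dDevice_->CreateTexture3D(&texture_desc, &image_data, &texture_);
// A shader resource view would still need to be created and bound before the
// pixel shader can sample the texture, e.g. (hypothetical variable name):
// d3dDevice_->CreateShaderResourceView(texture_, nullptr, &colorMapSRV_);

Note that the snippet in the question never shows a shader resource view being created or bound to slot t0; without one, the sampler has nothing to read.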

Related

How can I change the texture position in DirectX 12

I need to change the position of the texture in the window: it is drawn in the upper-left corner, and I want to place it in the center. In DirectX 11 we can do this with
m_deviceResources->GetD2DDeviceContext()->SetTransform(...), but I haven't found any similar code for DirectX 12. I don't understand how I can change the texture position; maybe I need to change something in the ConstantBuffer or change the shader code.
Texture Code
void CreateTexture(int width, int height, const void* pData)
{
    D3D12_RESOURCE_DESC resourceDesc = {};
    resourceDesc.MipLevels = 1;
    resourceDesc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
    resourceDesc.Width = width;
    resourceDesc.Height = height;
    resourceDesc.Flags = D3D12_RESOURCE_FLAG_NONE;
    resourceDesc.DepthOrArraySize = 1;
    resourceDesc.SampleDesc.Count = 1;
    resourceDesc.SampleDesc.Quality = 0;
    resourceDesc.Dimension = D3D12_RESOURCE_DIMENSION_TEXTURE2D;

    D3D12MA::ALLOCATION_DESC allocationDesc = {};
    allocationDesc.HeapType = D3D12_HEAP_TYPE_DEFAULT;
    D3D12MA::Allocation* alloc = nullptr;
    allocator->CreateResource(
        &allocationDesc,
        &resourceDesc,
        D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE,
        nullptr,
        &alloc,
        IID_PPV_ARGS(&OffscreenTexture));

    UpdateTextureResource(width, height, pData);

    // Describe and create a SRV for the texture.
    D3D12_SHADER_RESOURCE_VIEW_DESC srvDesc = {};
    srvDesc.Shader4ComponentMapping = D3D12_DEFAULT_SHADER_4_COMPONENT_MAPPING;
    srvDesc.Format = resourceDesc.Format;
    srvDesc.ViewDimension = D3D12_SRV_DIMENSION_TEXTURE2D;
    srvDesc.Texture2D.MipLevels = 1;
    Device->CreateShaderResourceView(OffscreenTexture, &srvDesc, OffscreenSrvHeap->GetCPUDescriptorHandleForHeapStart());

    alloc->Release();
}
void UpdateTextureResource(int width, int height, const void* pData)
{
    if (pData == nullptr)
        return;
    if (upload_texture)
        upload_texture->Release();

    auto Barrier = CD3DX12_RESOURCE_BARRIER::Transition(OffscreenTexture, D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE, D3D12_RESOURCE_STATE_COPY_DEST);
    CommandList->ResourceBarrier(1, &Barrier);

    const UINT64 uploadBufferSize = GetRequiredIntermediateSize(OffscreenTexture, 0, 1);
    D3D12_RESOURCE_DESC desc = CD3DX12_RESOURCE_DESC::Buffer(uploadBufferSize);
    D3D12MA::ALLOCATION_DESC allocationDesc = {};
    allocationDesc.HeapType = D3D12_HEAP_TYPE_UPLOAD;
    allocator->CreateResource(
        &allocationDesc,
        &desc,
        D3D12_RESOURCE_STATE_GENERIC_READ,
        NULL,
        &upload_texture,
        __uuidof(ID3D12Resource),
        nullptr);
    if (upload_texture == nullptr)
        return;

    D3D12_SUBRESOURCE_DATA textureData = {};
    textureData.pData = pData;
    textureData.RowPitch = width * 4; // TextureWidth * BytesPerPixel
    textureData.SlicePitch = textureData.RowPitch * height;
    UpdateSubresources(CommandList, OffscreenTexture, upload_texture->GetResource(), 0, 0, 1, &textureData);

    auto barrier = CD3DX12_RESOURCE_BARRIER::Transition(OffscreenTexture, D3D12_RESOURCE_STATE_COPY_DEST, D3D12_RESOURCE_STATE_PIXEL_SHADER_RESOURCE);
    CommandList->ResourceBarrier(1, &barrier);
}
Shader Code
struct PSInput
{
    float4 position : SV_POSITION;
    float2 uv : TEXCOORD;
};

Texture2D g_texture : register(t0);
SamplerState g_sampler : register(s0);

PSInput VSMain(float4 position : POSITION, float4 uv : TEXCOORD)
{
    PSInput result;
    result.position = position;
    result.uv = uv;
    return result;
}

float4 PSMain(PSInput input) : SV_TARGET
{
    return g_texture.Sample(g_sampler, input.uv);
}

DirectX11 Offscreen rendering: output image is flipped

I'm making my own graphics engine. I want to write it in C++ but create the editor UI in C#. So with some defines I disable rendering to a window and do off-screen rendering instead, to pass the data to C#. I have a problem: I understand why it's happening (it's how DirectX creates and stores textures), but I have no clue how to fix it. Here are the results.
Image rendered to the window:
Image rendered to bmp (flipped):
In the first image everything looks good; in the second, as you can see, the Y (and maybe X, not sure) coordinates are flipped. Possibly useful information: I represent normals as color.
Here is my code
Vertex Shader
cbuffer Transformation : register(b0) {
    matrix transformation;
};
cbuffer ViewProjection : register(b1) {
    matrix projection;
    matrix view;
};

struct VS_OUT {
    float2 texcoord : TextureCoordinate;
    float3 normal : Normal;
    float4 position : SV_Position;
};

VS_OUT main(float3 position : Position, float3 normal : Normal, float2 texcoord : TextureCoordinate) {
    matrix tView = transpose(view);
    matrix tProjection = transpose(projection);
    matrix tTransformation = transpose(transformation);
    matrix MVP = mul(tTransformation, mul(tView, tProjection));

    VS_OUT result;
    result.position = mul(float4(position, 1.0f), MVP);
    result.texcoord = texcoord;
    result.normal = normal;
    return result;
}
Pixel Shader
float4 main(float2 texcoord : TextureCoordinate, float3 normal : Normal) : SV_Target
{
    float3 color = (normal + 1) * 0.5f;
    return float4(color.rgb, 1.0f);
}
DirectX code for offscreen rendering initialization
D3D_FEATURE_LEVEL FeatureLevels[] = {
D3D_FEATURE_LEVEL_11_1,
D3D_FEATURE_LEVEL_11_0,
D3D_FEATURE_LEVEL_10_1,
D3D_FEATURE_LEVEL_10_0,
D3D_FEATURE_LEVEL_9_3,
D3D_FEATURE_LEVEL_9_2,
D3D_FEATURE_LEVEL_9_1
};
UINT deviceFlags = 0;
#if defined(DEBUG) || defined(_DEBUG)
deviceFlags |= D3D11_CREATE_DEVICE_DEBUG;
#endif
DirectX11Call(D3D11CreateDevice(
nullptr,
D3D_DRIVER_TYPE_HARDWARE,
nullptr,
deviceFlags,
FeatureLevels,
ARRAYSIZE(FeatureLevels),
D3D11_SDK_VERSION,
&m_Device,
&m_FeatureLevel,
&m_DeviceContext
))
D3D11_TEXTURE2D_DESC renderingDescription = {};
renderingDescription.Width = width;
renderingDescription.Height = height;
renderingDescription.ArraySize = 1;
renderingDescription.SampleDesc.Count = 1;
renderingDescription.Usage = D3D11_USAGE_DEFAULT;
renderingDescription.BindFlags = D3D11_BIND_RENDER_TARGET;
renderingDescription.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
m_Device->CreateTexture2D(&renderingDescription, nullptr, &m_Target);
renderingDescription.BindFlags = 0;
renderingDescription.Usage = D3D11_USAGE_STAGING;
renderingDescription.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
m_Device->CreateTexture2D(&renderingDescription, nullptr, &m_Output);
DirectX11Call(m_Device->CreateRenderTargetView(m_Target.Get(), nullptr, &m_RenderTargetView))
D3D11_DEPTH_STENCIL_DESC depthStencilStateDescription = {};
depthStencilStateDescription.DepthEnable = TRUE;
depthStencilStateDescription.DepthWriteMask = D3D11_DEPTH_WRITE_MASK_ALL;
depthStencilStateDescription.DepthFunc = D3D11_COMPARISON_LESS;
Microsoft::WRL::ComPtr<ID3D11DepthStencilState> depthStencilState;
DirectX11Call(m_Device->CreateDepthStencilState(&depthStencilStateDescription, &depthStencilState))
m_DeviceContext->OMSetDepthStencilState(depthStencilState.Get(), 0);
D3D11_TEXTURE2D_DESC depthStencilDescription = {};
depthStencilDescription.Width = width;
depthStencilDescription.Height = height;
depthStencilDescription.MipLevels = 1;
depthStencilDescription.ArraySize = 1;
depthStencilDescription.Format = DXGI_FORMAT_D32_FLOAT;
depthStencilDescription.SampleDesc.Count = 1;
depthStencilDescription.SampleDesc.Quality = 0;
depthStencilDescription.Usage = D3D11_USAGE_DEFAULT;
depthStencilDescription.BindFlags = D3D11_BIND_DEPTH_STENCIL;
depthStencilDescription.CPUAccessFlags = 0;
depthStencilDescription.MiscFlags = 0;
DirectX11Call(m_Device->CreateTexture2D(&depthStencilDescription, nullptr, &m_DepthStencilBuffer))
D3D11_DEPTH_STENCIL_VIEW_DESC depthStencilViewDescription = {};
depthStencilViewDescription.Format = DXGI_FORMAT_D32_FLOAT;
depthStencilViewDescription.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2D;
depthStencilViewDescription.Texture2D.MipSlice = 0;
DirectX11Call(m_Device->CreateDepthStencilView(m_DepthStencilBuffer.Get(), &depthStencilViewDescription, &m_DepthStencilView))
m_DeviceContext->OMSetRenderTargets(1, m_RenderTargetView.GetAddressOf(), m_DepthStencilView.Get());
m_DeviceContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
D3D11_VIEWPORT viewPort;
viewPort.TopLeftX = 0.0f;
viewPort.TopLeftY = 0.0f;
viewPort.Width = static_cast<float>(width);
viewPort.Height = static_cast<float>(height);
viewPort.MinDepth = 0.0f;
viewPort.MaxDepth = 1.0f;
m_DeviceContext->RSSetViewports(1, &viewPort);
D3D11_RASTERIZER_DESC rasterizerDescription = {};
rasterizerDescription.FillMode = D3D11_FILL_SOLID;
rasterizerDescription.CullMode = D3D11_CULL_FRONT;
Microsoft::WRL::ComPtr<ID3D11RasterizerState> rasterizerState;
DirectX11Call(m_Device->CreateRasterizerState(&rasterizerDescription, &rasterizerState))
m_DeviceContext->RSSetState(rasterizerState.Get());
Code for drawing to texture
m_DeviceContext->Flush();
m_DeviceContext->CopyResource(m_Output.Get(), m_Target.Get());
static const UINT resource_id = D3D11CalcSubresource(0, 0, 0);
m_DeviceContext->Map(m_Output.Get(), resource_id, D3D11_MAP_READ, 0, &m_OutputResource);
The only difference from rendering to a window is that there I also create a swap chain. So my question is: how can I fix this? (Flipping on the CPU is a bad solution; it may cause problems with shaders, like in this example where the sphere gets a different color.)
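One detail worth illustrating from the code above: the mapped staging texture has to be read row by row using RowPitch in any case, and the row order can be reversed at that point if the consumer expects a bottom-up image (BMP files are bottom-up by default, which may well be the flip seen here). Below is a minimal sketch, assuming the DXGI_FORMAT_R8G8B8A8_UNORM target from the question; CopyMappedTexture, dst, and flipVertically are illustrative names.

#include <cstdint>
#include <cstring>
#include <d3d11.h>

// Hedged sketch: copy the mapped staging texture (m_OutputResource in the question)
// into a tightly packed buffer, optionally reversing the row order. DirectX writes
// rows top-down; bottom-up formats such as default BMPs expect the opposite order.
void CopyMappedTexture(const D3D11_MAPPED_SUBRESOURCE& mapped,
                       uint8_t* dst, UINT width, UINT height, bool flipVertically)
{
    const UINT bytesPerRow = width * 4;  // R8G8B8A8 = 4 bytes per pixel
    const uint8_t* src = static_cast<const uint8_t*>(mapped.pData);
    for (UINT y = 0; y < height; ++y)
    {
        const UINT srcRow = flipVertically ? (height - 1 - y) : y;
        // mapped.RowPitch may be larger than bytesPerRow, so index source rows with it.
        std::memcpy(dst + y * bytesPerRow, src + srcRow * mapped.RowPitch, bytesPerRow);
    }
}

Whether flipping at copy time is acceptable depends on the consumer; one common alternative is to render with a Y-flipped projection so the GPU output already matches what the C# side expects.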

C++ DirectX11 Texture On Terrain Not Rendering Properly

I am developing a game engine using the Rastertek tutorials.
My problem is that the terrain texture isn't loading properly.
Pixel Shader:
Texture2D shaderTexture;
SamplerState SampleType;
cbuffer LightBuffer
{
    float4 ambientColor;
    float4 diffuseColor;
    float3 lightDirection;
    float padding;
};

//////////////
// TYPEDEFS //
//////////////
struct PixelInputType
{
    float4 position : SV_POSITION;
    float2 tex : TEXCOORD0;
    float3 normal : NORMAL;
};
////////////////////////////////////////////////////////////////////////////////
// Pixel Shader
////////////////////////////////////////////////////////////////////////////////
float4 TerrainPixelShader(PixelInputType input) : SV_TARGET
{
    float4 textureColor;
    float3 lightDir;
    float lightIntensity;
    float4 color;

    // Sample the pixel color from the texture using the sampler at this texture coordinate location.
    textureColor = shaderTexture.Sample(SampleType, input.tex);

    // Set the default output color to the ambient light value for all pixels.
    color = ambientColor;

    // Invert the light direction for calculations.
    lightDir = -lightDirection;

    // Calculate the amount of light on this pixel.
    lightIntensity = saturate(dot(input.normal, lightDir));
    if (lightIntensity > 0.0f)
    {
        // Determine the final diffuse color based on the diffuse color and the amount of light intensity.
        color += (diffuseColor * lightIntensity);
    }

    // Saturate the final light color.
    color = saturate(color);

    // Multiply the texture pixel and the final light color to get the result.
    color = color * textureColor;

    return color;
}
Vertex Shader:
cbuffer MatrixBuffer
{
    matrix worldMatrix;
    matrix viewMatrix;
    matrix projectionMatrix;
};

//////////////
// TYPEDEFS //
//////////////
struct VertexInputType
{
    float4 position : POSITION;
    float2 tex : TEXCOORD0;
    float3 normal : NORMAL;
};

struct PixelInputType
{
    float4 position : SV_POSITION;
    float2 tex : TEXCOORD0;
    float3 normal : NORMAL;
};

////////////////////////////////////////////////////////////////////////////////
// Vertex Shader
////////////////////////////////////////////////////////////////////////////////
PixelInputType TerrainVertexShader(VertexInputType input)
{
    PixelInputType output;

    // Change the position vector to be 4 units for proper matrix calculations.
    input.position.w = 1.0f;

    // Calculate the position of the vertex against the world, view, and projection matrices.
    output.position = mul(input.position, worldMatrix);
    output.position = mul(output.position, viewMatrix);
    output.position = mul(output.position, projectionMatrix);

    // Store the texture coordinates for the pixel shader.
    output.tex = input.tex;

    // Calculate the normal vector against the world matrix only.
    output.normal = mul(input.normal, (float3x3)worldMatrix);

    // Normalize the normal vector.
    output.normal = normalize(output.normal);

    return output;
}
Terrain Shader Class:
bool TerrainShaderClass::SetShaderParameters(ID3D11DeviceContext* deviceContext, D3DXMATRIX world, D3DXMATRIX view,
D3DXMATRIX projection, D3DXVECTOR4 ambientColor, D3DXVECTOR4 diffuseColor, D3DXVECTOR3 lightDirection,
ID3D11ShaderResourceView* texture)
{
HRESULT result;
D3D11_MAPPED_SUBRESOURCE mappedResource;
unsigned int bufferNumber;
MatrixBufferType* matrixData;
LightBufferType* lightData;
D3DXMatrixTranspose(&world, &world);
D3DXMatrixTranspose(&view, &view);
D3DXMatrixTranspose(&projection, &projection);
result = deviceContext->Map(m_matrixBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedResource);
if (FAILED(result))
{
return false;
}
matrixData = (MatrixBufferType*)mappedResource.pData;
matrixData->world = world;
matrixData->view = view;
matrixData->projection = projection;
deviceContext->Unmap(m_matrixBuffer, 0);
bufferNumber = 0;
deviceContext->VSSetConstantBuffers(bufferNumber, 1, &m_matrixBuffer);
deviceContext->Map(m_lightBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedResource);
lightData = (LightBufferType*)mappedResource.pData;
lightData->ambientColor = ambientColor;
lightData->diffuseColor = diffuseColor;
lightData->lightDirection = lightDirection;
lightData->padding = 0.0f;
deviceContext->Unmap(m_lightBuffer, 0);
bufferNumber = 0;
deviceContext->PSSetConstantBuffers(bufferNumber, 1, &m_lightBuffer);
deviceContext->PSSetShaderResources(0, 1, &texture);
return true;
}
void TerrainShaderClass::OutputShaderErrorMessage(ID3D10Blob* errorMessage, HWND hwnd, LPCSTR shaderFileName)
{
char* compileErrors = (char*)(errorMessage->GetBufferPointer());
unsigned long bufferSize = errorMessage->GetBufferSize();
ofstream fout;
fout.open("shader-error.txt");
for (unsigned long i = 0; i < bufferSize; i++)
{
fout << compileErrors[i];
}
fout.close();
errorMessage->Release();
errorMessage = nullptr;
MessageBox(hwnd, "Error compiling shader. Check shader-error.txt for message.", shaderFileName, MB_OK);
}
void TerrainShaderClass::RenderShader(ID3D11DeviceContext* deviceContext, int indexCount)
{
deviceContext->IASetInputLayout(m_layout);
deviceContext->VSSetShader(m_vertexShader, NULL, 0);
deviceContext->PSSetShader(m_pixelShader, NULL, 0);
deviceContext->PSSetSamplers(0, 1, &m_samplerState);
deviceContext->DrawIndexed(indexCount, 0, 0);
}
bool TerrainShaderClass::InitializeShader(ID3D11Device* device, HWND hwnd, LPCSTR vsFileName, LPCSTR psFileName)
{
HRESULT result;
ID3D10Blob* errorMessage = nullptr;
ID3D10Blob* vertexShaderBuffer = nullptr;
ID3D10Blob* pixelShaderBuffer = nullptr;
D3D11_INPUT_ELEMENT_DESC polygonLayout[3];
unsigned int numElements;
D3D11_SAMPLER_DESC samplerDesc;
D3D11_BUFFER_DESC matrixBufferDesc;
D3D11_BUFFER_DESC lightBufferDesc;
result = D3DX11CompileFromFile(vsFileName, NULL, NULL, "TerrainVertexShader", "vs_5_0", D3D10_SHADER_ENABLE_STRICTNESS,
0, NULL, &vertexShaderBuffer, &errorMessage, NULL);
if (FAILED(result))
{
if (errorMessage)
{
OutputShaderErrorMessage(errorMessage, hwnd, vsFileName);
}
else
{
MessageBox(hwnd, "Missing Shader File", vsFileName, MB_OK);
}
return false;
}
result = D3DX11CompileFromFile(psFileName, NULL, NULL, "TerrainPixelShader", "ps_5_0", D3D10_SHADER_ENABLE_STRICTNESS,
0, NULL, &pixelShaderBuffer, &errorMessage, NULL);
if (FAILED(result))
{
if (errorMessage)
{
OutputShaderErrorMessage(errorMessage, hwnd, psFileName);
}
else
{
MessageBox(hwnd, "Missing Shader File", psFileName, MB_OK);
}
return false;
}
result = device->CreateVertexShader(vertexShaderBuffer->GetBufferPointer(), vertexShaderBuffer->GetBufferSize(), NULL, &m_vertexShader);
if (FAILED(result))
{
return false;
}
result = device->CreatePixelShader(pixelShaderBuffer->GetBufferPointer(), pixelShaderBuffer->GetBufferSize(), NULL, &m_pixelShader);
if (FAILED(result))
{
return false;
}
polygonLayout[0].SemanticName = "POSITION";
polygonLayout[0].SemanticIndex = 0;
polygonLayout[0].Format = DXGI_FORMAT_R32G32B32_FLOAT;
polygonLayout[0].InputSlot = 0;
polygonLayout[0].AlignedByteOffset = 0;
polygonLayout[0].InputSlotClass = D3D11_INPUT_PER_VERTEX_DATA;
polygonLayout[0].InstanceDataStepRate = 0;
polygonLayout[1].SemanticName = "TEXCOORD";
polygonLayout[1].SemanticIndex = 0;
polygonLayout[1].Format = DXGI_FORMAT_R32G32_FLOAT;
polygonLayout[1].InputSlot = 0;
polygonLayout[1].AlignedByteOffset = D3D11_APPEND_ALIGNED_ELEMENT;
polygonLayout[1].InputSlotClass = D3D11_INPUT_PER_VERTEX_DATA;
polygonLayout[1].InstanceDataStepRate = 0;
polygonLayout[2].SemanticName = "NORMAL";
polygonLayout[2].SemanticIndex = 0;
polygonLayout[2].Format = DXGI_FORMAT_R32G32B32_FLOAT;
polygonLayout[2].InputSlot = 0;
polygonLayout[2].AlignedByteOffset = D3D11_APPEND_ALIGNED_ELEMENT;
polygonLayout[2].InputSlotClass = D3D11_INPUT_PER_VERTEX_DATA;
polygonLayout[2].InstanceDataStepRate = 0;
numElements = sizeof(polygonLayout) / sizeof(polygonLayout[0]);
result = device->CreateInputLayout(polygonLayout, numElements, vertexShaderBuffer->GetBufferPointer(), vertexShaderBuffer->GetBufferSize(), &m_layout);
if (FAILED(result))
{
return false;
}
vertexShaderBuffer->Release();
vertexShaderBuffer = nullptr;
pixelShaderBuffer->Release();
pixelShaderBuffer = nullptr;
samplerDesc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
samplerDesc.AddressU = D3D11_TEXTURE_ADDRESS_WRAP;
samplerDesc.AddressV = D3D11_TEXTURE_ADDRESS_WRAP;
samplerDesc.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
samplerDesc.MipLODBias = 0.0f;
samplerDesc.MaxAnisotropy = 1;
samplerDesc.ComparisonFunc = D3D11_COMPARISON_ALWAYS;
samplerDesc.BorderColor[0] = 0;
samplerDesc.BorderColor[1] = 0;
samplerDesc.BorderColor[2] = 0;
samplerDesc.BorderColor[3] = 0;
samplerDesc.MinLOD = 0;
samplerDesc.MaxLOD = D3D11_FLOAT32_MAX;
result = device->CreateSamplerState(&samplerDesc, &m_samplerState);
if (FAILED(result))
{
return false;
}
matrixBufferDesc.Usage = D3D11_USAGE_DYNAMIC;
matrixBufferDesc.ByteWidth = sizeof(MatrixBufferType);
matrixBufferDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
matrixBufferDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
matrixBufferDesc.MiscFlags = 0;
matrixBufferDesc.StructureByteStride = 0;
result = device->CreateBuffer(&matrixBufferDesc, NULL, &m_matrixBuffer);
if (FAILED(result))
{
return false;
}
//ByteWidth must be a multiple of 16 if using D3D11_BIND_CONSTANT_BUFFER or CreateBuffer will fail.
lightBufferDesc.Usage = D3D11_USAGE_DYNAMIC;
lightBufferDesc.ByteWidth = sizeof(LightBufferType);
lightBufferDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
lightBufferDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
lightBufferDesc.MiscFlags = 0;
lightBufferDesc.StructureByteStride = 0;
device->CreateBuffer(&lightBufferDesc, NULL, &m_lightBuffer);
if (FAILED(result))
{
return false;
}
return true;
}
The texture is supposed to look like the one displayed at the link, but instead it looks really weird (I can't seem to take a screenshot; I will add one if possible).
I tried looking into other questions here, but none solved the issue.
I am still fairly new to DX11, so any help is much appreciated.
Edit: Here is a screenshot (left side: how it's supposed to look, right side: my game)
I'm looking at your screenshot, and not only is your texture not rendering correctly, but your normals aren't either; otherwise you would at least have diffuse shading it properly. I would surmise that although your stride is correct, what you are pulling out of the buffer for UV and Normal is not aligned properly. My first thoughts.
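To illustrate what that alignment check means in practice, here is a hedged sketch of a CPU-side vertex struct that matches the input layout declared in InitializeShader above (POSITION as three floats, TEXCOORD as two, NORMAL as three, in that order). The struct name and the asserts are illustrative, not taken from the question.

#include <cstddef>
#include <DirectXMath.h>

// Hedged sketch: the vertex layout the D3D11_INPUT_ELEMENT_DESC array above expects.
// With D3D11_APPEND_ALIGNED_ELEMENT, TEXCOORD starts right after the 12-byte position
// and NORMAL right after the 8-byte texcoord, so the CPU-side struct must match.
struct TerrainVertex
{
    DirectX::XMFLOAT3 position;  // POSITION,  offset 0
    DirectX::XMFLOAT2 tex;       // TEXCOORD0, offset 12
    DirectX::XMFLOAT3 normal;    // NORMAL,    offset 20
};

static_assert(offsetof(TerrainVertex, tex) == 12, "TEXCOORD must start at byte 12");
static_assert(offsetof(TerrainVertex, normal) == 20, "NORMAL must start at byte 20");
static_assert(sizeof(TerrainVertex) == 32, "stride passed to IASetVertexBuffers must be 32");

If the terrain vertex buffer interleaves its fields in a different order or with different types, the UVs and normals will be read from the wrong bytes even though the overall stride is right, which is exactly the symptom described above.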

Texture shows as black

I am trying to cover a triangle with a texture instead of a color, but it looks like there is something wrong with the texture initialization...
Texture loading is fine; I tried it with OpenGL and it worked perfectly, but it isn't working in DirectX 11.
void DX11Texture2D::Create(Texture_Data Data, Texture_Desc Desc)
{
D3D11_TEXTURE2D_DESC texDesc;
ZeroMemory(&texDesc, sizeof(texDesc));
texDesc.Width = Data.width;
texDesc.Height = Data.height;
texDesc.MipLevels = 0;
texDesc.ArraySize = 1;
texDesc.Format = GetDXTextureFormat(Desc.Format);
texDesc.SampleDesc.Count = 1;
texDesc.SampleDesc.Quality = 0;
texDesc.Usage = D3D11_USAGE_DEFAULT;
texDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
texDesc.CPUAccessFlags = 0;
texDesc.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA subData;
subData.pSysMem = Data.databuf;
subData.SysMemPitch = Data.width * 4;
subData.SysMemSlicePitch = Data.width * Data.height * 4;
Core::Internals::DX11Renderer::GetDevice()->CreateTexture2D(&texDesc, &subData, &textureID);
//TODO: Add a way to disable and enable mip map
Core::Internals::DX11Renderer::GetDevice()->CreateShaderResourceView(textureID, 0, &resourceView);
D3D11_SAMPLER_DESC samplerDesc;
ZeroMemory(&samplerDesc, sizeof(D3D11_SAMPLER_DESC));
samplerDesc.AddressU = GetDXTextureWrap(Desc.Wrap);
samplerDesc.AddressV = GetDXTextureWrap(Desc.Wrap);
samplerDesc.AddressW = GetDXTextureWrap(Desc.Wrap);
samplerDesc.Filter = GetDXTextureFilter(Desc.Filter);
samplerDesc.ComparisonFunc = D3D11_COMPARISON_ALWAYS;
samplerDesc.MinLOD = 0;
samplerDesc.MaxLOD = D3D11_FLOAT32_MAX;
Core::Internals::DX11Renderer::GetDevice()->CreateSamplerState(&samplerDesc, &samplerState);
}
void DX11Texture2D::Bind(unsigned int index)
{
Core::Internals::DX11Renderer::GetContext()->PSSetShaderResources(index, 1, &resourceView);
Core::Internals::DX11Renderer::GetContext()->PSSetSamplers(index, 1, &samplerState);
}
DXGI_FORMAT DX11Texture2D::GetDXTextureFormat(API::TextureFormat format)
{
switch (format)
{
case API::TextureFormat::R8: return DXGI_FORMAT_R8_UNORM;
case API::TextureFormat::R8G8: return DXGI_FORMAT_R8G8_UNORM;
case API::TextureFormat::R8G8B8: return DXGI_FORMAT_R8G8B8A8_UNORM;
case API::TextureFormat::R8G8B8A8: return DXGI_FORMAT_R8G8B8A8_UNORM;
default: return DXGI_FORMAT_R8G8B8A8_UNORM;
}
}
D3D11_TEXTURE_ADDRESS_MODE DX11Texture2D::GetDXTextureWrap(API::TextureWrap textureWrap)
{
switch (textureWrap)
{
case API::TextureWrap::Repeat: return D3D11_TEXTURE_ADDRESS_WRAP;
case API::TextureWrap::MirroredReapeat: return D3D11_TEXTURE_ADDRESS_MIRROR;
case API::TextureWrap::ClampToEdge: return D3D11_TEXTURE_ADDRESS_CLAMP;
case API::TextureWrap::ClampToBorder: return D3D11_TEXTURE_ADDRESS_BORDER;
default: return D3D11_TEXTURE_ADDRESS_WRAP;
}
}
D3D11_FILTER DX11Texture2D::GetDXTextureFilter(API::TextureFilter textureFilter)
{
//TODO: Add more texture filter types to control both min and mag
switch (textureFilter)
{
case API::TextureFilter::Nearest: return D3D11_FILTER_MIN_MAG_MIP_POINT;
case API::TextureFilter::Linear: return D3D11_FILTER_MIN_MAG_MIP_LINEAR;
default: return D3D11_FILTER_MIN_MAG_MIP_POINT;
}
}
Also, here are my shaders.
Triangle.vs:
struct VertexInputType
{
    float3 position : POSITION;
    float3 color : COLOR;
    float2 tex : TEXCOORD;
};

struct PixelInputType
{
    float4 position : SV_POSITION;
    float2 tex : TEXCOORD;
};

PixelInputType main(VertexInputType input)
{
    PixelInputType output;
    // Calculate the position of the vertex against the world, view, and projection matrices.
    output.position = float4(input.position, 1);
    // Store the input texture for the pixel shader to use.
    output.tex = input.tex;
    return output;
}
Triangle.ps
struct PixelInputType
{
    float4 position : SV_POSITION;
    float2 tex : TEXCOORD;
};

Texture2D shaderTexture : register(t0);
SamplerState SampleType : register(s0);

float4 main(PixelInputType input) : SV_TARGET
{
    return shaderTexture.Sample(SampleType, input.tex);
}
Cheers,
Zlixine.
It seems you are creating a texture with a full mip map chain (MipLevels = 0), but looking at the initial data it looks like you provide only one mip level (if your sampler tries to access another mip level for which no data was provided, it will be black).
In that case you should enforce a single mip level by using:
texDesc.MipLevels = 1;
The line:
subData.SysMemSlicePitch = Data.width * Data.height * 4;
is not needed, as this parameter is ignored for 2D textures (you can leave it at 0 or any value).
Also, you should check result codes when creating resources eg:
HRESULT hr = Core::Internals::DX11Renderer::GetDevice()->CreateTexture2D(&texDesc, &subData, &textureID);
if (FAILED(hr))
{
    // Handle issue if texture creation did fail
}
Also make sure to call D3D11CreateDevice with D3D11_CREATE_DEVICE_DEBUG,
so that when debugging you get meaningful error messages in the Visual Studio output window (if creation fails, you will have an explanation instead of only an E_INVALIDARG HRESULT, which is not really useful).
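For reference, here is a minimal sketch of creating the device with the debug layer enabled only in debug builds; the helper name and structure are illustrative.

#include <d3d11.h>

// Hedged sketch: create a D3D11 device with the debug layer in debug builds,
// so resource-creation failures are explained in the Visual Studio output window.
HRESULT CreateDeviceWithDebugLayer(ID3D11Device** device, ID3D11DeviceContext** context)
{
    UINT flags = 0;
#if defined(_DEBUG)
    flags |= D3D11_CREATE_DEVICE_DEBUG;  // requires the SDK layers / Graphics Tools to be installed
#endif
    return D3D11CreateDevice(
        nullptr,                    // default adapter
        D3D_DRIVER_TYPE_HARDWARE,
        nullptr,                    // no software rasterizer module
        flags,
        nullptr, 0,                 // default feature levels
        D3D11_SDK_VERSION,
        device,
        nullptr,                    // resulting feature level not needed here
        context);
}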

Using unsigned byte textures with DirectX 10 / 11

I am attempting to do some processing on a texture in the pixel shader. The data for the texture comes from a chunk of 8-bit data in memory. The problem I am facing is how to read that data in the shader.
Code to create the texture and resource view:
In OnD3D11CreateDevice:
D3D11_TEXTURE2D_DESC tDesc;
tDesc.Height = 480;
tDesc.Width = 640;
tDesc.Usage = D3D11_USAGE_DYNAMIC;
tDesc.MipLevels = 1;
tDesc.ArraySize = 1;
tDesc.SampleDesc.Count = 1;
tDesc.SampleDesc.Quality = 0;
tDesc.Format = DXGI_FORMAT_R8_UINT;
tDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
tDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
tDesc.MiscFlags = 0;
V_RETURN(pd3dDevice->CreateTexture2D(&tDesc, NULL, &g_pCurrentImage));
D3D11_SHADER_RESOURCE_VIEW_DESC rvDesc;
g_pCurrentImage->GetDesc(&tDesc);
rvDesc.Format = DXGI_FORMAT_R8_UINT;
rvDesc.Texture2D.MipLevels = tDesc.MipLevels;
rvDesc.Texture2D.MostDetailedMip = tDesc.MipLevels - 1;
rvDesc.ViewDimension = D3D_SRV_DIMENSION_TEXTURE2D;
V_RETURN(pd3dDevice->CreateShaderResourceView(g_pCurrentImage, &rvDesc, &g_pImageRV));
In OnD3D11FrameRender:
HRESULT okay;
if( !g_updateDone ) {
D3D11_MAPPED_SUBRESOURCE resource;
resource.pData = mImage.GetData();
resource.RowPitch = 640;
resource.DepthPitch = 1;
okay = pd3dImmediateContext->Map(g_pCurrentImage, 0, D3D11_MAP_WRITE_DISCARD, 0, &resource);
g_updateDone = true;
}
pd3dImmediateContext->PSSetShaderResources(0, 1, &g_pImageRV);
This returns no errors so far; everything seems to work.
The HLSL Shader:
//-----
// Textures and Samplers
//-----
Texture2D <int> g_txDiffuse : register( t0 );
SamplerState g_samLinear : register( s0 );
//-----
// shader input/output structure
//-----
struct VS_INPUT
{
    float4 Position  : POSITION;   // vertex position
    float2 TextureUV : TEXCOORD0;  // vertex texture coords
};
struct VS_OUTPUT
{
    float4 Position  : SV_POSITION; // vertex position
    float2 TextureUV : TEXCOORD0;   // vertex texture coords
};
//-----
// Vertex shader
//-----
VS_OUTPUT RenderSceneVS( VS_INPUT input )
{
    VS_OUTPUT Output;
    Output.Position = input.Position;
    Output.TextureUV = input.TextureUV;
    return Output;
}
//-----
// Pixel Shader
//-----
float4 RenderScenePS( VS_OUTPUT In ) : SV_TARGET
{
    int3 loc;
    loc.x = 0;
    loc.y = 0;
    loc.z = 1;
    int r = g_txDiffuse.Load(loc);
    //float fTest = (float) r;
    return float4( In.TextureUV.x, In.TextureUV.y, In.TextureUV.x + In.TextureUV.y, 1);
}
The thing is, I can't even debug it in PIX to see what r ends up being, because even with shader optimization disabled, the line int r = ... is never reached.
I tested
float fTest = (float) r;
return float4( In.TextureUV.x, In.TextureUV.y, In.TextureUV.x + In.TextureUV.y, fTest);
but this would result in "cannot map expression to pixel shader instruction set", even though it's a float.
So how do I read and use 8-bit integers from a texture, if possible with no sampling at all?
Thanks for any feedback.
Oh my, this is a really old question; I thought it said 2012!
But anyway, as it's still open:
Since GPUs are optimised for floating-point arithmetic, you probably won't get a great deal of performance advantage by using a Texture2D<int> over a Texture2D<float>.
You could attempt to use a Texture2D<float> and then try:
return float4( In.TextureUV.x, In.TextureUV.y, In.TextureUV.x + In.TextureUV.y, g_txDiffuse.Load(loc));
Also, the line
loc.z = 1;
should use 0 here: the texture has only one mip level in your case, and mip levels start at 0 in HLSL for the Load intrinsic.
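One further observation about the update code in the question, offered as a hedged side note rather than part of the original answer: with a D3D11_USAGE_DYNAMIC texture, Map fills in the D3D11_MAPPED_SUBRESOURCE structure itself, so assigning pData and RowPitch before calling Map does not upload anything. The usual pattern, sketched below with illustrative names, is to call Map first and then copy the source rows into the pointer it returns.

#include <cstdint>
#include <cstring>
#include <d3d11.h>

// Hedged sketch: upload a tightly packed 8-bit image (width x height, one byte per
// pixel, matching DXGI_FORMAT_R8_UINT) into a dynamic texture. Error handling and
// the surrounding frame logic are omitted.
void UploadR8Image(ID3D11DeviceContext* context, ID3D11Texture2D* texture,
                   const uint8_t* srcData, UINT width, UINT height)
{
    D3D11_MAPPED_SUBRESOURCE mapped = {};
    if (SUCCEEDED(context->Map(texture, 0, D3D11_MAP_WRITE_DISCARD, 0, &mapped)))
    {
        for (UINT y = 0; y < height; ++y)
        {
            // mapped.RowPitch may be larger than width, so copy row by row.
            std::memcpy(static_cast<uint8_t*>(mapped.pData) + y * mapped.RowPitch,
                        srcData + y * width, width);
        }
        context->Unmap(texture, 0);
    }
}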