I am trying to make a texture cover a triangle instead of a plain color, but it looks like there is something wrong with my texture initialization...
Texture loading itself is fine; I tried it with OpenGL and it worked perfectly, but it isn't working in DirectX 11.
void DX11Texture2D::Create(Texture_Data Data, Texture_Desc Desc)
{
D3D11_TEXTURE2D_DESC texDesc;
ZeroMemory(&texDesc, sizeof(texDesc));
texDesc.Width = Data.width;
texDesc.Height = Data.height;
texDesc.MipLevels = 0;
texDesc.ArraySize = 1;
texDesc.Format = GetDXTextureFormat(Desc.Format);
texDesc.SampleDesc.Count = 1;
texDesc.SampleDesc.Quality = 0;
texDesc.Usage = D3D11_USAGE_DEFAULT;
texDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
texDesc.CPUAccessFlags = 0;
texDesc.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA subData;
subData.pSysMem = Data.databuf;
subData.SysMemPitch = Data.width * 4;
subData.SysMemSlicePitch = Data.width * Data.height * 4;
Core::Internals::DX11Renderer::GetDevice()->CreateTexture2D(&texDesc, &subData, &textureID);
//TODO: Add a way to disable and enable mip map
Core::Internals::DX11Renderer::GetDevice()->CreateShaderResourceView(textureID, 0, &resourceView);
D3D11_SAMPLER_DESC samplerDesc;
ZeroMemory(&samplerDesc, sizeof(D3D11_SAMPLER_DESC));
samplerDesc.AddressU = GetDXTextureWrap(Desc.Wrap);
samplerDesc.AddressV = GetDXTextureWrap(Desc.Wrap);
samplerDesc.AddressW = GetDXTextureWrap(Desc.Wrap);
samplerDesc.Filter = GetDXTextureFilter(Desc.Filter);
samplerDesc.ComparisonFunc = D3D11_COMPARISON_ALWAYS;
samplerDesc.MinLOD = 0;
samplerDesc.MaxLOD = D3D11_FLOAT32_MAX;
Core::Internals::DX11Renderer::GetDevice()->CreateSamplerState(&samplerDesc, &samplerState);
}
void DX11Texture2D::Bind(unsigned int index)
{
Core::Internals::DX11Renderer::GetContext()->PSSetShaderResources(index, 1, &resourceView);
Core::Internals::DX11Renderer::GetContext()->PSSetSamplers(index, 1, &samplerState);
}
DXGI_FORMAT DX11Texture2D::GetDXTextureFormat(API::TextureFormat format)
{
switch (format)
{
case API::TextureFormat::R8: return DXGI_FORMAT_R8_UNORM;
case API::TextureFormat::R8G8: return DXGI_FORMAT_R8G8_UNORM;
case API::TextureFormat::R8G8B8: return DXGI_FORMAT_R8G8B8A8_UNORM;
case API::TextureFormat::R8G8B8A8: return DXGI_FORMAT_R8G8B8A8_UNORM;
default: return DXGI_FORMAT_R8G8B8A8_UNORM;
}
}
D3D11_TEXTURE_ADDRESS_MODE DX11Texture2D::GetDXTextureWrap(API::TextureWrap textureWrap)
{
switch (textureWrap)
{
case API::TextureWrap::Repeat: return D3D11_TEXTURE_ADDRESS_WRAP;
case API::TextureWrap::MirroredReapeat: return D3D11_TEXTURE_ADDRESS_MIRROR;
case API::TextureWrap::ClampToEdge: return D3D11_TEXTURE_ADDRESS_CLAMP;
case API::TextureWrap::ClampToBorder: return D3D11_TEXTURE_ADDRESS_BORDER;
default: return D3D11_TEXTURE_ADDRESS_WRAP;
}
}
D3D11_FILTER DX11Texture2D::GetDXTextureFilter(API::TextureFilter textureFilter)
{
//TODO: Add more texture filter types to control both min and mag
switch (textureFilter)
{
case API::TextureFilter::Nearest: return D3D11_FILTER_MIN_MAG_MIP_POINT;
case API::TextureFilter::Linear: return D3D11_FILTER_MIN_MAG_MIP_LINEAR;
default: return D3D11_FILTER_MIN_MAG_MIP_POINT;
}
}
Also, here are my shaders.
Triangle.vs:
struct VertexInputType
{
float3 position : POSITION;
float3 color : COLOR;
float2 tex : TEXCOORD;
};
struct PixelInputType
{
float4 position : SV_POSITION;
float2 tex : TEXCOORD;
};
PixelInputType main(VertexInputType input)
{
PixelInputType output;
// Calculate the position of the vertex against the world, view, and projection matrices.
output.position = float4(input.position, 1);
// Store the input texture for the pixel shader to use.
output.tex = input.tex;
return output;
}
Triangle.ps:
struct PixelInputType
{
float4 position : SV_POSITION;
float2 tex : TEXCOORD;
};
Texture2D shaderTexture : register(t0);
SamplerState SampleType : register(s0);
float4 main(PixelInputType input) : SV_TARGET
{
return shaderTexture.Sample(SampleType, input.tex);
}
Cheers,
Zlixine.
It seems you are creating a texture with a full mip map chain, but looking at the initial data it looks like you provide data for only the top mip level (if your sampler tries to access another mip level for which no data was provided, it will be black).
In that case you should enforce a single mip level by using:
texDesc.MipLevels = 1;
The line:
subData.SysMemSlicePitch = Data.width * Data.height * 4;
is not needed, as this parameter is ignored for a 2D texture (you can leave it at 0 or any other value).
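As an aside, for the TODO about enabling mip maps: if you do want a full mip chain later, one common approach (a rough sketch under my own assumptions, not code from your engine) is to create the texture without initial data, upload the top level with UpdateSubresource, and let the GPU generate the rest:
texDesc.MipLevels = 0;                                   // 0 = allocate the full mip chain
texDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET; // required by GenerateMips
texDesc.MiscFlags = D3D11_RESOURCE_MISC_GENERATE_MIPS;
Core::Internals::DX11Renderer::GetDevice()->CreateTexture2D(&texDesc, nullptr, &textureID);
Core::Internals::DX11Renderer::GetDevice()->CreateShaderResourceView(textureID, nullptr, &resourceView);
// Upload the top mip, then let the GPU fill in the remaining levels.
Core::Internals::DX11Renderer::GetContext()->UpdateSubresource(textureID, 0, nullptr, Data.databuf, Data.width * 4, 0);
Core::Internals::DX11Renderer::GetContext()->GenerateMips(resourceView);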
Also, you should check the result codes when creating resources, e.g.:
HRESULT hr = Core::Internals::DX11Renderer::GetDevice()->CreateTexture2D(&texDesc, &subData, &textureID);
if (FAILED(hr))
{
//Handle issue if texture creation did fail
}
And make sure to create your device with D3D11CreateDevice and the D3D11_CREATE_DEVICE_DEBUG flag,
so that when debugging you get meaningful error messages in your Visual Studio output window (if creation fails, you will see an explanation instead of only an E_INVALIDARG HRESULT, which is not very useful).
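For reference, a minimal sketch of creating the device with the debug layer enabled (variable names here are illustrative; your engine presumably wraps this inside DX11Renderer):
UINT flags = 0;
#if defined(_DEBUG)
flags |= D3D11_CREATE_DEVICE_DEBUG; // detailed error messages appear in the debug output
#endif
ID3D11Device* device = nullptr;
ID3D11DeviceContext* context = nullptr;
HRESULT hr = D3D11CreateDevice(
    nullptr,                  // default adapter
    D3D_DRIVER_TYPE_HARDWARE,
    nullptr,
    flags,
    nullptr, 0,               // default feature levels
    D3D11_SDK_VERSION,
    &device,
    nullptr,                  // chosen feature level not needed here
    &context);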
Related
I'm making my own graphics engine. I want to be able to write it in C++ but create the UI for an editor in C#. So with some defines I disable rendering to a window and instead do off-screen rendering to pass the data to C#. I have a problem; I understand why it's happening (it's how DirectX creates textures and stores them), but I have no clue how to fix it. Here are the results.
Image rendered to the window:
Image rendered to a bmp (flipped):
In the first image everything looks good, and in the second, as you can see, the Y coordinate is flipped, and maybe X too (not sure). Possibly useful information: I represent normals as color.
Here is my code.
Vertex Shader:
cbuffer Transformation : register(b0) {
matrix transformation;
};
cbuffer ViewProjection : register(b1) {
matrix projection;
matrix view;
};
struct VS_OUT {
float2 texcoord : TextureCoordinate;
float3 normal : Normal;
float4 position : SV_Position;
};
VS_OUT main(float3 position : Position, float3 normal : Normal, float2 texcoord : TextureCoordinate) {
matrix tView = transpose(view);
matrix tProjection = transpose(projection);
matrix tTransformation = transpose(transformation);
matrix MVP = mul(tTransformation, mul(tView, tProjection));
VS_OUT result;
result.position = mul(float4(position, 1.0f), MVP);
result.texcoord = texcoord;
result.normal = normal;
return result;
}
Pixel Shader:
float4 main(float2 texcoord : TextureCoordinate, float3 normal : Normal) : SV_Target
{
float3 color = (normal + 1) * 0.5f;
return float4(color.rgb, 1.0f);
}
DirectX code for offscreen rendering initialization
D3D_FEATURE_LEVEL FeatureLevels[] = {
D3D_FEATURE_LEVEL_11_1,
D3D_FEATURE_LEVEL_11_0,
D3D_FEATURE_LEVEL_10_1,
D3D_FEATURE_LEVEL_10_0,
D3D_FEATURE_LEVEL_9_3,
D3D_FEATURE_LEVEL_9_2,
D3D_FEATURE_LEVEL_9_1
};
UINT deviceFlags = 0;
#if defined(DEBUG) || defined(_DEBUG)
deviceFlags |= D3D11_CREATE_DEVICE_DEBUG;
#endif
DirectX11Call(D3D11CreateDevice(
nullptr,
D3D_DRIVER_TYPE_HARDWARE,
nullptr,
deviceFlags,
FeatureLevels,
ARRAYSIZE(FeatureLevels),
D3D11_SDK_VERSION,
&m_Device,
&m_FeatureLevel,
&m_DeviceContext
))
D3D11_TEXTURE2D_DESC renderingDescription = {};
renderingDescription.Width = width;
renderingDescription.Height = height;
renderingDescription.ArraySize = 1;
renderingDescription.SampleDesc.Count = 1;
renderingDescription.Usage = D3D11_USAGE_DEFAULT;
renderingDescription.BindFlags = D3D11_BIND_RENDER_TARGET;
renderingDescription.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
m_Device->CreateTexture2D(&renderingDescription, nullptr, &m_Target);
renderingDescription.BindFlags = 0;
renderingDescription.Usage = D3D11_USAGE_STAGING;
renderingDescription.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
m_Device->CreateTexture2D(&renderingDescription, nullptr, &m_Output);
DirectX11Call(m_Device->CreateRenderTargetView(m_Target.Get(), nullptr, &m_RenderTargetView))
D3D11_DEPTH_STENCIL_DESC depthStencilStateDescription = {};
depthStencilStateDescription.DepthEnable = TRUE;
depthStencilStateDescription.DepthWriteMask = D3D11_DEPTH_WRITE_MASK_ALL;
depthStencilStateDescription.DepthFunc = D3D11_COMPARISON_LESS;
Microsoft::WRL::ComPtr<ID3D11DepthStencilState> depthStencilState;
DirectX11Call(m_Device->CreateDepthStencilState(&depthStencilStateDescription, &depthStencilState))
m_DeviceContext->OMSetDepthStencilState(depthStencilState.Get(), 0);
D3D11_TEXTURE2D_DESC depthStencilDescription = {};
depthStencilDescription.Width = width;
depthStencilDescription.Height = height;
depthStencilDescription.MipLevels = 1;
depthStencilDescription.ArraySize = 1;
depthStencilDescription.Format = DXGI_FORMAT_D32_FLOAT;
depthStencilDescription.SampleDesc.Count = 1;
depthStencilDescription.SampleDesc.Quality = 0;
depthStencilDescription.Usage = D3D11_USAGE_DEFAULT;
depthStencilDescription.BindFlags = D3D11_BIND_DEPTH_STENCIL;
depthStencilDescription.CPUAccessFlags = 0;
depthStencilDescription.MiscFlags = 0;
DirectX11Call(m_Device->CreateTexture2D(&depthStencilDescription, nullptr, &m_DepthStencilBuffer))
D3D11_DEPTH_STENCIL_VIEW_DESC depthStencilViewDescription = {};
depthStencilViewDescription.Format = DXGI_FORMAT_D32_FLOAT;
depthStencilViewDescription.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2D;
depthStencilViewDescription.Texture2D.MipSlice = 0;
DirectX11Call(m_Device->CreateDepthStencilView(m_DepthStencilBuffer.Get(), &depthStencilViewDescription, &m_DepthStencilView))
m_DeviceContext->OMSetRenderTargets(1, m_RenderTargetView.GetAddressOf(), m_DepthStencilView.Get());
m_DeviceContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
D3D11_VIEWPORT viewPort;
viewPort.TopLeftX = 0.0f;
viewPort.TopLeftY = 0.0f;
viewPort.Width = static_cast<float>(width);
viewPort.Height = static_cast<float>(height);
viewPort.MinDepth = 0.0f;
viewPort.MaxDepth = 1.0f;
m_DeviceContext->RSSetViewports(1, &viewPort);
D3D11_RASTERIZER_DESC rasterizerDescription = {};
rasterizerDescription.FillMode = D3D11_FILL_SOLID;
rasterizerDescription.CullMode = D3D11_CULL_FRONT;
Microsoft::WRL::ComPtr<ID3D11RasterizerState> rasterizerState;
DirectX11Call(m_Device->CreateRasterizerState(&rasterizerDescription, &rasterizerState))
m_DeviceContext->RSSetState(rasterizerState.Get());
Code for drawing to the texture and reading it back:
m_DeviceContext->Flush();
m_DeviceContext->CopyResource(m_Output.Get(), m_Target.Get());
static const UINT resource_id = D3D11CalcSubresource(0, 0, 0);
m_DeviceContext->Map(m_Output.Get(), resource_id, D3D11_MAP_READ, 0, &m_OutputResource);
The difference from rendering to a window is that there I also create a swap chain. So my question is: how can I fix this? (Flipping on the CPU is a bad solution, and it may cause problems with shaders, like in this example where I get a different color for the sphere.)
I'm trying to use FreeType under DirectX 11 in order to draw some text on my screen. For the moment I'm able to convert an FT_Bitmap* to an ID3D11Texture2D*, then I create an ID3D11ShaderResourceView*.
I then use a shader to draw this shader resource view. The problem is that this code only draws the last letter of the text four times, and I don't really see why.
First, here is how I render a UI element:
Graph.cpp :
void Graph::DrawSprite(Object* p_object)
{
/*...*/
// Problem here
Text* text = nullptr;
text = p_object->GetComponent<Text>();
if (text)
_graphicAPI->Draw(text->GetBuffer());
/*...*/
}
GraphicAPI :
void GraphicAPI::Draw(const TextBuffer& p_text)
{
_shaders._orthoWindow->Render(_deviceContext);
_shaders._sprite->Render(_deviceContext, _shaders._orthoWindow->GetIndexCount(),
_managers._font->GetTextureView(p_text._font), p_text._matrix, _camera2D);
}
SpriteShader.cpp :
bool SpriteShader::Render(ID3D11DeviceContext* p_deviceContext, unsigned int p_indexCount, ID3D11ShaderResourceView* p_texView,
const Math::Mat4& p_matrix, const CameraBuffer& p_camera)
{
bool result = SetShaderParameters(p_deviceContext, p_texView, p_matrix, p_camera._view, p_camera._projection);
if (!result)
return false;
RenderShader(p_deviceContext, p_indexCount);
return true;
}
bool SpriteShader::SetShaderParameters(ID3D11DeviceContext* p_deviceContext, ID3D11ShaderResourceView* p_texView,
const Math::Mat4& p_worldMatrix, const Math::Mat4& p_viewMatrix, const Math::Mat4& p_projectionMatrix)
{
HRESULT result;
D3D11_MAPPED_SUBRESOURCE mappedResource;
unsigned int bufferNumber;
MatrixBufferType* dataMatrixPtr;
result = p_deviceContext->Map(_matrixBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedResource);
if (FAILED(result))
return false;
dataMatrixPtr = (MatrixBufferType*)mappedResource.pData;
dataMatrixPtr->world = p_worldMatrix;
dataMatrixPtr->view = p_viewMatrix;
dataMatrixPtr->projection = p_projectionMatrix;
p_deviceContext->Unmap(_matrixBuffer, 0);
bufferNumber = 0;
p_deviceContext->VSSetConstantBuffers(bufferNumber, 1, &_matrixBuffer);
p_deviceContext->PSSetShaderResources(0, 1, &p_texView);
return true;
}
void SpriteShader::RenderShader(ID3D11DeviceContext* p_deviceContext, unsigned int p_indexCount)
{
p_deviceContext->IASetInputLayout(_layout);
p_deviceContext->VSSetShader(_vertexShader, NULL, 0);
p_deviceContext->PSSetShader(_pixelShader, NULL, 0);
p_deviceContext->PSSetSamplers(0, 1, &_sampleState);
p_deviceContext->DrawIndexed(p_indexCount, 0, 0);
}
Here is how I load a TTF font and convert it to an ID3D11ShaderResourceView*.
FontManager.cpp :
bool FontManager::LoadFont(ID3D11Device* p_device, const std::string p_extension, const char* p_fileName)
{
if (p_extension == ".ttf")
{
Font* font = new Font();
if (!font)
return false;
if (!font->LoadTTF(p_device, _library, p_fileName))
{
delete font;
return false;
}
// sets text (tmp)
font->RenderFont(p_device, "ASDASD", Math::Vec2(100, 100));
_fonts.push_back(font);
return true;
}
return false;
}
For the Font class, I based my code on this tutorial; CreateShaderResourceView() corresponds to my_draw_bitmap().
Font.cpp :
bool Font::LoadTTF(ID3D11Device* p_device, FT_Library p_library, const char* p_fileName)
{
_fileName = p_fileName;
if (FT_New_Face(p_library, p_fileName, 0, &_face))
return false;
if (FT_Set_Pixel_Sizes(_face, 0, DEFAULT_FONT_SIZE))
return false;
return true;
}
void Font::RenderFont(ID3D11Device* p_device, const char* p_text, Math::Vec2 p_position)
{
FT_GlyphSlot slot = _face->glyph;
FT_Vector pen;
pen.x = p_position._x;
pen.y = p_position._y;
FT_Matrix matrix;
float angle = (/*90*/0.0 / 360) * 3.14159 * 2;
matrix.xx = (FT_Fixed)(cos(angle) * 0x10000L);
matrix.xy = (FT_Fixed)(-sin(angle) * 0x10000L);
matrix.yx = (FT_Fixed)(sin(angle) * 0x10000L);
matrix.yy = (FT_Fixed)(cos(angle) * 0x10000L);
for (unsigned int i = 0; i < strlen(p_text); ++i)
{
FT_Set_Transform(_face, &matrix, &pen);
if (FT_Load_Char(_face, p_text[i], FT_LOAD_RENDER))
continue;
if (!CreateShaderResourceView(p_device, &slot->bitmap))
return;
// Increment pen position
pen.x += slot->advance.x >> 6;
}
}
bool Font::CreateShaderResourceView(ID3D11Device* p_device, FT_Bitmap* p_bitmap)
{
D3D11_TEXTURE2D_DESC textureDesc;
ZeroMemory(&textureDesc, sizeof(textureDesc));
textureDesc.Width = p_bitmap->width;
textureDesc.Height = p_bitmap->rows;
textureDesc.MipLevels = 1;
textureDesc.ArraySize = 1;
textureDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
textureDesc.SampleDesc.Count = 1;
textureDesc.SampleDesc.Quality = 0;
textureDesc.Usage = D3D11_USAGE_DEFAULT;
textureDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
textureDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
textureDesc.MiscFlags = 0;
ID3D11Texture2D* texture2D;
ZeroMemory(&texture2D, sizeof(texture2D));
D3D11_SUBRESOURCE_DATA resourceData;
resourceData.pSysMem = p_bitmap->buffer;
resourceData.SysMemPitch = p_bitmap->pitch;
resourceData.SysMemSlicePitch = 0;
HRESULT res = p_device->CreateTexture2D(&textureDesc, &resourceData, &texture2D);
if (FAILED(res))
return false;
D3D11_SHADER_RESOURCE_VIEW_DESC shaderResourceViewDesc;
shaderResourceViewDesc.Format = textureDesc.Format;
shaderResourceViewDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
shaderResourceViewDesc.Texture2D.MostDetailedMip = 0;
shaderResourceViewDesc.Texture2D.MipLevels = 1;
HRESULT result = p_device->CreateShaderResourceView(texture2D, &shaderResourceViewDesc, &_shaderResourceView);
if (FAILED(result))
return false;
return true;
}
And the shaders :
Sprite.ps (I use this shader to draw UI elements):
Texture2D spriteTexture;
SamplerState SampleType;
struct PixelInputType
{
float4 position : SV_POSITION;
float2 tex : TEXCOORD0;
};
float4 SpritePixelShader(PixelInputType input) : SV_TARGET
{
float4 textureColor;
textureColor = spriteTexture.Sample(SampleType, input.tex);
return textureColor;
}
Sprite.vs :
cbuffer MatrixBuffer
{
matrix worldMatrix;
matrix viewMatrix;
matrix projectionMatrix;
};
struct VertexInputType
{
float4 position : POSITION;
float2 tex : TEXCOORD0;
};
struct PixelInputType
{
float4 position : SV_POSITION;
float2 tex : TEXCOORD0;
};
PixelInputType SpriteVertexShader(VertexInputType input)
{
PixelInputType output;
input.position.w = 1.0f;
output.position = mul(input.position, worldMatrix);
output.position = mul(output.position, viewMatrix);
output.position = mul(output.position, projectionMatrix);
output.tex = input.tex;
return output;
}
Here is what I get:
This guy (here) had the same issue, but the solution doesn't work for me. Does anyone know how to solve this? I've been stuck on this for a week now...
It seems that inside of for (unsigned int i = 0; i < strlen(p_text); ++i) you render the text letter by letter and recreate the shader resource view on each iteration, so at the end you are left with a shader resource view containing only the last letter's texture.
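One way to address this (a rough sketch under my own assumptions, not code from your project) is to blit every rendered glyph into a single CPU-side atlas buffer and create one texture and one shader resource view from that atlas, instead of one per character:
// Hypothetical atlas big enough for one line of text, assuming 8-bit grayscale glyphs.
std::vector<unsigned char> atlas(atlasWidth * atlasHeight, 0);
int penX = 0;
for (unsigned int i = 0; i < strlen(p_text); ++i)
{
    if (FT_Load_Char(_face, p_text[i], FT_LOAD_RENDER))
        continue;
    FT_Bitmap* bmp = &_face->glyph->bitmap;
    // Copy the glyph rows into the atlas at the current pen position
    // (baseline alignment via bitmap_left/bitmap_top is omitted for brevity).
    for (unsigned int y = 0; y < bmp->rows; ++y)
        memcpy(&atlas[y * atlasWidth + penX], bmp->buffer + y * bmp->pitch, bmp->width);
    penX += _face->glyph->advance.x >> 6;
}
// Then create a single ID3D11Texture2D from 'atlas' (for example DXGI_FORMAT_R8_UNORM
// with SysMemPitch = atlasWidth) and one shader resource view for the whole string.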
I am developing a game engine using the Rastertek tutorials.
My problem is that the terrain texture isn't loading properly.
Pixel Shader:
Texture2D shaderTexture;
SamplerState SampleType;
cbuffer LightBuffer
{
float4 ambientColor;
float4 diffuseColor;
float3 lightDirection;
float padding;
};
//////////////
// TYPEDEFS //
//////////////
struct PixelInputType
{
float4 position : SV_POSITION;
float2 tex : TEXCOORD0;
float3 normal : NORMAL;
};
////////////////////////////////////////////////////////////////////////////////
// Pixel Shader
////////////////////////////////////////////////////////////////////////////////
float4 TerrainPixelShader(PixelInputType input) : SV_TARGET
{
float4 textureColor;
float3 lightDir;
float lightIntensity;
float4 color;
// Sample the pixel color from the texture using the sampler at this texture coordinate location.
textureColor = shaderTexture.Sample(SampleType, input.tex);
// Set the default output color to the ambient light value for all pixels.
color = ambientColor;
// Invert the light direction for calculations.
lightDir = -lightDirection;
// Calculate the amount of light on this pixel.
lightIntensity = saturate(dot(input.normal, lightDir));
if(lightIntensity > 0.0f)
{
// Determine the final diffuse color based on the diffuse color and the amount of light intensity.
color += (diffuseColor * lightIntensity);
}
// Saturate the final light color.
color = saturate(color);
// Multiply the texture pixel and the final light color to get the result.
color = color * textureColor;
return color;
}
Vertex Shader:
cbuffer MatrixBuffer
{
matrix worldMatrix;
matrix viewMatrix;
matrix projectionMatrix;
};
//////////////
// TYPEDEFS //
//////////////
struct VertexInputType
{
float4 position : POSITION;
float2 tex : TEXCOORD0;
float3 normal : NORMAL;
};
struct PixelInputType
{
float4 position : SV_POSITION;
float2 tex : TEXCOORD0;
float3 normal : NORMAL;
};
////////////////////////////////////////////////////////////////////////////////
// Vertex Shader
////////////////////////////////////////////////////////////////////////////////
PixelInputType TerrainVertexShader(VertexInputType input)
{
PixelInputType output;
// Change the position vector to be 4 units for proper matrix calculations.
input.position.w = 1.0f;
// Calculate the position of the vertex against the world, view, and projection matrices.
output.position = mul(input.position, worldMatrix);
output.position = mul(output.position, viewMatrix);
output.position = mul(output.position, projectionMatrix);
// Store the texture coordinates for the pixel shader.
output.tex = input.tex;
// Calculate the normal vector against the world matrix only.
output.normal = mul(input.normal, (float3x3)worldMatrix);
// Normalize the normal vector.
output.normal = normalize(output.normal);
return output;
}
Terrain Shader Class:
bool TerrainShaderClass::SetShaderParameters(ID3D11DeviceContext* deviceContext, D3DXMATRIX world, D3DXMATRIX view,
D3DXMATRIX projection, D3DXVECTOR4 ambientColor, D3DXVECTOR4 diffuseColor, D3DXVECTOR3 lightDirection,
ID3D11ShaderResourceView* texture)
{
HRESULT result;
D3D11_MAPPED_SUBRESOURCE mappedResource;
unsigned int bufferNumber;
MatrixBufferType* matrixData;
LightBufferType* lightData;
D3DXMatrixTranspose(&world, &world);
D3DXMatrixTranspose(&view, &view);
D3DXMatrixTranspose(&projection, &projection);
result = deviceContext->Map(m_matrixBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedResource);
if (FAILED(result))
{
return false;
}
matrixData = (MatrixBufferType*)mappedResource.pData;
matrixData->world = world;
matrixData->view = view;
matrixData->projection = projection;
deviceContext->Unmap(m_matrixBuffer, 0);
bufferNumber = 0;
deviceContext->VSSetConstantBuffers(bufferNumber, 1, &m_matrixBuffer);
deviceContext->Map(m_lightBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedResource);
lightData = (LightBufferType*)mappedResource.pData;
lightData->ambientColor = ambientColor;
lightData->diffuseColor = diffuseColor;
lightData->lightDirection = lightDirection;
lightData->padding = 0.0f;
deviceContext->Unmap(m_lightBuffer, 0);
bufferNumber = 0;
deviceContext->PSSetConstantBuffers(bufferNumber, 1, &m_lightBuffer);
deviceContext->PSSetShaderResources(0, 1, &texture);
return true;
}
void TerrainShaderClass::OutputShaderErrorMessage(ID3D10Blob* errorMessage, HWND hwnd, LPCSTR shaderFileName)
{
char* compileErrors = (char*)(errorMessage->GetBufferPointer());
unsigned long bufferSize = errorMessage->GetBufferSize();
ofstream fout;
fout.open("shader-error.txt");
for (unsigned long i = 0; i < bufferSize; i++)
{
fout << compileErrors[i];
}
fout.close();
errorMessage->Release();
errorMessage = nullptr;
MessageBox(hwnd, "Error compiling shader. Check shader-error.txt for message.", shaderFileName, MB_OK);
}
void TerrainShaderClass::RenderShader(ID3D11DeviceContext* deviceContext, int indexCount)
{
deviceContext->IASetInputLayout(m_layout);
deviceContext->VSSetShader(m_vertexShader, NULL, 0);
deviceContext->PSSetShader(m_pixelShader, NULL, 0);
deviceContext->PSSetSamplers(0, 1, &m_samplerState);
deviceContext->DrawIndexed(indexCount, 0, 0);
}
bool TerrainShaderClass::InitializeShader(ID3D11Device* device, HWND hwnd, LPCSTR vsFileName, LPCSTR psFileName)
{
HRESULT result;
ID3D10Blob* errorMessage = nullptr;
ID3D10Blob* vertexShaderBuffer = nullptr;
ID3D10Blob* pixelShaderBuffer = nullptr;
D3D11_INPUT_ELEMENT_DESC polygonLayout[3];
unsigned int numElements;
D3D11_SAMPLER_DESC samplerDesc;
D3D11_BUFFER_DESC matrixBufferDesc;
D3D11_BUFFER_DESC lightBufferDesc;
result = D3DX11CompileFromFile(vsFileName, NULL, NULL, "TerrainVertexShader", "vs_5_0", D3D10_SHADER_ENABLE_STRICTNESS,
0, NULL, &vertexShaderBuffer, &errorMessage, NULL);
if (FAILED(result))
{
if (errorMessage)
{
OutputShaderErrorMessage(errorMessage, hwnd, vsFileName);
}
else
{
MessageBox(hwnd, "Missing Shader File", vsFileName, MB_OK);
}
return false;
}
result = D3DX11CompileFromFile(psFileName, NULL, NULL, "TerrainPixelShader", "ps_5_0", D3D10_SHADER_ENABLE_STRICTNESS,
0, NULL, &pixelShaderBuffer, &errorMessage, NULL);
if (FAILED(result))
{
if (errorMessage)
{
OutputShaderErrorMessage(errorMessage, hwnd, psFileName);
}
else
{
MessageBox(hwnd, "Missing Shader File", psFileName, MB_OK);
}
return false;
}
result = device->CreateVertexShader(vertexShaderBuffer->GetBufferPointer(), vertexShaderBuffer->GetBufferSize(), NULL, &m_vertexShader);
if (FAILED(result))
{
return false;
}
result = device->CreatePixelShader(pixelShaderBuffer->GetBufferPointer(), pixelShaderBuffer->GetBufferSize(), NULL, &m_pixelShader);
if (FAILED(result))
{
return false;
}
polygonLayout[0].SemanticName = "POSITION";
polygonLayout[0].SemanticIndex = 0;
polygonLayout[0].Format = DXGI_FORMAT_R32G32B32_FLOAT;
polygonLayout[0].InputSlot = 0;
polygonLayout[0].AlignedByteOffset = 0;
polygonLayout[0].InputSlotClass = D3D11_INPUT_PER_VERTEX_DATA;
polygonLayout[0].InstanceDataStepRate = 0;
polygonLayout[1].SemanticName = "TEXCOORD";
polygonLayout[1].SemanticIndex = 0;
polygonLayout[1].Format = DXGI_FORMAT_R32G32_FLOAT;
polygonLayout[1].InputSlot = 0;
polygonLayout[1].AlignedByteOffset = D3D11_APPEND_ALIGNED_ELEMENT;
polygonLayout[1].InputSlotClass = D3D11_INPUT_PER_VERTEX_DATA;
polygonLayout[1].InstanceDataStepRate = 0;
polygonLayout[2].SemanticName = "NORMAL";
polygonLayout[2].SemanticIndex = 0;
polygonLayout[2].Format = DXGI_FORMAT_R32G32B32_FLOAT;
polygonLayout[2].InputSlot = 0;
polygonLayout[2].AlignedByteOffset = D3D11_APPEND_ALIGNED_ELEMENT;
polygonLayout[2].InputSlotClass = D3D11_INPUT_PER_VERTEX_DATA;
polygonLayout[2].InstanceDataStepRate = 0;
numElements = sizeof(polygonLayout) / sizeof(polygonLayout[0]);
result = device->CreateInputLayout(polygonLayout, numElements, vertexShaderBuffer->GetBufferPointer(), vertexShaderBuffer->GetBufferSize(), &m_layout);
if (FAILED(result))
{
return false;
}
vertexShaderBuffer->Release();
vertexShaderBuffer = nullptr;
pixelShaderBuffer->Release();
pixelShaderBuffer = nullptr;
samplerDesc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
samplerDesc.AddressU = D3D11_TEXTURE_ADDRESS_WRAP;
samplerDesc.AddressV = D3D11_TEXTURE_ADDRESS_WRAP;
samplerDesc.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
samplerDesc.MipLODBias = 0.0f;
samplerDesc.MaxAnisotropy = 1;
samplerDesc.ComparisonFunc = D3D11_COMPARISON_ALWAYS;
samplerDesc.BorderColor[0] = 0;
samplerDesc.BorderColor[1] = 0;
samplerDesc.BorderColor[2] = 0;
samplerDesc.BorderColor[3] = 0;
samplerDesc.MinLOD = 0;
samplerDesc.MaxLOD = D3D11_FLOAT32_MAX;
result = device->CreateSamplerState(&samplerDesc, &m_samplerState);
if (FAILED(result))
{
return false;
}
matrixBufferDesc.Usage = D3D11_USAGE_DYNAMIC;
matrixBufferDesc.ByteWidth = sizeof(MatrixBufferType);
matrixBufferDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
matrixBufferDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
matrixBufferDesc.MiscFlags = 0;
matrixBufferDesc.StructureByteStride = 0;
result = device->CreateBuffer(&matrixBufferDesc, NULL, &m_matrixBuffer);
if (FAILED(result))
{
return false;
}
//ByteWidth must be a multiple of 16 if using D3D11_BIND_CONSTANT_BUFFER or CreateBuffer will fail.
lightBufferDesc.Usage = D3D11_USAGE_DYNAMIC;
lightBufferDesc.ByteWidth = sizeof(LightBufferType);
lightBufferDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
lightBufferDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
lightBufferDesc.MiscFlags = 0;
lightBufferDesc.StructureByteStride = 0;
device->CreateBuffer(&lightBufferDesc, NULL, &m_lightBuffer);
if (FAILED(result))
{
return false;
}
return true;
}
The texture is supposed to look as displayed in the link, but instead it looks really weird (I can't seem to take a screenshot; I will add one if possible).
I tried looking into other questions here, but none solved the issue.
I am still fairly new to DX11, so any help is much appreciated.
Edit: Here is a screenshot (left side: how it is supposed to look, right side: my game).
I'm looking at your screenshot, and not only is your texture not rendering correctly, your normals aren't either, otherwise the diffuse term would at least shade it properly. I would surmise that although your stride is correct, what you are pulling out of the buffer for UV and Normal is not aligned properly. Those are my first thoughts.
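To illustrate the point (an assumption about your vertex layout, not taken from your code): if the vertex is packed as float3 position, float2 texcoord, float3 normal, the explicit offsets are 0, 12 and 20 bytes and the stride is 32. If D3D11_APPEND_ALIGNED_ELEMENT or the stride you pass to IASetVertexBuffers does not match how the struct is actually laid out in the vertex buffer, the UVs and normals will be read from the wrong bytes:
D3D11_INPUT_ELEMENT_DESC layout[] =
{
    // SemanticName, SemanticIndex, Format, InputSlot, AlignedByteOffset, InputSlotClass, InstanceDataStepRate
    { "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0,  0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
    { "TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT,    0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 },
    { "NORMAL",   0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 20, D3D11_INPUT_PER_VERTEX_DATA, 0 },
};
// Matching vertex stride: 3 + 2 + 3 floats = 32 bytes, passed to IASetVertexBuffers.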
I've been trying to get a series of 2D images, all the same size and formatted as raw data, loaded into a 3D texture, and then render that 3D texture on the screen. As I'm still pretty new to DirectX, I'm not sure what I'm doing wrong. Here's the relevant code:
// Open the image and copy its contents into a buffer
std::ifstream image("head256x256x109", std::ifstream::binary);
int size;
char* buffer;
if (image.is_open()) {
image.seekg(0, image.end);
size = image.tellg();
image.seekg(0, image.beg);
buffer = new char[size];
image.read(buffer, size);
image.close();
} else {
return false;
}
// Convert the data to RGBA data
// Here we are simply putting the same value to R, G, B and A channels.
char* RGBABuffer = new char[ size*4 ];
for( int nIndx = 0; nIndx < size; ++nIndx )
{
RGBABuffer[nIndx*4] = buffer[nIndx];
RGBABuffer[nIndx*4+1] = buffer[nIndx];
RGBABuffer[nIndx*4+2] = buffer[nIndx];
RGBABuffer[nIndx*4+3] = buffer[nIndx];
}
// Create the texture
D3D11_TEXTURE3D_DESC texture_desc;
ZeroMemory(&texture_desc, sizeof(texture_desc));
texture_desc.Width = 256;
texture_desc.Height = 256;
texture_desc.Depth = 109;
texture_desc.MipLevels = 1;
texture_desc.Format = DXGI_FORMAT_R32G32B32A32_FLOAT;
texture_desc.Usage = D3D11_USAGE_DEFAULT;
texture_desc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
D3D11_SUBRESOURCE_DATA image_data;
ZeroMemory(&image_data, sizeof(image_data));
image_data.pSysMem = RGBABuffer;
image_data.SysMemPitch = 256 * 4;
image_data.SysMemSlicePitch = 256 * 256 * 4;
d3dResult = d3dDevice_->CreateTexture3D(&texture_desc, &image_data, &texture_);
// Create a sampler
D3D11_SAMPLER_DESC colorMapDesc;
ZeroMemory( &colorMapDesc, sizeof( colorMapDesc ) );
colorMapDesc.AddressU = D3D11_TEXTURE_ADDRESS_WRAP;
colorMapDesc.AddressV = D3D11_TEXTURE_ADDRESS_WRAP;
colorMapDesc.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
colorMapDesc.ComparisonFunc = D3D11_COMPARISON_NEVER;
colorMapDesc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
colorMapDesc.MaxLOD = D3D11_FLOAT32_MAX;
d3dResult = d3dDevice_->CreateSamplerState( &colorMapDesc, &colorMapSampler_ );
Shader code is as simple as possible:
Texture3D colorMap_ : register( t0 );
SamplerState colorSampler_ : register( s0 );
struct VS_Input
{
float4 pos : POSITION;
float4 tex0 : TEXCOORD0;
};
struct PS_Input
{
float4 pos : SV_POSITION;
float4 tex0 : TEXCOORD0;
};
PS_Input VS_Main( VS_Input vertex )
{
PS_Input vsOut = ( PS_Input )0;
vsOut.pos = vertex.pos;
vsOut.tex0 = vertex.tex0;
return vsOut;
}
float4 PS_Main( PS_Input frag ) : SV_TARGET
{
return colorMap_.Sample( colorSampler_, frag.tex0 );
}
Now I know that I would need to implement some sort of alpha blending for this to work properly, but I hoped I would at least get the first slice of the picture shown this way. However, all I get is the background color that I set in my Render function:
float clearColor[4] = { 0.0f, 0.0f, 0.0f, 1.0f };
d3dContext_->ClearRenderTargetView( backBufferTarget_, clearColor );
Any help on what I might be doing wrong, as well as any further tips on achieving this goal (making a 3D texture out of a series of images and rendering it on screen), would be hugely helpful, as there is very little documentation about this on the web.
I am attempting to do some processing on a texture in the pixel shader. The data for the texture comes from a chunk of 8-bit data in memory. The problem I am facing is how to read that data in the shader.
Code to create the texture and resource view:
In OnD3D11CreateDevice:
D3D11_TEXTURE2D_DESC tDesc;
tDesc.Height = 480;
tDesc.Width = 640;
tDesc.Usage = D3D11_USAGE_DYNAMIC;
tDesc.MipLevels = 1;
tDesc.ArraySize = 1;
tDesc.SampleDesc.Count = 1;
tDesc.SampleDesc.Quality = 0;
tDesc.Format = DXGI_FORMAT_R8_UINT;
tDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
tDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
tDesc.MiscFlags = 0;
V_RETURN(pd3dDevice->CreateTexture2D(&tDesc, NULL, &g_pCurrentImage));
D3D11_SHADER_RESOURCE_VIEW_DESC rvDesc;
g_pCurrentImage->GetDesc(&tDesc);
rvDesc.Format = DXGI_FORMAT_R8_UINT;
rvDesc.Texture2D.MipLevels = tDesc.MipLevels;
rvDesc.Texture2D.MostDetailedMip = tDesc.MipLevels - 1;
rvDesc.ViewDimension = D3D_SRV_DIMENSION_TEXTURE2D;
V_RETURN(pd3dDevice->CreateShaderResourceView(g_pCurrentImage, &rvDesc, &g_pImageRV));
in OnD3D11FrameRender:
HRESULT okay;
if( !g_updateDone ) {
D3D11_MAPPED_SUBRESOURCE resource;
resource.pData = mImage.GetData();
resource.RowPitch = 640;
resource.DepthPitch = 1;
okay = pd3dImmediateContext->Map(g_pCurrentImage, 0, D3D11_MAP_WRITE_DISCARD, 0, &resource);
g_updateDone = true;
}
pd3dImmediateContext->PSSetShaderResources(0, 1, &g_pImageRV);
This returns no errors so far, everything seems to work.
The HLSL Shader:
//-----
// Textures and Samplers
//-----
Texture2D <int> g_txDiffuse : register( t0 );
SamplerState g_samLinear : register( s0 );
//-----
// shader input/output structure
//-----
struct VS_INPUT
{
float4 Position : POSITION; // vertex position
float2 TextureUV : TEXCOORD0;// vertex texture coords
};
struct VS_OUTPUT
{
float4 Position : SV_POSITION; // vertex position
float2 TextureUV : TEXCOORD0; // vertex texture coords
};
//-----
// Vertex shader
//-----
VS_OUTPUT RenderSceneVS( VS_INPUT input )
{
VS_OUTPUT Output;
Output.Position = input.Position;
Output.TextureUV = input.TextureUV;
return Output;
}
//-----
// Pixel Shader
//-----
float4 RenderScenePS( VS_OUTPUT In ) : SV_TARGET
{
int3 loc;
loc.x = 0;
loc.y = 0;
loc.z = 1;
int r = g_txDiffuse.Load(loc);
//float fTest = (float) r;
return float4( In.TextureUV.x, In.TextureUV.y, In.TextureUV.x + In.TextureUV.y, 1);
}
The thing is, I can't even debug it in PIX to see what r results in, because even with Shader optimization disabled, the line int r = ... is never reached
I tested
float fTest = (float) r;
return float4( In.TextureUV.x, In.TextureUV.y, In.TextureUV.x + In.TextureUV.y, fTest);
but this would result in "cannot map expression to pixel shader instruction set", even though it's a float.
So how do I read and use 8-bit integers from a texture, preferably with no sampling at all?
Thanks for any feedback.
Oh my, this is a really old question, I thought it said 2012!
But anyway, as it's still open:
Due to the nature of GPUs being optimised for floating-point arithmetic, you probably won't get a great deal of performance advantage from using a Texture2D<int> over a Texture2D<float>.
You could attempt to use a Texture2D<float> and then try:
return float4( In.TextureUV.x, In.TextureUV.y, In.TextureUV.x + In.TextureUV.y, g_txDiffuse.Load(loc));
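A sketch of that suggestion, assuming the texture is created as DXGI_FORMAT_R8_UNORM instead of DXGI_FORMAT_R8_UINT (the 8-bit value then arrives in the shader as a float in the range [0, 1]):
tDesc.Format = DXGI_FORMAT_R8_UNORM;   // instead of DXGI_FORMAT_R8_UINT
rvDesc.Format = DXGI_FORMAT_R8_UNORM;
// In HLSL the resource can then be declared as Texture2D<float> (or just Texture2D),
// and g_txDiffuse.Load(loc) or g_txDiffuse.Sample(g_samLinear, uv) returns a
// normalized value in [0, 1].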
loc.z = 1;
should be 0 here, because the texture has only one mip level in your case, and mip levels start at 0 for the Load intrinsic in HLSL.
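For completeness, a small HLSL sketch of what the body of RenderScenePS could look like to read the texel without any sampling (the pixel coordinates here are derived from the UVs under the assumption of a 640x480 texture, as in your description):
int2 pixel = int2(In.TextureUV * float2(640.0f, 480.0f)); // UV -> texel coordinates
int  r     = g_txDiffuse.Load(int3(pixel, 0));            // third component is the mip level
return float4(r / 255.0f, 0.0f, 0.0f, 1.0f);              // visualize the 8-bit value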