I am attempting to do some processing on a texture in the pixel shader. The data for the texture comes from a chunk of 8-bit data in memory. The problem I am facing is how to read that data in the shader.
Code to create the texture and resource view:
In OnD3D11CreateDevice:
D3D11_TEXTURE2D_DESC tDesc;
tDesc.Height = 480;
tDesc.Width = 640;
tDesc.Usage = D3D11_USAGE_DYNAMIC;
tDesc.MipLevels = 1;
tDesc.ArraySize = 1;
tDesc.SampleDesc.Count = 1;
tDesc.SampleDesc.Quality = 0;
tDesc.Format = DXGI_FORMAT_R8_UINT;
tDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
tDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
tDesc.MiscFlags = 0;
V_RETURN(pd3dDevice->CreateTexture2D(&tDesc, NULL, &g_pCurrentImage));
D3D11_SHADER_RESOURCE_VIEW_DESC rvDesc;
g_pCurrentImage->GetDesc(&tDesc);
rvDesc.Format = DXGI_FORMAT_R8_UINT;
rvDesc.Texture2D.MipLevels = tDesc.MipLevels;
rvDesc.Texture2D.MostDetailedMip = tDesc.MipLevels - 1;
rvDesc.ViewDimension = D3D_SRV_DIMENSION_TEXTURE2D;
V_RETURN(pd3dDevice->CreateShaderResourceView(g_pCurrentImage, &rvDesc, &g_pImageRV));
In OnD3D11FrameRender:
HRESULT okay;
if( !g_updateDone ) {
D3D11_MAPPED_SUBRESOURCE resource;
resource.pData = mImage.GetData();
resource.RowPitch = 640;
resource.DepthPitch = 1;
okay = pd3dImmediateContext->Map(g_pCurrentImage, 0, D3D11_MAP_WRITE_DISCARD, 0, &resource);
g_updateDone = true;
}
pd3dImmediateContext->PSSetShaderResources(0, 1, &g_pImageRV);
This returns no errors so far, everything seems to work.
The HLSL Shader:
//-----
// Textures and Samplers
//-----
Texture2D <int> g_txDiffuse : register( t0 );
SamplerState g_samLinear : register( s0 );
//-----
// shader input/output structure
//-----
struct VS_INPUT
{
float4 Position : POSITION; // vertex position
float2 TextureUV : TEXCOORD0;// vertex texture coords
};
struct VS_OUTPUT
{
float4 Position : SV_POSITION; // vertex position
float2 TextureUV : TEXCOORD0; // vertex texture coords
};
//-----
// Vertex shader
//-----
VS_OUTPUT RenderSceneVS( VS_INPUT input )
{
VS_OUTPUT Output;
Output.Position = input.Position;
Output.TextureUV = input.TextureUV;
return Output;
}
//-----
// Pixel Shader
//-----
float4 RenderScenePS( VS_OUTPUT In ) : SV_TARGET
{
int3 loc;
loc.x = 0;
loc.y = 0;
loc.z = 1;
int r = g_txDiffuse.Load(loc);
//float fTest = (float) r;
return float4( In.TextureUV.x, In.TextureUV.y, In.TextureUV.x + In.TextureUV.y, 1);
}
The thing is, I can't even debug it in PIX to see what r ends up being, because even with shader optimization disabled, the line int r = ... is never reached.
I tested
float fTest = (float) r;
return float4( In.TextureUV.x, In.TextureUV.y, In.TextureUV.x + In.TextureUV.y, fTest);
but this would result in "cannot map expression to pixel shader instruction set", even though it's a float.
So how do I read and use 8-bit integers from a texture, preferably with no sampling at all?
Thanks for any feedback.
Oh my, this is a really old question; I thought it said 2012!
But anyway, as it's still open:
Because GPUs are optimised for floating-point arithmetic, you probably won't get a great deal of performance advantage from using a Texture2D<int> over a Texture2D<float>.
You could attempt to use a Texture2D<float> and then try:
return float4( In.TextureUV.x, In.TextureUV.y, In.TextureUV.x + In.TextureUV.y, g_txDiffuse.Load(loc));
loc.z = 1;
This should be 0 here: the z component passed to Load is the mip level, your texture has only one mip level, and mip levels start at 0 for the HLSL Load intrinsic.
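If you do go the Texture2D<float> route, the resource itself also needs a format the hardware returns as float. A minimal sketch of the matching C++-side change, assuming the rest of the original setup stays the same (this is my suggestion, not something from the original post):
// Use a normalized 8-bit format so the shader can declare Texture2D<float>
// and read values in [0,1] (i.e. byteValue / 255.0f).
tDesc.Format  = DXGI_FORMAT_R8_UNORM;   // instead of DXGI_FORMAT_R8_UINT
rvDesc.Format = DXGI_FORMAT_R8_UNORM;   // the SRV format must match the resource
// In HLSL, g_txDiffuse.Load(int3(x, y, 0)) then yields a float in [0,1].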
I am trying to make a texture cover a triangle instead of a plain color, but it looks like there is something wrong with the texture initialization...
Loading the texture data is fine; I tried it with OpenGL and it worked perfectly, however it isn't working in DirectX 11.
void DX11Texture2D::Create(Texture_Data Data, Texture_Desc Desc)
{
D3D11_TEXTURE2D_DESC texDesc;
ZeroMemory(&texDesc, sizeof(texDesc));
texDesc.Width = Data.width;
texDesc.Height = Data.height;
texDesc.MipLevels = 0;
texDesc.ArraySize = 1;
texDesc.Format = GetDXTextureFormat(Desc.Format);
texDesc.SampleDesc.Count = 1;
texDesc.SampleDesc.Quality = 0;
texDesc.Usage = D3D11_USAGE_DEFAULT;
texDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
texDesc.CPUAccessFlags = 0;
texDesc.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA subData;
subData.pSysMem = Data.databuf;
subData.SysMemPitch = Data.width * 4;
subData.SysMemSlicePitch = Data.width * Data.height * 4;
Core::Internals::DX11Renderer::GetDevice()->CreateTexture2D(&texDesc, &subData, &textureID);
//TODO: Add a way to disable and enable mip map
Core::Internals::DX11Renderer::GetDevice()->CreateShaderResourceView(textureID, 0, &resourceView);
D3D11_SAMPLER_DESC samplerDesc;
ZeroMemory(&samplerDesc, sizeof(D3D11_SAMPLER_DESC));
samplerDesc.AddressU = GetDXTextureWrap(Desc.Wrap);
samplerDesc.AddressV = GetDXTextureWrap(Desc.Wrap);
samplerDesc.AddressW = GetDXTextureWrap(Desc.Wrap);
samplerDesc.Filter = GetDXTextureFilter(Desc.Filter);
samplerDesc.ComparisonFunc = D3D11_COMPARISON_ALWAYS;
samplerDesc.MinLOD = 0;
samplerDesc.MaxLOD = D3D11_FLOAT32_MAX;
Core::Internals::DX11Renderer::GetDevice()->CreateSamplerState(&samplerDesc, &samplerState);
}
void DX11Texture2D::Bind(unsigned int index)
{
Core::Internals::DX11Renderer::GetContext()->PSSetShaderResources(index, 1, &resourceView);
Core::Internals::DX11Renderer::GetContext()->PSSetSamplers(index, 1, &samplerState);
}
DXGI_FORMAT DX11Texture2D::GetDXTextureFormat(API::TextureFormat format)
{
switch (format)
{
case API::TextureFormat::R8: return DXGI_FORMAT_R8_UNORM;
case API::TextureFormat::R8G8: return DXGI_FORMAT_R8G8_UNORM;
case API::TextureFormat::R8G8B8: return DXGI_FORMAT_R8G8B8A8_UNORM;
case API::TextureFormat::R8G8B8A8: return DXGI_FORMAT_R8G8B8A8_UNORM;
default: return DXGI_FORMAT_R8G8B8A8_UNORM;
}
}
D3D11_TEXTURE_ADDRESS_MODE DX11Texture2D::GetDXTextureWrap(API::TextureWrap textureWrap)
{
switch (textureWrap)
{
case API::TextureWrap::Repeat: return D3D11_TEXTURE_ADDRESS_WRAP;
case API::TextureWrap::MirroredReapeat: return D3D11_TEXTURE_ADDRESS_MIRROR;
case API::TextureWrap::ClampToEdge: return D3D11_TEXTURE_ADDRESS_CLAMP;
case API::TextureWrap::ClampToBorder: return D3D11_TEXTURE_ADDRESS_BORDER;
default: return D3D11_TEXTURE_ADDRESS_WRAP;
}
}
D3D11_FILTER DX11Texture2D::GetDXTextureFilter(API::TextureFilter textureFilter)
{
//TODO: Add more texture filter types to control both min and mag
switch (textureFilter)
{
case API::TextureFilter::Nearest: return D3D11_FILTER_MIN_MAG_MIP_POINT;
case API::TextureFilter::Linear: return D3D11_FILTER_MIN_MAG_MIP_LINEAR;
default: return D3D11_FILTER_MIN_MAG_MIP_POINT;
}
}
Also, here are my shaders.
Triangle.vs:
struct VertexInputType
{
float3 position : POSITION;
float3 color : COLOR;
float2 tex : TEXCOORD;
};
struct PixelInputType
{
float4 position : SV_POSITION;
float2 tex : TEXCOORD;
};
PixelInputType main(VertexInputType input)
{
PixelInputType output;
// Calculate the position of the vertex against the world, view, and projection matrices.
output.position = float4(input.position, 1);
// Store the input texture for the pixel shader to use.
output.tex = input.tex;
return output;
}
Triangle.ps
struct PixelInputType
{
float4 position : SV_POSITION;
float2 tex : TEXCOORD;
};
Texture2D shaderTexture : register(t0);
SamplerState SampleType : register(s0);
float4 main(PixelInputType input) : SV_TARGET
{
return shaderTexture.Sample(SampleType, input.tex);
}
Cheers,
Zlixine.
It seems you are creating a texture with a full mip map chain (MipLevels = 0), but you provide initial data for only the top mip level. CreateTexture2D expects one D3D11_SUBRESOURCE_DATA entry per subresource, so the creation is likely to fail; and even if the texture were created, any mip level for which no data was provided would sample as black.
In that case you should enforce a single mip level by using:
texDesc.MipLevels = 1;
The line:
subData.SysMemSlicePitch = Data.width * Data.height * 4;
is not needed, as this parameter is ignored in the case of a 2D texture (you can leave it at 0 or any value).
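Putting the two points together, a minimal corrected setup might look like this (a sketch on my part, assuming mipmaps are not needed yet):
texDesc.MipLevels = 1;                  // one mip level, matching the single set of initial data
subData.pSysMem = Data.databuf;
subData.SysMemPitch = Data.width * 4;   // bytes per row for a 4-byte-per-texel format
subData.SysMemSlicePitch = 0;           // ignored for 2D textures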
Also, you should check result codes when creating resources, e.g.:
HRESULT hr = Core::Internals::DX11Renderer::GetDevice()->CreateTexture2D(&texDesc, &subData, &textureID);
if (FAILED(hr))
{
    // Handle the case where texture creation failed
}
And make sure to create your device with D3D11CreateDevice and the D3D11_CREATE_DEVICE_DEBUG flag,
so that when debugging you get meaningful error messages in the Visual Studio output window (if creation fails, you will see an explanation instead of just an E_INVALIDARG HRESULT, which is not really useful).
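A minimal sketch of what that device creation could look like, assuming a plain standalone call outside the engine's renderer wrapper:
UINT flags = 0;
#if defined(_DEBUG)
flags |= D3D11_CREATE_DEVICE_DEBUG;     // enable the D3D11 debug layer in debug builds
#endif
ID3D11Device* device = nullptr;
ID3D11DeviceContext* context = nullptr;
HRESULT hr = D3D11CreateDevice(
    nullptr,                    // default adapter
    D3D_DRIVER_TYPE_HARDWARE,
    nullptr,                    // no software rasterizer module
    flags,
    nullptr, 0,                 // default feature levels
    D3D11_SDK_VERSION,
    &device,
    nullptr,                    // returned feature level (not needed here)
    &context);
With the debug layer active, resource-creation mistakes are reported as readable messages in the debugger output.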
I have reduced a previous rendering problem to a core case where I am stuck.
I have a vertex buffer, consisting of 4 vertices, arranged in a plane (labeled 0 to 3):
1. .2
0. .3
and an according index buffer {0,1,2,3,0}.
Now, when I render with D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP, I achieve the expected image:
__
| |
|__|
However, when I render with D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP the result is:
| /|
|/ |
Note that no filling of triangles is performed.
Even more confusing, when using D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST the result is:
|
|
If I change the index buffer to {0,1,2,0,2,3} it renders:
| /
|/
That is, just a single pixel-wide line between the first two vertices is being drawn.
I have reduced my shaders to the most primitive examples:
Vertex shader:
struct VertexInputType
{
float4 position : POSITION;
};
struct PixelInputType
{
float4 position : SV_POSITION;
};
PixelInputType VertexShader(VertexInputType input)
{
PixelInputType output;
input.position.w = 1.0f;
output.position = input.position;
return output;
}
Pixel shader:
struct PixelInputType
{
float4 position : SV_POSITION;
};
float4 PixelShader(PixelInputType input) : SV_TARGET
{
float4 color;
color.r = 0;
color.g = 0;
color.b = 0;
color.a = 1;
return color;
}
As vertices I'm using DirectX::XMFLOAT3:
D3D11_INPUT_ELEMENT_DESC polygon_layout[1];
polygon_layout[0].SemanticName = "POSITION";
polygon_layout[0].SemanticIndex = 0;
polygon_layout[0].Format = DXGI_FORMAT_R32G32B32_FLOAT;
polygon_layout[0].InputSlot = 0;
polygon_layout[0].AlignedByteOffset = 0;
polygon_layout[0].InputSlotClass = D3D11_INPUT_PER_VERTEX_DATA;
polygon_layout[0].InstanceDataStepRate = 0;
d3d11_device->CreateInputLayout(polygon_layout, 1, compiled_vshader_buffer->GetBufferPointer(), compiled_vshader_buffer->GetBufferSize(), &input_layout);
D3D11_BUFFER_DESC vertex_buffer_desc;
vertex_buffer_desc.Usage = D3D11_USAGE_DEFAULT;
vertex_buffer_desc.ByteWidth = sizeof(DirectX::XMFLOAT3) * 4;
vertex_buffer_desc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vertex_buffer_desc.CPUAccessFlags = 0;
vertex_buffer_desc.MiscFlags = 0;
vertex_buffer_desc.StructureByteStride = 0;
DirectX::XMFLOAT3 vertices[4];
vertices[0].x = -0.5; vertices[0].y = -0.5; vertices[0].z = 0;
vertices[1].x = -0.5; vertices[1].y = 0.5; vertices[1].z = 0;
vertices[2].x = 0.5; vertices[2].y = 0.5; vertices[2].z = 0;
vertices[3].x = 0.5; vertices[3].y = -0.5; vertices[3].z = 0;
D3D11_SUBRESOURCE_DATA vertex_buffer_data;
vertex_buffer_data.pSysMem = vertices;
vertex_buffer_data.SysMemPitch = 0;
vertex_buffer_data.SysMemSlicePitch = 0;
hr = d3d11_device->CreateBuffer(&vertex_buffer_desc, &vertex_buffer_data, &vertex_buffer);
D3D11_BUFFER_DESC index_buffer_desc;
index_buffer_desc.Usage = D3D11_USAGE_DEFAULT;
index_buffer_desc.ByteWidth = sizeof(int32_t) * 6;
index_buffer_desc.BindFlags = D3D11_BIND_INDEX_BUFFER;
index_buffer_desc.CPUAccessFlags = 0;
index_buffer_desc.MiscFlags = 0;
index_buffer_desc.StructureByteStride = 0;
int32_t indices[6];
indices[0] = 0;
indices[1] = 1;
indices[2] = 2;
indices[3] = 2;
indices[4] = 3;
indices[5] = 0;
D3D11_SUBRESOURCE_DATA index_buffer_data;
index_buffer_data.pSysMem = indices;
index_buffer_data.SysMemPitch = 0;
index_buffer_data.SysMemSlicePitch = 0;
hr = d3d11_device->CreateBuffer(&index_buffer_desc, &index_buffer_data, &index_buffer);
// during rendering I set:
unsigned int stride = sizeof(DirectX::XMFLOAT3);
unsigned int offset = 0;
d3d11_context->IASetVertexBuffers(0, 1, &vertex_buffer, &stride, &offset);
d3d11_context->IASetIndexBuffer(index_buffer, DXGI_FORMAT_R32_UINT, 0);
d3d11_context->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
d3d11_context->RSSetState(rasterizer_state);
d3d11_context->IASetInputLayout(input_layout);
d3d11_context->VSSetShader(vertex_shader, NULL, 0);
d3d11_context->PSSetShader(pixel_shader, NULL, 0);
// and render with:
d3d11_context->DrawIndexed(6, 0, 0);
When I look at the shaders with the ID3D11ShaderReflection::GetGSInputPrimitive(), I receive D3D_PRIMITIVE_UNDEFINED for both the vertex shader and pixel shader.
I am setting the rasterizer stage with D3D11_FILL_SOLID and D3D11_CULL_NONE.
Is there any setting or state in the D3D11 context that could explain such a behavior?
I'm happy for any ideas where to look.
Thanks in advance!
Firstly, a triangle strip draws exactly what you'd expect: a sequence of triangles. Each index in the index buffer is combined with the two previous indices to form a triangle.
I'd suggest that because your triangle list's index count is not divisible by 3, DirectX may be rendering incorrectly (remember that, being a performance-oriented API, it skips checks and balances where it can).
Try drawing your expected results on paper after reviewing the logic behind each of the draw modes (list, strip, fan, etc.) to be sure that you are using the correct vertex ordering and draw mode; see the sketch after this answer.
Good luck!
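As a worked example of that paper exercise, here is one ordering that covers the quad as labelled in the question (0 bottom-left, 1 top-left, 2 top-right, 3 bottom-right), assuming cull mode NONE as in the question so winding does not matter:
int32_t listIndices[6]  = { 0, 1, 2,   0, 2, 3 };  // triangle list: two explicit triangles
int32_t stripIndices[4] = { 1, 0, 2, 3 };          // triangle strip: triangles (1,0,2) and (0,2,3)
// DrawIndexed(6, 0, 0) with the list, DrawIndexed(4, 0, 0) with the strip.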
It turns out that the code was not the problem: somewhere earlier, something had changed the Direct3D state.
Calling context->ClearState(); solved the issue.
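Worth noting for anyone trying the same thing: ClearState resets everything on the context, so the whole pipeline has to be rebound before drawing. A rough sketch using the objects from the question (render_target_view, depth_stencil_view and viewport are placeholder names for objects not shown in the question):
d3d11_context->ClearState();   // wipe whatever leftover state was causing the problem
// ...then rebind everything the draw depends on:
d3d11_context->OMSetRenderTargets(1, &render_target_view, depth_stencil_view);
d3d11_context->RSSetViewports(1, &viewport);
d3d11_context->RSSetState(rasterizer_state);
d3d11_context->IASetInputLayout(input_layout);
d3d11_context->IASetVertexBuffers(0, 1, &vertex_buffer, &stride, &offset);
d3d11_context->IASetIndexBuffer(index_buffer, DXGI_FORMAT_R32_UINT, 0);
d3d11_context->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
d3d11_context->VSSetShader(vertex_shader, NULL, 0);
d3d11_context->PSSetShader(pixel_shader, NULL, 0);
d3d11_context->DrawIndexed(6, 0, 0);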
I have been trying to add a particle system to my DirectX 11 graphics demo, so I have been using the 'Introduction to 3D Game Programming with DirectX 11' book.
Because of this I am attempting to use an HLSL stream-out technique to update the particle system and a separate technique to render the particles.
Below is the HLSL code for the particle system. I have tried adjusting the acceleration and velocity on the off chance the speed was sending the particles off the screen, but this had no effect.
cbuffer cbPerFrame
{
float3 gEyePosW;
float3 gEmitPosW;
float3 gEmitDirW;
float gGameTime;
float gTimeStep;
float4x4 gViewProj;
};
cbuffer cbFixed
{
// Net constant acceleration used to accelerate the particles.
float3 gAccelW = {0.0f, 0.2f, 0.0f};
// Texture coordinates for billboarding are always the same - we use a quad in this effect :)
float2 gTexC[4] =
{
float2(0.0f, 1.0f),
float2(0.0f, 0.0f),
float2(1.0f, 1.0f),
float2(1.0f, 0.0f)
};
};
// Nonnumeric values cannot be added to a cbuffer.
Texture2DArray gTextureMapArray;
// Random texture used to generate random numbers in shaders.
Texture1D gRandomTexture;
SamplerState samLinear
{
Filter = MIN_MAG_MIP_LINEAR;
AddressU = WRAP;
AddressV = WRAP;
};
DepthStencilState DisableDepth
{
DepthEnable = FALSE;
DepthWriteMask = ZERO;
};
DepthStencilState NoDepthWrites
{
DepthEnable = TRUE;
DepthWriteMask = ZERO;
};
BlendState AdditiveBlending
{
AlphaToCoverageEnable = FALSE;
BlendEnable[0] = TRUE;
SrcBlend = SRC_ALPHA;
DestBlend = ONE;
BlendOp = ADD;
SrcBlendAlpha = ZERO;
DestBlendAlpha = ZERO;
BlendOpAlpha = ADD;
RenderTargetWriteMask[0] = 0x0F;
};
///////////////////////////////////////////////////////////////
// Helper functions
//
///////////////////////////////////////////////////////////////
float3 RandUnitVec3(float offset)
{
// Use game time plus offset to sample random texture.
float u = (gGameTime + offset);
// coordinates in [-1,1]
float3 v = gRandomTexture.SampleLevel(samLinear, u, 0).xyz;
// project onto unit sphere (Normalize)
return normalize(v);
}
///////////////////////////////////////////////////////////////
// Stream Out Technique
//
///////////////////////////////////////////////////////////////
#define PT_EMITTER 0
#define PT_FLARE 1
struct Particle
{
float3 InitPosW : POSITION;
float3 InitVelW : VELOCITY;
float2 SizeW : SIZE;
float Age : AGE;
uint Type : Type;
};
Particle StreamOutVS(Particle vin)
{
return vin;
}
// The stream-out GS is just responsible for emitting
// new particles and destroying old particles. The logic
// programmed here will generally vary from particle system
// to particle system, as the destroy/spawn rules will be
// different.
[maxvertexcount(2)]
void StreamOutGS(point Particle gin[1],
inout PointStream<Particle> ptStream)
{
gin[0].Age += gTimeStep;
// if particle is emitter particle
if (gin[0].Type == PT_EMITTER)
{
// If it's time to emit new particle
if (gin[0].Age > 0.005f)
{
float3 vRandom = RandUnitVec3(0.0f);
vRandom.x *= 0.5f;
vRandom.z *= 0.5f;
Particle p;
p.InitPosW = gEmitPosW.xyz;
p.InitVelW = 0.5f*vRandom;
p.SizeW = float2(3.0f, 3.0f);
p.Age = 0.0f;
p.Type = PT_FLARE;
ptStream.Append(p);
// reset the time to emit
gin[0].Age = 0.0f;
}
// always keep emitters
ptStream.Append(gin[0]);
}
else
{
// Set conditions to keep a particle - in this case age limit
if (gin[0].Age <= 1.0f)
{
ptStream.Append(gin[0]);
}
}
}
GeometryShader gsStreamOut = ConstructGSWithSO(
CompileShader( gs_5_0, StreamOutGS() ),
"POSITION.xyz; VELOCITY.xyz; SIZE.xyz; AGE.x; TYPE.x" );
technique11 StreamOutTech
{
pass P0
{
SetVertexShader( CompileShader( vs_5_0, StreamOutVS() ) );
SetGeometryShader( gsStreamOut );
// disable pixel shader for stream-out only
SetPixelShader(NULL);
// we must also disable the depth buffer for stream-out only
SetDepthStencilState( DisableDepth, 0 );
}
}
///////////////////////////////////////////////////////////////
// Draw Technique
//
///////////////////////////////////////////////////////////////
struct VertexIn
{
float3 Pos : POSITION;
float2 SizeW : SIZE;
};
struct VertexOut
{
float3 PosW : POSITION;
float2 SizeW : SIZE;
float4 Colour : COLOR;
uint Type : TYPE;
};
VertexOut DrawVS(Particle vin)
{
VertexOut vout;
float t = vin.Age;
// constant Acceleration equation
vout.PosW = 0.5f*t*t*gAccelW + t*vin.InitVelW + vin.InitPosW;
// fade colour with time
float opacity = 1.0f - smoothstep(0.0f, 1.0f, t/1.0f);
vout.Colour = float4(1.0f, 1.0f, 1.0f, opacity);
vout.SizeW = vin.SizeW;
vout.Type = vin.Type;
return vout;
}
struct GeoOut
{
float4 PosH : SV_POSITION;
float4 Colour : COLOR;
float2 Tex : TEXCOORD;
};
// Expand each 'Point' into a quad (4 vertices)
[maxvertexcount(4)]
void DrawGS(point VertexOut gin[1],
inout TriangleStream<GeoOut> triStream)
{
// Do not draw Emitter particles in this system
if (gin[0].Type != PT_EMITTER)
{
//
// Compute world matrix so that billboard faces the camera.
//
float3 look = normalize(gEyePosW.xyz - gin[0].PosW);
float3 right = normalize(cross(float3(0,1,0), look));
float3 up = cross(look, right);
//
// Compute triangle strip vertices (quad) in world space.
//
float halfWidth = 0.5f*gin[0].SizeW.x;
float halfHeight = 0.5f*gin[0].SizeW.y;
float4 v[4];
v[0] = float4(gin[0].PosW + halfWidth*right - halfHeight*up, 1.0f);
v[1] = float4(gin[0].PosW + halfWidth*right + halfHeight*up, 1.0f);
v[2] = float4(gin[0].PosW - halfWidth*right - halfHeight*up, 1.0f);
v[3] = float4(gin[0].PosW - halfWidth*right + halfHeight*up, 1.0f);
//
// Transform quad vertices to world space and output
// them as a triangle strip.
//
GeoOut gout;
[unroll]
for(int i = 0; i < 4; ++i)
{
gout.PosH = mul(v[i], gViewProj);
gout.Tex = gTexC[i];
gout.Colour = gin[0].Colour;
triStream.Append(gout);
}
}
}
float DrawPS(GeoOut pin) : SV_TARGET
{
return gTextureMapArray.Sample(samLinear, float3(pin.Tex, 0)) * pin.Colour;
}
technique11 DrawTech
{
pass P0
{
SetVertexShader( CompileShader( vs_5_0, DrawVS() ) );
SetGeometryShader( CompileShader( gs_5_0, DrawGS() ) );
SetPixelShader( CompileShader( ps_5_0, DrawPS() ) );
SetBlendState(AdditiveBlending, float4(0.0f, 0.0f, 0.0f, 0.0f), 0xffffffff);
SetDepthStencilState( NoDepthWrites, 0 );
}
}
Below is the code for building the VB.
void ParticleSystem::BuildVB(ID3D11Device* device)
{
/////////////////////////////////////////////////////////
// Create the buffer to start the particle system.
/////////////////////////////////////////////////////////
D3D11_BUFFER_DESC vbd;
vbd.Usage = D3D11_USAGE_DEFAULT;
vbd.ByteWidth = sizeof(Vertex::Particle) * 1;
vbd.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vbd.CPUAccessFlags = 0;
vbd.MiscFlags = 0;
vbd.StructureByteStride = 0;
// The initial particle emitter has type 0 and age 0. The rest
// of the particle attributes do not apply to an emitter.
Vertex::Particle p;
ZeroMemory(&p, sizeof(Vertex::Particle));
p.Age = 0.0f;
p.Type = 0;
D3D11_SUBRESOURCE_DATA vinitData;
vinitData.pSysMem = &p;
HR(device->CreateBuffer(&vbd, &vinitData, &mInitVB));
//////////////////////////////////////////////////////////////
// Create the buffers which swap back and forth for stream-out and drawing.
//////////////////////////////////////////////////////////////
vbd.ByteWidth = sizeof(Vertex::Particle) * mMaxParticles;
vbd.BindFlags = D3D11_BIND_VERTEX_BUFFER | D3D11_BIND_STREAM_OUTPUT;
HR(device->CreateBuffer(&vbd, 0, &mDrawVB));
HR(device->CreateBuffer(&vbd, 0, &mStreamOutVB));
}
And now the Draw cpp code.
void ParticleSystem::Draw(ID3D11DeviceContext* dc, const XMMATRIX& viewProj)
{
//
// Set constants.
//
mFX->SetViewProj(viewProj);
mFX->SetGameTime(mGameTime);
mFX->SetTimeStep(mTimeStep);
mFX->SetEyePosW(mEyePosW);
mFX->SetEmitPosW(mEmitPosW);
mFX->SetEmitDirW(mEmitDirW);
mFX->SetTexArray(mTextureArraySRV);
mFX->SetRandomTex(mRandomTextureSRV);
//
// Set IA stage.
//
dc->IASetInputLayout(InputLayouts::Particle);
dc->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_POINTLIST);
UINT stride = sizeof(Vertex::Particle);
UINT offset = 0;
// On the first pass, use the initialization VB. Otherwise, use
// the VB that contains the current particle list.
if( mFirstRun )
dc->IASetVertexBuffers(0, 1, &mInitVB, &stride, &offset);
else
dc->IASetVertexBuffers(0, 1, &mDrawVB, &stride, &offset);
//
// Draw the current particle list using stream-out only to update them.
// The updated vertices are streamed-out to the target VB.
//
dc->SOSetTargets(1, &mStreamOutVB, &offset);
D3DX11_TECHNIQUE_DESC techDesc;
mFX->StreamOutTech->GetDesc( &techDesc );
for(UINT p = 0; p < techDesc.Passes; ++p)
{
mFX->StreamOutTech->GetPassByIndex( p )->Apply(0, dc);
if(mFirstRun)
{
dc->Draw(1, 0);
mFirstRun = false;
}
else
{
dc->DrawAuto();
}
}
// done streaming-out--unbind the vertex buffer
ID3D11Buffer* bufferArray[1] = {0};
dc->SOSetTargets(1, bufferArray, &offset);
// ping-pong the vertex buffers
std::swap(mDrawVB, mStreamOutVB);
//
// Draw the updated particle system we just streamed-out.
//
dc->IASetVertexBuffers(0, 1, &mDrawVB, &stride, &offset);
mFX->DrawTech->GetDesc( &techDesc );
for(UINT p = 0; p < techDesc.Passes; ++p)
{
mFX->DrawTech->GetPassByIndex( p )->Apply(0, dc);
dc->DrawAuto();
}
}
I had thought that perhaps a blend state or depth state in use by some of the other objects in my scene might be causing the issue (perhaps I have misunderstood something I set earlier). I tried removing all other render code, leaving just the draw code above, but with no results.
To my mind, there are only a few possible causes for the issue I am having, but so far I am unable to find a solution:
The scale of the system is wrong for my scene, e.g. the particles are drawing but are moving off the screen too fast to be seen. As mentioned above, I have tried removing the acceleration and velocity of the particles in the HLSL code in order to see stationary particles. This had no effect.
The blend state or depth-stencil state is incorrect, as mentioned above.
The emitter particle is, for some reason, not being produced or placed correctly, causing no 'drawable' particles to be produced in turn.
As most of this code is in the .fx file, I am unable to step through it to check the emitter particles. I think this is the more likely issue, but I have been wrong before.
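One way to inspect the streamed-out particles without stepping through the .fx file is to copy the stream-out buffer into a staging buffer and read it back on the CPU. A rough sketch, assuming the device is reachable as device and the immediate context as dc (hypothetical names), alongside the mStreamOutVB and Vertex::Particle from the code above:
// Debug-only readback of mStreamOutVB after the StreamOutTech pass has run.
D3D11_BUFFER_DESC desc;
mStreamOutVB->GetDesc(&desc);
desc.Usage          = D3D11_USAGE_STAGING;
desc.BindFlags      = 0;
desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
desc.MiscFlags      = 0;
ID3D11Buffer* staging = nullptr;
if (SUCCEEDED(device->CreateBuffer(&desc, nullptr, &staging)))
{
    dc->CopyResource(staging, mStreamOutVB);
    D3D11_MAPPED_SUBRESOURCE mapped;
    if (SUCCEEDED(dc->Map(staging, 0, D3D11_MAP_READ, 0, &mapped)))
    {
        const Vertex::Particle* particles =
            reinterpret_cast<const Vertex::Particle*>(mapped.pData);
        // Inspect particles[0], particles[1], ... in the debugger here.
        dc->Unmap(staging, 0);
    }
    staging->Release();
}
A D3D11_QUERY_SO_STATISTICS query is another option if only the number of primitives written is of interest.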
Any help on this would be greatly appreciated, I am well and truly stuck on this.
Below I have added additional snippets of code which may be of use.
E.G. Input layout.
const D3D11_INPUT_ELEMENT_DESC InputLayoutDesc::Particle[5] =
{
{"POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0},
{"VELOCITY", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0},
{"SIZE", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 24, D3D11_INPUT_PER_VERTEX_DATA, 0},
{"AGE", 0, DXGI_FORMAT_R32_FLOAT, 0, 32, D3D11_INPUT_PER_VERTEX_DATA, 0},
{"TYPE", 0, DXGI_FORMAT_R32_UINT, 0, 36, D3D11_INPUT_PER_VERTEX_DATA, 0},
};
And particle system INIT
mFire.Init(md3dDevice, AppEffects::FireFX, mFlareTextureSRV, mRandomTextureSRV, 500);
mFire.SetEmitPos(XMFLOAT3(1.0f, 0.5f, 0.0f));
void ParticleSystem::Init(ID3D11Device* device, ParticleEffect* fx,
ID3D11ShaderResourceView* textureArraySRV,
ID3D11ShaderResourceView* randomTextureSRV,
UINT maxParticles)
{
mMaxParticles = maxParticles;
mFX = fx;
mTextureArraySRV = textureArraySRV;
mRandomTextureSRV = randomTextureSRV;
BuildVB(device);
}
If I have missed any section of the code you would need to see, let me know.
Thanks in advance.
Sorry about this question, all. It turns out it was a silly mistake in the pixel shader: I had declared the pixel shader's return type as float instead of float4.
I've been trying to get a series of 2D images, all the same size and stored as raw data, loaded into a 3D texture, and then render that 3D texture on the screen. As I'm still pretty new to DirectX, I'm not sure what I'm doing wrong. Here's the relevant code:
// Open the image and copy its contents into a buffer
std::ifstream image("head256x256x109", std::ifstream::binary);
int size;
char* buffer;
if (image.is_open()) {
image.seekg(0, image.end);
size = image.tellg();
image.seekg(0, image.beg);
buffer = new char[size];
image.read(buffer, size);
image.close();
} else {
return false;
}
// Convert the data to RGBA data
// Here we are simply putting the same value to R, G, B and A channels.
char* RGBABuffer = new char[ size*4 ];
for( int nIndx = 0; nIndx < size; ++nIndx )
{
RGBABuffer[nIndx*4] = buffer[nIndx];
RGBABuffer[nIndx*4+1] = buffer[nIndx];
RGBABuffer[nIndx*4+2] = buffer[nIndx];
RGBABuffer[nIndx*4+3] = buffer[nIndx];
}
// Create the texture
D3D11_TEXTURE3D_DESC texture_desc;
ZeroMemory(&texture_desc, sizeof(texture_desc));
texture_desc.Width = 256;
texture_desc.Height = 256;
texture_desc.Depth = 109;
texture_desc.MipLevels = 1;
texture_desc.Format = DXGI_FORMAT_R32G32B32A32_FLOAT;
texture_desc.Usage = D3D11_USAGE_DEFAULT;
texture_desc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
D3D11_SUBRESOURCE_DATA image_data;
ZeroMemory(&image_data, sizeof(image_data));
image_data.pSysMem = RGBABuffer;
image_data.SysMemPitch = 256 * 4;
image_data.SysMemSlicePitch = 256 * 256 * 4;
d3dResult = d3dDevice_->CreateTexture3D(&texture_desc, &image_data, &texture_);
// Create a sampler
D3D11_SAMPLER_DESC colorMapDesc;
ZeroMemory( &colorMapDesc, sizeof( colorMapDesc ) );
colorMapDesc.AddressU = D3D11_TEXTURE_ADDRESS_WRAP;
colorMapDesc.AddressV = D3D11_TEXTURE_ADDRESS_WRAP;
colorMapDesc.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
colorMapDesc.ComparisonFunc = D3D11_COMPARISON_NEVER;
colorMapDesc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
colorMapDesc.MaxLOD = D3D11_FLOAT32_MAX;
d3dResult = d3dDevice_->CreateSamplerState( &colorMapDesc, &colorMapSampler_ );
Shader code is as simple as possible:
Texture3D colorMap_ : register( t0 );
SamplerState colorSampler_ : register( s0 );
struct VS_Input
{
float4 pos : POSITION;
float4 tex0 : TEXCOORD0;
};
struct PS_Input
{
float4 pos : SV_POSITION;
float4 tex0 : TEXCOORD0;
};
PS_Input VS_Main( VS_Input vertex )
{
PS_Input vsOut = ( PS_Input )0;
vsOut.pos = vertex.pos;
vsOut.tex0 = vertex.tex0;
return vsOut;
}
float4 PS_Main( PS_Input frag ) : SV_TARGET
{
return colorMap_.Sample( colorSampler_, frag.tex0 );
}
Now I know that I would need to implement some sort of alpha blending for this to work properly, but I hoped I would at least get a first slice of the picture shown with this. However, all I get is the background color that I set in my Render function:
float clearColor[4] = { 0.0f, 0.0f, 0.0f, 1.0f };
d3dContext_->ClearRenderTargetView( backBufferTarget_, clearColor );
Any help on what I might be doing wrong, as well as any further tips on how to achieve this goal (making a 3D texture out of a series of images and rendering it on screen), would be hugely helpful, as there is very little documentation about this on the web.
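One mismatch worth double-checking in the code above (an observation of mine, not something confirmed by an answer): RGBABuffer holds one byte per channel, but the texture is declared as DXGI_FORMAT_R32G32B32A32_FLOAT, which is 16 bytes per texel, so neither the data nor the pitches line up. A minimal sketch of a description that matches 8-bit RGBA data, assuming the same 256x256x109 volume:
texture_desc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;  // 4 bytes per texel, matches RGBABuffer
image_data.pSysMem          = RGBABuffer;
image_data.SysMemPitch      = 256 * 4;             // bytes per row
image_data.SysMemSlicePitch = 256 * 256 * 4;       // bytes per 2D slice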
I have a Windows Phone 8 C#/XAML project with a DirectX component. I'm trying to render some particles. I create a vertex buffer, and I saw the code go into the function that creates it, but by the time it gets to updating the vertex buffer, the buffer is NULL. I have not released the buffers yet. Do you know why this happens? Do any of the output messages help? Thanks.
Printouts and errors on my Output Window:
'TaskHost.exe' (Win32): Loaded '\Device\HarddiskVolume4\Windows\System32\d3d11_1SDKLayers.dll'. Cannot find or open the PDB file.
D3D11 WARNING: ID3D11Texture2D::SetPrivateData: Existing private data of same name with different size found! [ STATE_SETTING WARNING #55: SETPRIVATEDATA_CHANGINGPARAMS]
Create vertex buffer
D3D11 WARNING: ID3D11DeviceContext::DrawIndexed: The Pixel Shader unit expects a Sampler to be set at Slot 0, but none is bound. This is perfectly valid, as a NULL Sampler maps to default Sampler state. However, the developer may not want to rely on the defaults. [ EXECUTION WARNING #352: DEVICE_DRAW_SAMPLER_NOT_SET]
The thread 0xb64 has exited with code 0 (0x0).
m_vertexBuffer is null
D3D11 WARNING: ID3D11DeviceContext::DrawIndexed: The Pixel Shader unit expects a Sampler to be set at Slot 0, but none is bound. This is perfectly valid, as a NULL Sampler maps to default Sampler state. However, the developer may not want to rely on the defaults. [ EXECUTION WARNING #352: DEVICE_DRAW_SAMPLER_NOT_SET]
m_vertexBuffer is null
In CreateDeviceResources, I call CreateVertexShader, CreateInputLayout, CreatePixelShader, CreateBuffer. Then I get to creating the sampler and vertex buffer, code below:
auto createCubeTask = (createPSTask && createVSTask).then([this] () {
// Create a texture sampler state description.
D3D11_SAMPLER_DESC samplerDesc;
samplerDesc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
samplerDesc.AddressU = D3D11_TEXTURE_ADDRESS_WRAP;
samplerDesc.AddressV = D3D11_TEXTURE_ADDRESS_WRAP;
samplerDesc.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
samplerDesc.MipLODBias = 0.0f;
samplerDesc.MaxAnisotropy = 1;
samplerDesc.ComparisonFunc = D3D11_COMPARISON_ALWAYS;
samplerDesc.BorderColor[0] = 0;
samplerDesc.BorderColor[1] = 0;
samplerDesc.BorderColor[2] = 0;
samplerDesc.BorderColor[3] = 0;
samplerDesc.MinLOD = 0;
samplerDesc.MaxLOD = D3D11_FLOAT32_MAX;
// Create the texture sampler state.
HRESULT result = m_d3dDevice->CreateSamplerState(&samplerDesc, &m_sampleState);
if(FAILED(result))
{
OutputDebugString(L"Can't CreateSamplerState");
}
//InitParticleSystem();
// Set the maximum number of vertices in the vertex array.
m_vertexCount = m_maxParticles * 6;
// Set the maximum number of indices in the index array.
m_indexCount = m_vertexCount;
// Create the vertex array for the particles that will be rendered.
m_vertices = new VertexType[m_vertexCount];
if(!m_vertices)
{
OutputDebugString(L"Can't create the vertex array for the particles that will be rendered.");
}
else
{
// Initialize vertex array to zeros at first.
int sizeOfVertexType = sizeof(VertexType);
int totalSizeVertex = sizeOfVertexType * m_vertexCount;
memset(m_vertices, 0, totalSizeVertex);
D3D11_SUBRESOURCE_DATA vertexBufferData = {0};
vertexBufferData.pSysMem = m_vertices;
vertexBufferData.SysMemPitch = 0;
vertexBufferData.SysMemSlicePitch = 0;
int sizeOfMVertices = sizeof(m_vertices);
CD3D11_BUFFER_DESC vertexBufferDesc(
totalSizeVertex, // byteWidth
D3D11_BIND_VERTEX_BUFFER, // bindFlags
D3D11_USAGE_DYNAMIC, // D3D11_USAGE usage = D3D11_USAGE_DEFAULT
D3D11_CPU_ACCESS_WRITE, // cpuaccessFlags
0, // miscFlags
0 // structureByteStride
);
OutputDebugString(L"Create vertex buffer\n");
DX::ThrowIfFailed(
m_d3dDevice->CreateBuffer(
&vertexBufferDesc,
&vertexBufferData,
&m_vertexBuffer
)
);
}
unsigned long* indices = new unsigned long[m_indexCount];
if(!indices)
{
OutputDebugString(L"Can't create the index array.");
}
else
{
// Initialize the index array.
for(int i=0; i<m_indexCount; i++)
{
indices[i] = i;
}
// Set up the description of the static index buffer.
// Create the index array.
D3D11_BUFFER_DESC indexBufferDesc;
indexBufferDesc.Usage = D3D11_USAGE_DEFAULT;
indexBufferDesc.ByteWidth = sizeof(unsigned long) * m_indexCount;
indexBufferDesc.BindFlags = D3D11_BIND_INDEX_BUFFER;
indexBufferDesc.CPUAccessFlags = 0;
indexBufferDesc.MiscFlags = 0;
indexBufferDesc.StructureByteStride = 0;
// Give the subresource structure a pointer to the index data.
D3D11_SUBRESOURCE_DATA indexData;
indexData.pSysMem = indices;
indexData.SysMemPitch = 0;
indexData.SysMemSlicePitch = 0;
// Create the index buffer.
DX::ThrowIfFailed(
m_d3dDevice->CreateBuffer(
&indexBufferDesc,
&indexData,
&m_indexBuffer
)
);
// Release the index array since it is no longer needed.
delete [] indices;
indices = 0;
}
});
createCubeTask.then([this] () {
m_loadingComplete = true;
});
}
My vertex is position, tex, and color:
struct VertexType
{
DirectX::XMFLOAT3 position;
DirectX::XMFLOAT2 texture;
DirectX::XMFLOAT4 color;
};
Microsoft::WRL::ComPtr<ID3D11SamplerState> m_sampleState;
VertexType* m_vertices;
Microsoft::WRL::ComPtr<ID3D11Buffer> m_vertexBuffer;
const D3D11_INPUT_ELEMENT_DESC vertexDesc[] =
{
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT, 0, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_PER_VERTEX_DATA, 0 },
};
VertexShader.HLSL:
cbuffer ModelViewProjectionConstantBuffer : register(b0)
{
matrix model;
matrix view;
matrix projection;
};
struct VertexInputType
{
float4 position : POSITION;
float2 tex : TEXCOORD0;
float4 color : COLOR;
};
struct PixelInputType
{
float4 position : SV_POSITION;
float2 tex : TEXCOORD0;
float4 color : COLOR;
};
PixelInputType main(VertexInputType input)
{
PixelInputType output;
// Change the position vector to be 4 units for proper matrix calculations.
input.position.w = 1.0f;
// Calculate the position of the vertex against the world, view, and projection matrices.
output.position = mul(input.position, model);
output.position = mul(output.position, view);
output.position = mul(output.position, projection);
// Store the texture coordinates for the pixel shader.
output.tex = input.tex;
// Store the particle color for the pixel shader.
output.color = input.color;
return output;
}
After the call to: m_d3dDevice->CreateBuffer(&vertexBufferDesc, &vertexBufferData, &m_vertexBuffer) the m_vertexBuffer is not null.
But when I get to my Update() function, m_vertexBuffer is NULL!
D3D11_MAPPED_SUBRESOURCE mappedResource;
if (m_vertexBuffer == nullptr)
{
OutputDebugString(L"m_vertexBuffer is null\n");
}
else
{
// Lock the vertex buffer.
DX::ThrowIfFailed(m_d3dContext->Map(m_vertexBuffer.Get(), 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedResource));
// Get a pointer to the data in the vertex buffer.
VertexType * verticesPtr = (VertexType*)mappedResource.pData;
//// Copy the data into the vertex buffer.
int sizeOfVertices = sizeof(VertexType) * m_vertexCount;
memcpy(verticesPtr, (void*)m_vertices, sizeOfVertices);
//// Unlock the vertex buffer.
m_d3dContext->Unmap(m_vertexBuffer.Get(), 0);
}
About the sampler: you need to bind it to your pixel shader (using GetAddressOf() so the ComPtr is not released by operator&):
m_d3dContext->PSSetSamplers(0, 1, m_sampleState.GetAddressOf());
I was making an incorrect call with m_vertexBuffer when I went to render it.
The following change fixes the error with m_vertexBuffer being null.
Previous:
m_d3dContext.Get()->IASetVertexBuffers(0, 1, &m_vertexBuffer, &stride, &offset);
Fix:
m_d3dContext.Get()->IASetVertexBuffers(0, 1, m_vertexBuffer.GetAddressOf(), &stride, &offset);
It doesn't fix the warning about the sampler states though:
DrawIndexed: The Pixel Shader unit expects a Sampler to be set at Slot 0
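For that remaining warning, the earlier sampler suggestion still applies: bind the sampler state to the pixel shader stage before the draw, and as with the vertex buffer, pass the raw pointer via GetAddressOf() rather than taking the address of the ComPtr. A small sketch, reusing the members shown above:
// operator& on a WRL ComPtr releases the object it holds (ReleaseAndGetAddressOf),
// so use GetAddressOf() when a call only needs to read the pointer.
m_d3dContext->PSSetSamplers(0, 1, m_sampleState.GetAddressOf());
m_d3dContext->DrawIndexed(m_indexCount, 0, 0);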