DirectX11 Map / Unmap Runtime Error - c++

Whenever I use the map / unmap functions per frame my program errors with the warning
"Unhandled exception at 0x0F285A07 (atidxx32.dll) in Engine.exe: 0xC0000005: Access violation reading location 0x00000000"
after about 20 seconds.
On the line result = device->CreateTexture2D(&textureDesc, NULL, &renderTargetTexture);
I believe this is because of the renderTargetTexture not being accessible in some way.
I set the texture in the following way at initialisation and then every frame. The program works fine if I do not update every frame but I need to do this to pass arrays to the GPU.
- Setup the texture description
-- The code breaks on the last line of the texture setup (CreateTexture2D); it seems to be the render target.
- Map and unmap the texture
- Set the shader resource
Setting up the texture
// Fills |textureDesc| and creates the dynamic, CPU-writable texture that the
// per-frame Map/Unmap code uploads into. Returns true on success.
bool setupTextureDesc(ID3D11Device* device, ID3D11DeviceContext* deviceContext, D3D11_TEXTURE2D_DESC& textureDesc)
{
    HRESULT result;
    // Initialize the render target texture description.
    ZeroMemory(&textureDesc, sizeof(textureDesc));
    // Setup the render target texture description.
    textureDesc.Width = fluidBufferObj.width;
    textureDesc.Height = fluidBufferObj.height;
    textureDesc.MipLevels = 1;
    textureDesc.ArraySize = 1;
    textureDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
    // DYNAMIC usage + CPU_ACCESS_WRITE is required for Map(WRITE_DISCARD).
    textureDesc.Usage = D3D11_USAGE_DYNAMIC;
    textureDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
    textureDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
    textureDesc.MiscFlags = 0;
    textureDesc.SampleDesc.Count = 1;
    textureDesc.SampleDesc.Quality = 0;
    // Create the texture. NOTE(review): if this function runs every frame,
    // the previous renderTargetTexture must be Release()d first or each call
    // leaks a texture — confirm against the caller.
    result = device->CreateTexture2D(&textureDesc, NULL, &renderTargetTexture);
    HRFAIL
    // BUG FIX: the function previously fell off the end without returning a
    // value, which is undefined behaviour for a bool-returning C++ function.
    return true;
}
Map and Unmap
D3D11_MAPPED_SUBRESOURCE mappedResource;
//Map the resources. Blocks the GPU from accessing the file.
result = deviceContext->Map(renderTargetTexture, 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedResource);
HRFAIL
//Set the pixels
UCHAR* pTexels = (UCHAR*)mappedResource.pData;
//For 3D do the same but add "depthStart" as mappedResource.DepthPitch * depth
int startIndex = (float)( ((float)percentage / 100) * (float)textureDesc.Height );
for( UINT row = 0; row < textureDesc.Height; row++ )
{
//Row number * height
UINT rowStart = row * mappedResource.RowPitch;
for( UINT col = 0; col < textureDesc.Width; col++ )
{
if( row >= startIndex && row <= (startIndex + 10) )
{
//width * number of channels (r,g,b,a)
UINT colStart = col * 4;
pTexels[rowStart + colStart + 0] = 0; // Red
pTexels[rowStart + colStart + 1] = 0; // Green
pTexels[rowStart + colStart + 2] = 255; // Blue
pTexels[rowStart + colStart + 3] = 255; // Alpha
}
else
{
//width * number of channels (r,g,b,a)
UINT colStart = col * 4;
pTexels[rowStart + colStart + 0] = 255; // Red
pTexels[rowStart + colStart + 1] = 0; // Green
pTexels[rowStart + colStart + 2] = 0; // Blue
pTexels[rowStart + colStart + 3] = 255; // Alpha
}
}
}
//Free the resource
deviceContext->Unmap(renderTargetTexture, 0);
Setting render target
// Fills |textureDesc| and creates the dynamic, CPU-writable texture that the
// per-frame Map/Unmap code uploads into. Returns true on success.
bool setupTextureDesc(ID3D11Device* device, ID3D11DeviceContext* deviceContext, D3D11_TEXTURE2D_DESC& textureDesc)
{
    HRESULT result;
    // Initialize the render target texture description.
    ZeroMemory(&textureDesc, sizeof(textureDesc));
    // Setup the render target texture description.
    textureDesc.Width = fluidBufferObj.width;
    textureDesc.Height = fluidBufferObj.height;
    textureDesc.MipLevels = 1;
    textureDesc.ArraySize = 1;
    textureDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
    // DYNAMIC usage + CPU_ACCESS_WRITE is required for Map(WRITE_DISCARD).
    textureDesc.Usage = D3D11_USAGE_DYNAMIC;
    textureDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
    textureDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
    textureDesc.MiscFlags = 0;
    textureDesc.SampleDesc.Count = 1;
    textureDesc.SampleDesc.Quality = 0;
    // Create the texture. NOTE(review): calling this every frame without
    // Release()ing the previous renderTargetTexture leaks a texture per call.
    result = device->CreateTexture2D(&textureDesc, NULL, &renderTargetTexture);
    HRFAIL
    // BUG FIX: the function previously fell off the end without returning a
    // value, which is undefined behaviour for a bool-returning C++ function.
    return true;
}

Not sure what the error was but I seem to have fixed it by eliminating the setting up texture description and setting shader resource steps.
The every frame code is simply mapping and unmapping the texture now, seems the others were...unnecessary.
D3D11_MAPPED_SUBRESOURCE mappedResource;
//Map the resources. Blocks the GPU from accessing the file.
result = deviceContext->Map(renderTargetTexture, 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedResource);
HRFAIL
D3D11_TEXTURE2D_DESC* tDesc = new D3D11_TEXTURE2D_DESC();
renderTargetTexture->GetDesc(tDesc);
//Set the pixels
UCHAR* pTexels = (UCHAR*)mappedResource.pData;
//For 3D do the same but add "depthStart" as mappedResource.DepthPitch * depth
int startIndex = (float)( ((float)percentage / 100) * (float)tDesc->Height );
for( UINT row = 0; row < tDesc->Height; row++ )
{
//Row number * height
UINT rowStart = row * mappedResource.RowPitch;
for( UINT col = 0; col < tDesc->Width; col++ )
{
if( row >= startIndex && row <= (startIndex + 10) )
{
//width * number of channels (r,g,b,a)
UINT colStart = col * 4;
pTexels[rowStart + colStart + 0] = 0; // Red
pTexels[rowStart + colStart + 1] = 0; // Green
pTexels[rowStart + colStart + 2] = 255; // Blue
pTexels[rowStart + colStart + 3] = 255; // Alpha
}
else
{
//width * number of channels (r,g,b,a)
UINT colStart = col * 4;
pTexels[rowStart + colStart + 0] = 255; // Red
pTexels[rowStart + colStart + 1] = 0; // Green
pTexels[rowStart + colStart + 2] = 0; // Blue
pTexels[rowStart + colStart + 3] = 255; // Alpha
}
}
}
//Free the resource
deviceContext->Unmap(renderTargetTexture, 0);

Related

Get coordinates of specified color of pixel on screen

I'm making a game bot, and at certain moments it must find (anywhere on the screen) a pixel of a specified color (without being given its coordinates); if it finds one, the bot has to print the coordinates (x, y) of that color.
I have a this example
int click_color = color;
HDC dc = GetDC(hWnd);
COLORREF click_col = GetPixel(dc, x, y);
// BUG FIX: ReleaseDC must be passed the same window handle the DC was
// obtained for; passing NULL here fails to release the DC for hWnd.
ReleaseDC(hWnd, dc);
it works fine, but it searches for a color at specified coordinates. I need it the other way around: to search the whole screen and print the coordinates of that color.
Can anyone help me?
Sorry for my broken English)
I'm not aware of any function that does that for you, so I believe your best shot would be to look through every pixel coordinate to check if the color corresponds. Obviously the GetPixel() function is too slow for that, so it would be best to capture the entirety of the screen and then sequentially analyze the saved pixels. When I need to do something like this I use this class after including Windows.h:
// Captures the entire desktop into a 32-bit pixel buffer and exposes
// per-pixel colour queries. Usage: construct once, call capture(), then
// query with getPixel()/getPixelRed()/getPixelGreen()/getPixelBlue().
class screenshot
{
public:
    BYTE* lpbitmap;   // raw bottom-up 32-bpp bitmap bits filled by GetDIBits
    int horizontal;   // desktop width in pixels
    int vertical;     // desktop height in pixels
    COLORREF* pxMap;  // top-down map of COLORREFs, one per pixel
private:
    HDC hScreenDC;
    HDC hMemoryDC;
    HBITMAP hBitmap;
    HBITMAP hOldBitmap;
    RECT desktop;
    const HWND hDesktop = GetDesktopWindow();
    BITMAPINFOHEADER bi;
    DWORD dwBmpSize;  // size of the DIB in bytes
    HANDLE hDIB;
    int x;
    int y;
    // Lets a packed COLORREF be read back channel-by-channel.
    union color
    {
        int col;
        unsigned char abgr[4];
    };
public:
    screenshot()
    {
        hScreenDC = CreateDC(TEXT("DISPLAY"), NULL, NULL, NULL);
        hMemoryDC = CreateCompatibleDC(hScreenDC);
        x = GetDeviceCaps(hScreenDC, HORZRES);
        y = GetDeviceCaps(hScreenDC, VERTRES);
        GetWindowRect(hDesktop, &desktop);
        horizontal = desktop.right;
        vertical = desktop.bottom;
        bi.biSize = sizeof(BITMAPINFOHEADER);
        bi.biWidth = horizontal;
        bi.biHeight = vertical;
        bi.biPlanes = 1;
        bi.biBitCount = 32;
        bi.biCompression = BI_RGB;
        bi.biSizeImage = 0;
        bi.biXPelsPerMeter = 0;
        bi.biYPelsPerMeter = 0;
        bi.biClrUsed = 0;
        bi.biClrImportant = 0;
        // Row size rounded up to a DWORD boundary, times the row count.
        dwBmpSize = ((horizontal * bi.biBitCount + 31) / 32) * 4 * vertical;
        // BUG FIX: pxMap needs one COLORREF per pixel, not one per byte of
        // the bitmap (`new COLORREF[dwBmpSize]` over-allocated by 4x).
        pxMap = new COLORREF[horizontal * vertical];
        lpbitmap = new BYTE[dwBmpSize];
        hDIB = GlobalAlloc(GHND, dwBmpSize);
        hBitmap = CreateCompatibleBitmap(hScreenDC, x, y);
    }
    // BUG FIX: the class owns raw GDI handles and heap buffers; copying it
    // would double-free them in the destructor, so copying is disallowed.
    screenshot(const screenshot&) = delete;
    screenshot& operator=(const screenshot&) = delete;
    void capture()
    {
        // Select the bitmap into the memory DC, blit the screen into it,
        // then deselect it again before reading the bits out.
        hOldBitmap = (HBITMAP)SelectObject(hMemoryDC, hBitmap);
        BitBlt(hMemoryDC, 0, 0, horizontal, vertical, hScreenDC, 0, 0, SRCCOPY);
        hBitmap = (HBITMAP)SelectObject(hMemoryDC, hOldBitmap);
        GetDIBits(hMemoryDC, hBitmap, 0, (UINT)vertical, lpbitmap, (BITMAPINFO*)&bi, DIB_RGB_COLORS);
        // BUG FIX: removed `GetObject(hBitmap, sizeof(lpbitmap), &lpbitmap);`
        // — it wrote the first bytes of a BITMAP struct over the lpbitmap
        // pointer itself, corrupting the pointer used by the loop below.
        BYTE* img;
        for (int Y = 0; Y < vertical; Y++)
        {
            for (int X = 0; X < horizontal; X++)
            {
                // DIB rows are stored bottom-up; flip Y so pxMap is top-down.
                img = lpbitmap + ((vertical - 1 - Y) * horizontal + X) * 4;
                pxMap[Y * horizontal + X] = RGB(img[2], img[1], img[0]);
            }
        }
    }
    ~screenshot()
    {
        // BUG FIX: release everything the constructor allocated — the old
        // destructor leaked the bitmap and both heap buffers.
        DeleteObject(hBitmap);
        GlobalFree(hDIB);
        DeleteDC(hMemoryDC);
        DeleteDC(hScreenDC);
        delete[] lpbitmap;
        delete[] pxMap;
    }
    COLORREF getPixel(int X, int Y)
    {
        //300 414
        return pxMap[Y * horizontal + X];
    }
    int getPixelRed(int X, int Y)
    {
        color col;
        col.col = pxMap[Y * horizontal + X];
        return col.abgr[0];
    }
    int getPixelGreen(int X, int Y)
    {
        color col;
        col.col = pxMap[Y * horizontal + X];
        return col.abgr[1];
    }
    int getPixelBlue(int X, int Y)
    {
        color col;
        col.col = pxMap[Y * horizontal + X];
        return col.abgr[2];
    }
};
Simply create an instance of it, capture a screenshot when you need to and go nuts analyzing the returned pixels.

Issue rendering texture in C++ DirectX Tool Kit

I'm trying to load texture from .jpg file with function CreateWICtextureFromFile from DirectX Tool kit.
My image gets loaded as texture but the output has jitter and missing information.
These are my code snapshots:
// Trilinear-filtered, wrapping sampler. BorderColor is only consulted when an
// address mode is D3D11_TEXTURE_ADDRESS_BORDER, so the 0.5 values are inert here.
D3D11_SAMPLER_DESC samplerdesc;
samplerdesc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
samplerdesc.AddressU = D3D11_TEXTURE_ADDRESS_WRAP;
samplerdesc.AddressV = D3D11_TEXTURE_ADDRESS_WRAP;
samplerdesc.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
samplerdesc.MipLODBias = 0.0f;
samplerdesc.MaxAnisotropy = 1;
samplerdesc.ComparisonFunc = D3D11_COMPARISON_ALWAYS;
samplerdesc.BorderColor[0] = 0.5;
samplerdesc.BorderColor[1] = 0.5;
samplerdesc.BorderColor[2] = 0.5;
samplerdesc.BorderColor[3] = 0.5;
// Allow sampling across the full mip chain.
samplerdesc.MinLOD = 0;
samplerdesc.MaxLOD = D3D11_FLOAT32_MAX;
// NOTE(review): the HRESULT from CreateSamplerState is ignored here.
Device->CreateSamplerState(&samplerdesc, &samplestate);
===================================
DirectX::CreateWICTextureFromFile(Device, L"xyz.jpg", nullptr, &resourceview);
===================================
Pixel shader file
texcolor = shader.Sample(Sample, tex);
// NOTE(review): forcing alpha to 0 makes the texture fully transparent if
// alpha blending is enabled — confirm this is intended.
texcolor.w=0;
// BUG FIX: the code returned `textureColor`, which is not the variable
// sampled above; return the computed `texcolor` instead.
return texcolor;
===============================
// Description for a shared, shader-readable render target.
texDesc.Width = width;
texDesc.Height = height;
texDesc.MipLevels = 1;
texDesc.ArraySize = 1;
texDesc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
texDesc.SampleDesc.Count = 1;
// BUG FIX: SampleDesc.Quality and CPUAccessFlags were left uninitialized;
// garbage values in a D3D11_TEXTURE2D_DESC can make CreateTexture2D fail.
texDesc.SampleDesc.Quality = 0;
texDesc.Usage = D3D11_USAGE_DEFAULT;
texDesc.BindFlags = D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE;
texDesc.CPUAccessFlags = 0;
texDesc.MiscFlags = D3D11_RESOURCE_MISC_SHARED;

Nonuniform "lightning" of manually created texture in DirectX

I'm trying to create a texture manually from my own data and display it. As I could seize in different DirectX docs the main part of code would be written in this manner
// Global variables
HWND g_hWnd = NULL;
D3D10_DRIVER_TYPE g_driverType = D3D10_DRIVER_TYPE_NULL;
ID3D10Device *g_pd3dDevice = NULL;
IDXGISwapChain *g_pSwapChain = NULL;
ID3D10RenderTargetView *g_pRenderTargetView = NULL;
// View over g_pTexture that the sprite samples from.
ID3D10ShaderResourceView *g_pShaderResource = NULL;
ID3DX10Sprite *g_pSprite = NULL;
// The 64x64 texture filled by hand in InitDirectX().
ID3D10Texture2D *g_pTexture = NULL;
// Any additional code for creation the window object
// ............
// Initialize DirectX
// Creates the D3D10 device/swap chain, hand-fills a 64x64 two-colour texture,
// wraps it in a shader-resource view, and creates the sprite used to draw it.
// Returns S_OK on success or the failing HRESULT.
HRESULT InitDirectX()
{
    HRESULT hr = S_OK;
    RECT rc;
    GetClientRect(g_hWnd, &rc);
    UINT width = rc.right - rc.left;
    UINT height = rc.bottom - rc.top;
    D3D10_DRIVER_TYPE driverTypes[] =
    {
        D3D10_DRIVER_TYPE_HARDWARE,
        D3D10_DRIVER_TYPE_REFERENCE
    };
    UINT numDriverTypes = sizeof(driverTypes)/sizeof(driverTypes[0]);
    // Initialization of the Swap Chain and the Device.
    DXGI_SWAP_CHAIN_DESC sd;
    ZeroMemory(&sd, sizeof(sd));
    sd.BufferCount = 1;
    sd.BufferDesc.Width = 64;
    sd.BufferDesc.Height = 64;
    sd.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
    sd.BufferDesc.RefreshRate.Numerator = 60;
    sd.BufferDesc.RefreshRate.Denominator = 1;
    sd.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
    sd.OutputWindow = g_hWnd;
    sd.SampleDesc.Count = 1;
    sd.SampleDesc.Quality = 0;
    sd.Windowed = TRUE;
    sd.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
    // Try hardware first, then fall back to the reference rasterizer.
    for (UINT driverTypeIndex = 0; driverTypeIndex < numDriverTypes; driverTypeIndex++)
    {
        g_driverType = driverTypes[driverTypeIndex];
        hr = D3D10CreateDeviceAndSwapChain(NULL, g_driverType, NULL, D3D10_CREATE_DEVICE_DEBUG, D3D10_SDK_VERSION, &sd, &g_pSwapChain, &g_pd3dDevice);
        if (SUCCEEDED(hr))
            break;
    }
    if (FAILED(hr))
        return hr;
    ID3D10Texture2D *pBackBuffer;
    hr = g_pSwapChain->GetBuffer(0, __uuidof(ID3D10Texture2D), (LPVOID*)&pBackBuffer);
    if (FAILED(hr))
        return hr;
    hr = g_pd3dDevice->CreateRenderTargetView(pBackBuffer, NULL, &g_pRenderTargetView);
    pBackBuffer->Release();
    if (FAILED(hr))
        return hr;
    g_pd3dDevice->OMSetRenderTargets(1, &g_pRenderTargetView, NULL);
    // Create the Viewport.
    D3D10_VIEWPORT vp;
    vp.Width = 64;
    vp.Height = 64;
    vp.MinDepth = 0.0f;
    vp.MaxDepth = 1.0f;
    vp.TopLeftX = 0.0f;
    vp.TopLeftY = 0.0f;
    g_pd3dDevice->RSSetViewports(1, &vp);
    // Fill the structure of the future texture.
    D3D10_TEXTURE2D_DESC desc;
    desc.Width = 64;
    desc.Height = 64;
    desc.MipLevels = desc.ArraySize = 1;
    desc.MiscFlags = 0;
    desc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
    desc.SampleDesc.Count = 1;
    // BUG FIX: SampleDesc.Quality was left uninitialized.
    desc.SampleDesc.Quality = 0;
    desc.Usage = D3D10_USAGE_DYNAMIC;
    desc.BindFlags = D3D10_BIND_SHADER_RESOURCE;
    desc.CPUAccessFlags = D3D10_CPU_ACCESS_WRITE;
    hr = g_pd3dDevice->CreateTexture2D( &desc, NULL, &g_pTexture );
    // BUG FIX: the HRESULT was previously ignored here.
    if (FAILED(hr))
        return hr;
    // Get access to the texture data.
    D3D10_MAPPED_TEXTURE2D mappedTex;
    // BUG FIX: Map's HRESULT was previously ignored.
    hr = g_pTexture->Map( D3D10CalcSubresource(0, 0, 1), D3D10_MAP_WRITE_DISCARD, 0, &mappedTex );
    if (FAILED(hr))
        return hr;
    int cnt = 0;
    UCHAR* pTexels = (UCHAR*)mappedTex.pData;
    // BUG FIX: ZeroMemory(pTexels, sizeof(pTexels)) only cleared the size of
    // the pointer (4/8 bytes); clear the whole mapped surface instead so the
    // RowPitch padding bytes are not left as garbage.
    ZeroMemory(pTexels, mappedTex.RowPitch * desc.Height);
    for( UINT row = 0; row < desc.Height; row++ )
    {
        UINT rowStart = row * mappedTex.RowPitch;
        for( UINT col = 0; col < desc.Width; col++ )
        {
            UINT colStart = col * 4;
            // Alternate the two colours per texel (a 1-texel checker pattern).
            if (cnt == 0)
            {
                pTexels[rowStart + colStart + 0] = 255; // Red
                pTexels[rowStart + colStart + 1] = 128; // Green
                pTexels[rowStart + colStart + 2] = 64; // Blue
                pTexels[rowStart + colStart + 3] = 255; // Alpha
            }
            else
            {
                pTexels[rowStart + colStart + 0] = 64;// Red
                pTexels[rowStart + colStart + 1] = 128;// Green
                pTexels[rowStart + colStart + 2] = 255;// Blue
                pTexels[rowStart + colStart + 3] = 255;// Alpha
            }
            cnt++;
            cnt %= 2;
        }
    }
    g_pTexture->Unmap( D3D10CalcSubresource(0, 0, 1));
    // Bind shader and texture.
    D3D10_SHADER_RESOURCE_VIEW_DESC srvDesc;
    ZeroMemory(&srvDesc, sizeof(srvDesc));
    // Fill the shader attributes.
    srvDesc.Format = desc.Format;
    srvDesc.ViewDimension = D3D10_SRV_DIMENSION_TEXTURE2D;
    srvDesc.Texture2D.MipLevels = desc.MipLevels;
    // BUG FIX: MostDetailedMip is the index of the largest mip, i.e. 0 — not
    // MipLevels - 1 (the two only coincided because MipLevels == 1 here).
    srvDesc.Texture2D.MostDetailedMip = 0;
    hr = g_pd3dDevice->CreateShaderResourceView(g_pTexture, &srvDesc, &g_pShaderResource);
    // BUG FIX: the HRESULT was previously ignored here.
    if (FAILED(hr))
        return hr;
    // Create sprite object.
    hr = D3DX10CreateSprite(g_pd3dDevice, 1, &g_pSprite);
    if (FAILED(hr))
        return hr;
    return S_OK;
}
// Now render the texture
void RenderScene()
{
float ClearColor[4] = {1.0f, 1.0f, 1.0f, 1.0f};
g_pd3dDevice->ClearRenderTargetView(g_pRenderTargetView, ClearColor);
D3DXMATRIX mWorld;
D3DXMATRIX mView;
D3DXMATRIX mProjection;
// Now render the generated texture thus it will be entirelly displayed in window
D3DXMatrixTranslation(&mWorld, 0.0f, 0.0f, 0.0f);
D3DXMatrixPerspectiveFovLH(&mProjection, 0.3258f, 1.0f, 0.0f, 0.01f);
g_pSprite->SetProjectionTransform(&mProjection);
D3DXVECTOR3 vEyePt(0.0f, 0.0f, -3.0f);
D3DXVECTOR3 vLookAtPt(0.0f, 0.0f, 0.0f);
D3DXVECTOR3 vUpVec(0.0f, 1.0f, 0.0f);
D3DXMatrixLookAtLH(&mView, &vEyePt, &vLookAtPt, &vUpVec);
g_pSprite->SetViewTransform(&mView);
HRESULT hr;
g_pSprite->Begin(D3DX10_SPRITE_SORT_TEXTURE);
// Render sprite based on shader created on the basis of generated texture
D3DX10_SPRITE SpriteToDraw; // Sprite array
SpriteToDraw.matWorld = mWorld;
SpriteToDraw.TexCoord.x = 0.0f;
SpriteToDraw.TexCoord.y = 0.0f;
SpriteToDraw.TexSize.x = 1.0f;
SpriteToDraw.TexSize.y = 1.0f;
SpriteToDraw.ColorModulate = D3DXCOLOR(1.0f, 1.0f, 1.0f, 1.0f);
SpriteToDraw.pTexture = g_pShaderResource;
SpriteToDraw.TextureIndex = 0;
hr = g_pSprite->DrawSpritesBuffered(&SpriteToDraw, 1);
if (FAILED(hr))
{
MessageBox(NULL, L"Sprite display error", L"Error", 0);
}
g_pSprite->Flush();
g_pSprite->End();
g_pSwapChain->Present(0, 0);
}
Unexpectedly, the final result is an image with nonuniform "lightened" stripes.
link to image http://savepic.org/7623429.jpg:
I've tried to change viewpoint parameters, that led to change the number of visible stripes and their shade, but I can't understand the cause of appearance of different blue/orange shades (in code their colors are uniform). Also, if I generate texture that is evenly filled with one color, i.e:
for( UINT row = 0; row < desc.Height; row++ )
{
UINT rowStart = row * mappedTex.RowPitch;
for( UINT col = 0; col < desc.Width; col++ )
{
UINT colStart = col * 4;
pTexels[rowStart + colStart + 0] = xxx; // Exact color has no reason
pTexels[rowStart + colStart + 1] = yyy;
pTexels[rowStart + colStart + 2] = zzz;
pTexels[rowStart + colStart + 3] = ttt;
}
}
there is no effect of nonuniform shade of the final displayed texture.

DirectX11 How to CreateTexture2D from unsigned char * data

I have a function GetCamImage that returns the image from camera as:
unsigned char* p = GetCamImage(...);
I need to create ID3D11Texture2D from this data. How can I do it in DirectX 11?
Thanks
It depends on what the format of the actual content in that p is, as well as the width, height, and stride. Assuming the image is an RGBA 32-bit format with simple byte pitch alignment and sized as w by h, then it would be something simple like:
// Creates a texture directly from the pixel data in p (assumed to be
// tightly-packed 32-bit RGBA, w x h).
D3D11_SUBRESOURCE_DATA initData = {0};
initData.pSysMem = (const void*)p;
initData.SysMemPitch = w * 4;
initData.SysMemSlicePitch = h * w * 4;
D3D11_TEXTURE2D_DESC desc;
desc.Width = w;
desc.Height = h;
desc.MipLevels = 1;
desc.ArraySize = 1;
desc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
desc.SampleDesc.Count = 1;
desc.SampleDesc.Quality = 0;
desc.Usage = D3D11_USAGE_DEFAULT;
desc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
desc.CPUAccessFlags = 0;
// BUG FIX: MiscFlags was left uninitialized; CreateTexture2D validates it,
// so garbage here can cause spurious E_INVALIDARG failures.
desc.MiscFlags = 0;
ID3D11Texture2D* tex = nullptr;
HRESULT hr = d3dDevice->CreateTexture2D( &desc, &initData, &tex );
if (FAILED(hr))
...

Directx 11 depth test not working

I cannot get my program to correctly choose which models to place in front. I have followed the MSDN code exactly. My code appears to correctly draw all polygons in a particular call of DrawIndexed, but each subsequent call seems to cause models to be drawn in the order they are drawn, not based on whether they are closer to the screen.
Here is my code for initializing Direct3d:
DXGI_SWAP_CHAIN_DESC sd;
ZeroMemory( &sd, sizeof( sd ) );
sd.BufferCount = 1;
sd.BufferDesc.Width = width;
sd.BufferDesc.Height = height;
sd.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
sd.BufferDesc.RefreshRate.Numerator = 60;
sd.BufferDesc.RefreshRate.Denominator = 1;
sd.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
sd.OutputWindow = hWnd;
// 4x MSAA — the depth buffer created later must use the same SampleDesc.
sd.SampleDesc.Count = 4;
sd.SampleDesc.Quality = 0;
sd.Windowed = !fullScreen;
sd.Flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH;
D3D_FEATURE_LEVEL FeatureLevelsRequested = D3D_FEATURE_LEVEL_11_0;
UINT numFeatureLevelsRequested = 1;
D3D_FEATURE_LEVEL FeatureLevelsSupported;
HRESULT hr;
// DRIVER_TYPE_UNKNOWN is required when an explicit adapter is passed.
if( FAILED (hr = D3D11CreateDeviceAndSwapChain( adapters[0],
D3D_DRIVER_TYPE_UNKNOWN,
NULL,
NULL,
NULL,
NULL,
D3D11_SDK_VERSION,
&sd,
&swapchain,
&dev,
&FeatureLevelsSupported,
&devcon )))
{
// NOTE(review): device-creation failure is silently swallowed here —
// presumably temporary bring-up code; confirm.
//return;
}
ID3D11Texture2D *pBack = NULL;
swapchain->GetBuffer(0, __uuidof(ID3D11Texture2D), (LPVOID*)&pBack);
// Use the back buffer address to create the render target.
dev->CreateRenderTargetView(pBack, NULL, &backbuffer);
pBack->Release();
// Create the depth stencil texture. Its SampleDesc must match the swap
// chain's (4x MSAA here).
D3D11_TEXTURE2D_DESC descDepth;
ZeroMemory(&descDepth, sizeof(descDepth));
descDepth.Width = width;
descDepth.Height = height;
descDepth.MipLevels = 1;
descDepth.ArraySize = 1;
descDepth.Format = DXGI_FORMAT_D24_UNORM_S8_UINT;
descDepth.SampleDesc.Count = 4;
descDepth.SampleDesc.Quality = 0;
descDepth.Usage = D3D11_USAGE_DEFAULT;
descDepth.BindFlags = D3D11_BIND_DEPTH_STENCIL;
descDepth.CPUAccessFlags = 0;
descDepth.MiscFlags = 0;
hr = dev->CreateTexture2D( &descDepth, NULL, &g_pDepthStencil);
if(FAILED(hr))
    exit(hr);
// Create the depth stencil view (multisampled dimension to match above).
D3D11_DEPTH_STENCIL_VIEW_DESC descDSV;
ZeroMemory(&descDSV, sizeof(descDSV));
descDSV.Format = descDepth.Format;
descDSV.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2DMS;
descDSV.Texture2D.MipSlice = 0;
hr = dev->CreateDepthStencilView( g_pDepthStencil, &descDSV, &g_pDepthStencilView);
if(FAILED(hr))
    exit(hr);
// Bind the back buffer and depth view together so depth testing applies.
devcon->OMSetRenderTargets(1, &backbuffer, g_pDepthStencilView);
D3D11_VIEWPORT viewport;
ZeroMemory(&viewport, sizeof(D3D11_VIEWPORT));
viewport.TopLeftX = 0;
viewport.TopLeftY = 0;
viewport.Width = width;
viewport.Height = height;
// BUG FIX: MinDepth/MaxDepth were never set (ZeroMemory leaves both at 0),
// which collapses the depth range and breaks the depth test — this is the
// exact issue the author later identified as the cause.
viewport.MinDepth = 0.0f;
viewport.MaxDepth = 1.0f;
devcon->RSSetViewports(1, &viewport);
This is my code for rendering:
void Direct3DRenderer::Render()
{
devcon->ClearRenderTargetView(backbuffer, D3DXCOLOR(0.0f, 0.2f, 0.4f, 1.0f));
devcon->ClearDepthStencilView(g_pDepthStencilView, D3D11_CLEAR_DEPTH|D3D11_CLEAR_STENCIL, 1.0f, 0 );
camera.location = simulation->GetWorld()->GetCameraCoordinates();
camera.direction = simulation->GetWorld()->GetCameraLookAt();
//camera.up = simulation->GetWorld()->GetCameraOrientation();
Vec3d lookAt = camera.location + camera.direction;
XMVECTOR eye = XMVectorSet((float)camera.location[0], (float)camera.location[1], (float)camera.location[2], 0.f);
XMVECTOR look = XMVectorSet(lookAt[0], lookAt[1], lookAt[2], 0);
XMVECTOR up = XMVectorSet(camera.up[0], camera.up[1], camera.up[2], 0);
g_View = XMMatrixLookAtLH(eye, look, up);
ConstantBuffer oncePerFrame;
oncePerFrame.matrix = XMMatrixTranspose(g_View);
devcon->UpdateSubresource(oncePerFrameBuffer, 0, NULL, &oncePerFrame, 0, 0);
UINT stride = sizeof(VERTEX);
UINT offset = 0;
const std::vector<Graphical*> graphicalList = simulation->GetWorld()->GetGraphicalList();
for(int ind = 0; ind < graphicalList.size(); ++ind)
{
switch(graphicalList[ind]->GetModelType())
{
case 1: //Sphere
{
ConstantBuffer oncePerModel2;
oncePerModel2.matrix = XMMatrixTranspose(XMMatrixScalingFromVector(graphicalList[ind]->GetScaleX()) * XMMatrixTranslationFromVector(graphicalList[ind]->GetTranslationX()));
devcon->UpdateSubresource(oncePerModelBuffer, 0, NULL, &oncePerModel2, 0, 0);
devcon->IASetVertexBuffers(0, 1, &(sphereModel.vertexBuffer), &stride, &offset);
devcon->IASetIndexBuffer(sphereModel.indexBuffer, DXGI_FORMAT_R32_UINT, 0);
devcon->DrawIndexed(sphereModel.indexCount, 0, 0);
}
break;
}
}
ConstantBuffer oncePerModel;
oncePerModel.matrix = XMMatrixTranspose(g_World);
devcon->UpdateSubresource(oncePerModelBuffer, 0, NULL, &oncePerModel, 0, 0);
devcon->IASetVertexBuffers(0, 1, &terrainModel.vertexBuffer, &stride, &offset);
devcon->IASetIndexBuffer(terrainModel.indexBuffer, DXGI_FORMAT_R32_UINT, 0);
devcon->IASetPrimitiveTopology(D3D10_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
devcon->DrawIndexed(terrainModel.indexCount, 0, 0);
swapchain->Present(0, 0);
}
I have tried searching extensively, and have followed every tutorial I could find. Nothing fixes it.
In the case of the spheres, depth appears to be correct if viewing from one side, but not the other.
Any help would be appreciated. Thanks.
From OP comment:
The problem ended up being much simpler. I did not set the minimum and
max depth for the viewport object. It worked once I did so.
I had the same problem, it works now:
D3D11_VIEWPORT screenViewport;
/* ... */
screenViewport.MinDepth = 0;
screenViewport.MaxDepth = 1;
/* ... */
context->RSSetViewports(1, &screenViewport);
There are three key steps to Z-Buffering.
Setting the Appropriate Presentation Parameters
Turning On Z-Buffering
Clearing the Z-Buffer
1) Setting the Appropriate Presentation Parameters
D3DPRESENT_PARAMETERS d3dpp;
//...
// Ask Direct3D 9 to create and manage the z-buffer automatically, using a
// 16-bit depth format.
d3dpp.EnableAutoDepthStencil = TRUE;
d3dpp.AutoDepthStencilFormat = D3DFMT_D16;
EnableAutoDepthStencil
In truth, z-buffering can be complex. Setting this value to TRUE tells Direct3D to automatically create the z-buffer and set it up in a way used most often. There are, of course, uses for the complex method, but we'll stick to simple for now. We'll cover ways the complex method can be useful later in the tutorial.
AutoDepthStencilFormat
This is the format for each pixel in the z-buffer. We don't use the regular pixel format defined in the Presentation Parameters. Instead, we use a special format for z-buffers. This format is D3DFMT_D16. This means that each pixel is 16-bit. There are other formats, but we will not need them for the extent of this tutorial.
2) Turning On Z-Buffering
// d3ddev is your Direct3D device - a variable of type LPDIRECT3DDEVICE9
// Turn on depth testing for subsequent draw calls.
d3ddev->SetRenderState(D3DRS_ZENABLE, TRUE);
3) Clearing the Z-Buffer
d3ddev->Clear(0, NULL, D3DCLEAR_ZBUFFER, D3DCOLOR_XRGB(0, 0, 0), 1.0f, 0);
By the way, this works in DirectX 9.0c. I'm not sure if it is compatible with DirectX 11.
Had a similar problem in DirectX10, seems it was initialisation code, I am not sure about the CreateDevice and SwapChain against UNKNOWN driver type, as I have not used that before.
There are a few differences I can see, the Stencil buffer does not specify the operations to perform depth stencil tests against. (Unless this has been specified in the HLSL which is not visible here)
eg:
// Example stencil configuration: the stencil test always passes and only
// depth-test failures modify the stencil value (INCR front / DECR back).
// Stencil test parameters
dsDesc.StencilEnable = true;
dsDesc.StencilReadMask = 0xFF;
dsDesc.StencilWriteMask = 0xFF;
// Stencil operations if pixel is front-facing
dsDesc.FrontFace.StencilFailOp = D3D10_STENCIL_OP_KEEP;
dsDesc.FrontFace.StencilDepthFailOp = D3D10_STENCIL_OP_INCR;
dsDesc.FrontFace.StencilPassOp = D3D10_STENCIL_OP_KEEP;
dsDesc.FrontFace.StencilFunc = D3D10_COMPARISON_ALWAYS;
// Stencil operations if pixel is back-facing
dsDesc.BackFace.StencilFailOp = D3D10_STENCIL_OP_KEEP;
dsDesc.BackFace.StencilDepthFailOp = D3D10_STENCIL_OP_DECR;
dsDesc.BackFace.StencilPassOp = D3D10_STENCIL_OP_KEEP;
dsDesc.BackFace.StencilFunc = D3D10_COMPARISON_ALWAYS;
In any event, here is a working DirectX10 example, I am pretty sure you can adapt it quickly for DirectX11.
logger->debug("initD3D: Calling D3D10CreateDeviceAndSwapChain.\n");
SYSTEMTIME sysTime;
GetSystemTime(&sysTime);
// Seed rand() from the current millisecond count.
srand((unsigned int)sysTime.wMilliseconds);
logger->debug("in initD3D.\n");
shutdownXX = false;
// MSAA settings shared by the swap chain and the depth buffer created below.
count = 1;
quality = 0;
//Create the back buffer desc.
ZeroMemory(&swapDesc, sizeof(DXGI_SWAP_CHAIN_DESC));
swapDesc.BufferCount = 1;
swapDesc.BufferDesc.Width = width;
swapDesc.BufferDesc.Height = height;
swapDesc.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
swapDesc.BufferDesc.RefreshRate.Numerator = 60;
swapDesc.BufferDesc.RefreshRate.Denominator = 1;
swapDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swapDesc.OutputWindow = hwnd;
swapDesc.SampleDesc.Count = count;
swapDesc.SampleDesc.Quality = quality;
swapDesc.Windowed = FALSE;
swapDesc.BufferDesc.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED;
swapDesc.BufferDesc.Scaling = DXGI_MODE_SCALING_UNSPECIFIED;
swapDesc.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
//Create the device.
HRESULT hr = D3D10CreateDeviceAndSwapChain(
NULL,
D3D10_DRIVER_TYPE_HARDWARE,
NULL,
0,
D3D10_SDK_VERSION,
&swapDesc,
&swapChain,
&device);
if (!chk(hr, TEXT("Could not create D3D Device D3D10CreateDeviceAndSwapChain failed.")))
return false;
ID3D10Texture2D *buffer;
logger->debug("initD3D: Calling swapChain->GetBuffer.\n");
hr = swapChain->GetBuffer(0, __uuidof(ID3D10Texture2D), (LPVOID*) &buffer);
if (!chk(hr, TEXT("Could not create D3D Device: swapChain->GetBuffer failed.")))
return false;
D3D10_TEXTURE2D_DESC BBDesc;
ZeroMemory(&BBDesc, sizeof(D3D10_TEXTURE2D_DESC));
buffer->GetDesc( &BBDesc );
D3D10_RENDER_TARGET_VIEW_DESC RTVDesc;
ZeroMemory(&RTVDesc, sizeof(D3D10_RENDER_TARGET_VIEW_DESC));
RTVDesc.Format = BBDesc.Format;
//RTVDesc.ViewDimension = D3D10_RTV_DIMENSION_TEXTURE2D;
// Multisampled view dimension, matching the swap chain's SampleDesc.
RTVDesc.ViewDimension = D3D10_RTV_DIMENSION_TEXTURE2DMS;
RTVDesc.Texture2D.MipSlice = 0;
logger->debug("initD3D: Calling device->CreateRenderTargetView.\n");
hr = device->CreateRenderTargetView(buffer, &RTVDesc, &renderTView);
// The view holds its own reference; release the one from GetBuffer.
buffer->Release();
if (!chk(hr, TEXT("Could not create D3D Device: device->CreateRenderTargetView failed.")))
return false;
ZeroMemory(&descDepth, sizeof(D3D10_TEXTURE2D_DESC));
descDepth.Width = width;
descDepth.Height = height;
descDepth.MipLevels = 1;
descDepth.ArraySize = 1;
// Combined depth + stencil format; descDSV.Format below must match it.
descDepth.Format = DXGI_FORMAT_D32_FLOAT_S8X24_UINT;
//descDepth.Format = DXGI_FORMAT_D32_FLOAT;
// Same MSAA settings as the swap chain.
descDepth.SampleDesc.Count = count;
descDepth.SampleDesc.Quality = quality;
descDepth.Usage = D3D10_USAGE_DEFAULT;
descDepth.BindFlags = D3D10_BIND_DEPTH_STENCIL;
descDepth.CPUAccessFlags = 0;
descDepth.MiscFlags = 0;
hr = device->CreateTexture2D(&descDepth, NULL, &stencil);
if (!chk(hr, TEXT("device->device->CreateTexture2D Failed\n")))
return false;
// Depth test parameters: standard less-than test with depth writes enabled.
dsDesc.DepthEnable = true;
dsDesc.DepthWriteMask = D3D10_DEPTH_WRITE_MASK_ALL;
dsDesc.DepthFunc = D3D10_COMPARISON_LESS;
// Stencil test parameters
dsDesc.StencilEnable = true;
dsDesc.StencilReadMask = 0xFF;
dsDesc.StencilWriteMask = 0xFF;
// Stencil operations if pixel is front-facing
dsDesc.FrontFace.StencilFailOp = D3D10_STENCIL_OP_KEEP;
dsDesc.FrontFace.StencilDepthFailOp = D3D10_STENCIL_OP_INCR;
dsDesc.FrontFace.StencilPassOp = D3D10_STENCIL_OP_KEEP;
dsDesc.FrontFace.StencilFunc = D3D10_COMPARISON_ALWAYS;
// Stencil operations if pixel is back-facing
dsDesc.BackFace.StencilFailOp = D3D10_STENCIL_OP_KEEP;
dsDesc.BackFace.StencilDepthFailOp = D3D10_STENCIL_OP_DECR;
dsDesc.BackFace.StencilPassOp = D3D10_STENCIL_OP_KEEP;
dsDesc.BackFace.StencilFunc = D3D10_COMPARISON_ALWAYS;
hr = device->CreateDepthStencilState(&dsDesc, &pDSState);
if (!chk(hr, TEXT("device->device->CreateDepthStencilState Failed\n")))
return false;
// Bind the state with stencil reference value 1.
device->OMSetDepthStencilState(pDSState, 1);
ZeroMemory(&descDSV, sizeof(D3D10_DEPTH_STENCIL_VIEW_DESC));
// View format must match the depth texture created above.
descDSV.Format = descDepth.Format;
//descDSV.ViewDimension = D3D10_DSV_DIMENSION_TEXTURE2D;
descDSV.ViewDimension = D3D10_DSV_DIMENSION_TEXTURE2DMS;
descDSV.Texture2D.MipSlice = 0;
hr = device->CreateDepthStencilView(stencil, &descDSV, &depthStencil);
if (!chk(hr, TEXT("device->device->CreateDepthStencilView Failed\n")))
return false;
// Bind both views together so depth testing applies to the back buffer.
device->OMSetRenderTargets(1, &renderTView, depthStencil);
resizeD3D10Window(width, height);
return true;