I've been working with Vulkan for the past couple of weeks and I've run into a problem that only happens on AMD cards, specifically the AMD 7970M. I've run my project on GTX 700 and 900 series cards with no problem. I've even run it on Windows and Linux (SteamOS) with Nvidia cards without a hitch. The problem only shows up on AMD cards and only with my project; all the samples and projects from Sascha Willems run without a problem.
Right now I am drawing a textured Raptor model and spinning it in place. I render that off to a texture and then apply that texture to a fullscreen triangle; basic offscreen rendering. However, the depth doesn't seem to clear correctly on my 7970M, and instead I get this weird artifacting:
Of course I tried digging into this with RenderDoc, and the depth is totally wrong. Both the Raptor and the fullscreen triangle it's drawn onto are just a mess:
I've tried comparing my code to the Offscreen example from Sascha Willems and I appear to be doing almost everything the same way. I thought maybe something was wrong with the way I created my depth image, but it seems fine in comparison to all the examples I've seen.
Here are some debug views of where I am creating the depth image and view:
Here's the whole method:
bool VKRenderTarget::setupFramebuffer(VKRenderer* renderer)
{
VkDevice device = renderer->GetVKDevice();
VkCommandBuffer setupCommand;
m_colorFormat = renderer->GetPreferredImageFormat();
m_depthFormat = renderer->GetPreferredDepthFormat();
renderer->CreateSetupCommandBuffer();
setupCommand = renderer->GetSetupCommandBuffer();
VkResult err;
//Color attachment
VkImageCreateInfo imageInfo = {};
imageInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
imageInfo.pNext = nullptr;
imageInfo.format = m_colorFormat;
imageInfo.imageType = VK_IMAGE_TYPE_2D;
imageInfo.extent.width = m_width;
imageInfo.extent.height = m_height;
imageInfo.mipLevels = 1;
imageInfo.arrayLayers = 1;
imageInfo.samples = VK_SAMPLE_COUNT_1_BIT;
imageInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imageInfo.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
imageInfo.flags = 0;
VkMemoryAllocateInfo memAllocInfo = {};
memAllocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
VkMemoryRequirements memReqs;
err = vkCreateImage(device, &imageInfo, nullptr, &m_color.image);
assert(!err);
if (err != VK_SUCCESS)
{
#ifdef _DEBUG
Core::DebugPrintF("VKRenderTarget::VPrepare(): Error creating color image!\n");
#endif
return false;
}
vkGetImageMemoryRequirements(device, m_color.image, &memReqs);
memAllocInfo.allocationSize = memReqs.size;
renderer->MemoryTypeFromProperties(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &memAllocInfo.memoryTypeIndex);
err = vkAllocateMemory(device, &memAllocInfo, nullptr, &m_color.memory);
assert(!err);
if (err != VK_SUCCESS)
{
#ifdef _DEBUG
Core::DebugPrintF("VKRenderTarget::VPrepare(): Error allocating color image memory!\n");
#endif
return false;
}
err = vkBindImageMemory(device, m_color.image, m_color.memory, 0);
if (err != VK_SUCCESS)
{
#ifdef _DEBUG
Core::DebugPrintF("VKRenderTarget::VPrepare(): Error binding color image memory!\n");
#endif
return false;
}
renderer->SetImageLayout(setupCommand, m_color.image, VK_IMAGE_ASPECT_COLOR_BIT,
VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
VkImageViewCreateInfo viewInfo = {};
viewInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
viewInfo.pNext = nullptr;
viewInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
viewInfo.format = m_colorFormat;
viewInfo.flags = 0;
viewInfo.subresourceRange = {};
viewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
viewInfo.subresourceRange.baseMipLevel = 0;
viewInfo.subresourceRange.levelCount = 1;
viewInfo.subresourceRange.baseArrayLayer = 0;
viewInfo.subresourceRange.layerCount = 1;
viewInfo.image = m_color.image;
err = vkCreateImageView(device, &viewInfo, nullptr, &m_color.view);
if (err != VK_SUCCESS)
{
#ifdef _DEBUG
Core::DebugPrintF("VKRenderTarget::VPrepare(): Error creating color image view!\n");
#endif
return false;
}
//We can reuse the same info structs to build the depth image
imageInfo.format = m_depthFormat;
imageInfo.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
err = vkCreateImage(device, &imageInfo, nullptr, &(m_depth.image));
assert(!err);
if (err != VK_SUCCESS)
{
#ifdef _DEBUG
Core::DebugPrintF("VKRenderTarget::VPrepare(): Error creating depth image!\n");
#endif
return false;
}
viewInfo.format = m_depthFormat;
viewInfo.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
vkGetImageMemoryRequirements(device, m_depth.image, &memReqs);
memAllocInfo.allocationSize = memReqs.size;
renderer->MemoryTypeFromProperties(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, &memAllocInfo.memoryTypeIndex);
err = vkAllocateMemory(device, &memAllocInfo, nullptr, &m_depth.memory);
assert(!err);
if (err != VK_SUCCESS)
{
#ifdef _DEBUG
Core::DebugPrintF("VKRenderTarget::VPrepare(): Error allocating depth image memory!\n");
#endif
return false;
}
err = vkBindImageMemory(device, m_depth.image, m_depth.memory, 0);
if (err != VK_SUCCESS)
{
#ifdef _DEBUG
Core::DebugPrintF("VKRenderTarget::VPrepare(): Error binding depth image memory!\n");
#endif
return false;
}
renderer->SetImageLayout(setupCommand, m_depth.image,
VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT,
VK_IMAGE_LAYOUT_UNDEFINED,
VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
viewInfo.image = m_depth.image;
err = vkCreateImageView(device, &viewInfo, nullptr, &m_depth.view);
if (err != VK_SUCCESS)
{
#ifdef _DEBUG
Core::DebugPrintF("VKRenderTarget::VPrepare(): Error creating depth image view!\n");
#endif
return false;
}
renderer->FlushSetupCommandBuffer();
//Finally create internal framebuffer
VkImageView attachments[2];
attachments[0] = m_color.view;
attachments[1] = m_depth.view;
VkFramebufferCreateInfo framebufferInfo = {};
framebufferInfo.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
framebufferInfo.pNext = nullptr;
framebufferInfo.flags = 0;
framebufferInfo.renderPass = *((VKRenderPass*)m_renderPass)->GetVkRenderPass();
framebufferInfo.attachmentCount = 2;
framebufferInfo.pAttachments = attachments;
framebufferInfo.width = m_width;
framebufferInfo.height = m_height;
framebufferInfo.layers = 1;
err = vkCreateFramebuffer(device, &framebufferInfo, nullptr, &m_framebuffer);
if (err != VK_SUCCESS)
{
#ifdef _DEBUG
Core::DebugPrintF("VKRenderTarget::VPrepare(): Error creating framebuffer!\n");
#endif
return false;
}
return true;
}
If anyone wants more info on the code, feel free to ask and I will provide it. There are a LOT of lines of code in this project, so I don't want everyone to have to wade through it all. If you'd like to, though, all the code can be found at http://github.com/thirddegree/HatchitGraphics/tree/dev
Edit: After a bit more poking around, I've found that even the color doesn't really clear properly. RenderDoc shows that each frame only renders the cutout of the Raptor and doesn't clear the rest of the frame. Is this a driver problem?
Edit: Some more info. I've found that if I draw NOTHING, just begin and end a render pass without even drawing my fullscreen triangle, the screen will clear. However, if I draw just the triangle, the depth is wrong (even if I don't blit anything from offscreen or apply any sort of texture).
Edit: More specifically, the color will clear but the depth does not. If I don't draw anything, the depth stays black (all 0s). Why the fullscreen triangle causes the weird static in the depth buffer, I am not sure.
This is exactly what happened to me when I started to get my Vulkan examples to work on AMD hardware:
Their GPUs rely heavily on correct image transitions (which are mostly ignored by e.g. NVIDIA) and I think the corruption you see in your screenshots is the result of a missing pre-present barrier.
The pre-present barrier (see here) transforms the image layout of your color attachment into a presentation format for presenting it to the swap chain.
This has to be done after you have finished rendering to your color attachment to make sure that the attachment is completed before presenting it.
You can see an example of this in the draw routine of my examples.
On rendering the next frame you need to transition the color attachment's image layout back in order to be able to render to it again.
To sum it up:
Before rendering to your color attachment transition your image from VK_IMAGE_LAYOUT_PRESENT_SRC_KHR to VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL (aka "post present")
Do your rendering
Transition your color attachment image from VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR and present that to the swap chain
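For reference, here is a minimal sketch of what the pre-present barrier in that last step might look like; the command buffer and swapchain image handles are placeholders, not names from the code above:
VkImageMemoryBarrier prePresentBarrier = {};
prePresentBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
prePresentBarrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
prePresentBarrier.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
prePresentBarrier.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
prePresentBarrier.newLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
prePresentBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
prePresentBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
prePresentBarrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };
prePresentBarrier.image = swapchainImage; // the image you just finished rendering to
vkCmdPipelineBarrier(drawCommandBuffer,
VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
0, 0, nullptr, 0, nullptr, 1, &prePresentBarrier);
The "post present" barrier at the start of the next frame is the same structure with oldLayout/newLayout (and the access masks) swapped.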
Thanks to Sascha and some extra errors that popped up with the new 1.0.5 LunarG SDK, I've managed to fix the problem. The commit with the fixing changes (and a couple of other little things) can be found here: https://github.com/thirddegree/HatchitGraphics/commit/515d0303f45a8e9c00f67a74c824530ea37b687a
It was a combination of a few things:
I needed to give the depth image used as the swapchain's framebuffer attachment an aspect mask of VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT rather than just VK_IMAGE_ASPECT_DEPTH_BIT
For pretty much every image memory barrier I forgot to specify the baseArrayLayer of the subresourceRange (see the sketch after this list). This did not produce an error until version 1.0.5.
Another error that didn't pop up until 1.0.5, which affected my texture generation and might help you track down a similar bug: before mapping a texture's device memory to host memory I needed to transition the image from VK_IMAGE_LAYOUT_UNDEFINED to VK_IMAGE_LAYOUT_GENERAL and submit that command, then map the memory, and then transition it from GENERAL to VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL (don't forget to submit that command too). Again, this is only for textures that you want to sample, but I guess the moral here is "actually submit your image transitions".
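To illustrate the baseArrayLayer point, here is a rough sketch of a fully specified depth/stencil barrier; the image handle and access masks are illustrative placeholders rather than code from my project:
VkImageMemoryBarrier depthBarrier = {};
depthBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
depthBarrier.srcAccessMask = 0;
depthBarrier.dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
depthBarrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
depthBarrier.newLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
depthBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
depthBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
depthBarrier.image = depthImage;
depthBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
depthBarrier.subresourceRange.baseMipLevel = 0;
depthBarrier.subresourceRange.levelCount = 1;
depthBarrier.subresourceRange.baseArrayLayer = 0; // this is the member I had been leaving unset
depthBarrier.subresourceRange.layerCount = 1;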
Related
A little background: I'm attempting to make a Windows (10) application which makes the screen look like an old CRT monitor, scanlines, blur, and all. I'm using this official Microsoft screen capture demo as a starting point. At this stage I can capture a window and display it back in a new mouse-through window as if it were the original window.
I am attempting to use the CRT-Royale CRT shaders, which are generally considered the best CRT shaders; these are available in .cg format. I transpile them to HLSL with cgc, then compile the HLSL files to shader bytecode with fxc. I am able to successfully load the compiled shaders and create the pixel shader. I then set the pixel shader in the D3D context and attempt to copy the capture surface frame to a pixel shader resource and set the created shader resource. All of this builds and runs, but I do not see any difference in the output image and am not sure how to proceed. Below is the relevant code. I am not a C++ developer and am making this as a personal project which I plan on open sourcing once I have a primitive working version. Any advice is appreciated, thanks.
SimpleCapture::SimpleCapture(
IDirect3DDevice const& device,
GraphicsCaptureItem const& item)
{
m_item = item;
m_device = device;
// Set up
auto d3dDevice = GetDXGIInterfaceFromObject<ID3D11Device>(m_device);
d3dDevice->GetImmediateContext(m_d3dContext.put());
auto size = m_item.Size();
m_swapChain = CreateDXGISwapChain(
d3dDevice,
static_cast<uint32_t>(size.Width),
static_cast<uint32_t>(size.Height),
static_cast<DXGI_FORMAT>(DirectXPixelFormat::B8G8R8A8UIntNormalized),
2);
// ADDED THIS
HRESULT hr1 = D3DReadFileToBlob(L"crt-royale-first-pass-ps_4_0.fxc", &ps_1_buffer);
HRESULT hr = d3dDevice->CreatePixelShader(
ps_1_buffer->GetBufferPointer(),
ps_1_buffer->GetBufferSize(),
nullptr,
&ps_1
);
m_d3dContext->PSSetShader(
ps_1,
nullptr,
0
);
// END OF ADDED CHANGES
// Create framepool, define pixel format (DXGI_FORMAT_B8G8R8A8_UNORM), and frame size.
m_framePool = Direct3D11CaptureFramePool::Create(
m_device,
DirectXPixelFormat::B8G8R8A8UIntNormalized,
2,
size);
m_session = m_framePool.CreateCaptureSession(m_item);
m_lastSize = size;
m_frameArrived = m_framePool.FrameArrived(auto_revoke, { this, &SimpleCapture::OnFrameArrived });
}
void SimpleCapture::OnFrameArrived(
Direct3D11CaptureFramePool const& sender,
winrt::Windows::Foundation::IInspectable const&)
{
auto newSize = false;
{
auto frame = sender.TryGetNextFrame();
auto frameContentSize = frame.ContentSize();
if (frameContentSize.Width != m_lastSize.Width ||
frameContentSize.Height != m_lastSize.Height)
{
// The thing we have been capturing has changed size.
// We need to resize our swap chain first, then blit the pixels.
// After we do that, retire the frame and then recreate our frame pool.
newSize = true;
m_lastSize = frameContentSize;
m_swapChain->ResizeBuffers(
2,
static_cast<uint32_t>(m_lastSize.Width),
static_cast<uint32_t>(m_lastSize.Height),
static_cast<DXGI_FORMAT>(DirectXPixelFormat::B8G8R8A8UIntNormalized),
0);
}
{
auto frameSurface = GetDXGIInterfaceFromObject<ID3D11Texture2D>(frame.Surface());
com_ptr<ID3D11Texture2D> backBuffer;
check_hresult(m_swapChain->GetBuffer(0, guid_of<ID3D11Texture2D>(), backBuffer.put_void()));
// ADDED THIS
D3D11_TEXTURE2D_DESC txtDesc = {};
txtDesc.MipLevels = txtDesc.ArraySize = 1;
txtDesc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
txtDesc.SampleDesc.Count = 1;
txtDesc.Usage = D3D11_USAGE_IMMUTABLE;
txtDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
auto d3dDevice = GetDXGIInterfaceFromObject<ID3D11Device>(m_device);
ID3D11Texture2D *tex;
d3dDevice->CreateTexture2D(&txtDesc, NULL,
&tex);
frameSurface.copy_to(&tex);
d3dDevice->CreateShaderResourceView(
tex,
nullptr,
srv_1
);
auto texture = srv_1;
m_d3dContext->PSSetShaderResources(0, 1, texture);
// END OF ADDED CHANGES
m_d3dContext->CopyResource(backBuffer.get(), frameSurface.get());
}
}
DXGI_PRESENT_PARAMETERS presentParameters = { 0 };
m_swapChain->Present1(1, 0, &presentParameters);
... // Truncated
Shaders define how things are drawn. However, you don't draw anything - you just copy, which is why the shader doesn't do anything.
What you should do is remove the CopyResource call and instead draw a fullscreen quad onto the back buffer (which requires you to create a vertex buffer that you can bind, then set the back buffer as the render target, and finally call Draw/DrawIndexed to actually render something, which will then invoke the shader).
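As a rough sketch of that draw path (not drop-in code: the render target view, vertex shader, sampler, and SRV names below are assumed to be created during setup and do not exist in the question's code; a common alternative to a vertex buffer is generating the fullscreen triangle in the vertex shader from SV_VertexID):
// Bind the back buffer as the render target and draw through the CRT pixel shader.
m_d3dContext->OMSetRenderTargets(1, &m_backBufferRTV, nullptr);
D3D11_VIEWPORT vp = {};
vp.Width = static_cast<float>(m_lastSize.Width);
vp.Height = static_cast<float>(m_lastSize.Height);
vp.MaxDepth = 1.0f;
m_d3dContext->RSSetViewports(1, &vp);
m_d3dContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
m_d3dContext->VSSetShader(m_fullscreenVS, nullptr, 0); // fullscreen-triangle vertex shader
m_d3dContext->PSSetShader(ps_1, nullptr, 0); // the CRT pixel shader from the question
m_d3dContext->PSSetShaderResources(0, 1, &m_capturedFrameSRV); // SRV over the captured frame
m_d3dContext->PSSetSamplers(0, 1, &m_linearSampler);
m_d3dContext->Draw(3, 0); // one triangle covering the whole screen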
Also, since I'm not sure whether you already do this and just stripped it from the shown code: functions like CreatePixelShader don't return HRESULTs just for the fun of it. You should check what is actually returned, because DirectX silently returns most errors and expects you to handle them instead of crashing your program.
I am trying to write a C++ lambda that is registered with Lua and used as a callback, via the Sol2 binding. The callback below should create an SDL_Texture and clear it to a color. A Lua_Texture is just a wrapper for an SDL_Texture, and l_txt.texture is of type SDL_Texture*.
lua.set_function("init_texture",
[render](Lua_Texture &l_txt, int w, int h)
{
// free any previous texture
l_txt.deleteTexture();
l_txt.texture = SDL_CreateTexture(render, SDL_PIXELFORMAT_RGBA8888, SDL_TEXTUREACCESS_TARGET, w, h);
SDL_SetRenderTarget(render, l_txt.texture);
SDL_Texture *target = SDL_GetRenderTarget(render);
assert(l_txt.texture == target);
assert(target == nullptr);
SDL_SetRenderDrawColor(render, 0xFF, 0x22, 0x22, 0xFF);
SDL_RenderClear(render);
});
My problem is that SDL_SetRenderTarget isn't functioning as I'd expect it to. I try to set the texture as the target so I can clear its color, but when I try to draw the texture to the screen it is still blank. The asserts in the above code both fail, and show that the current target texture is neither the texture I am trying to clear and later use, nor NULL (which is the expected value if there is no current target texture).
I have used this snippet of code before in plain C++ (not as a Lua callback) and it works as intended. Somehow, embedding it in Lua causes the behavior to change. Any help is very much appreciated, as I've been pulling my hair out over this for a while. Thanks!
I may have an answer for you, but you're not going to like it.
It looks like SDL_GetRenderTarget doesn't work as expected.
I got the exact same problem you have (that's how I found your question), and I could reproduce it reliably using this simple program:
int rendererIndex;
[snipped code : rendererIndex is set to the index of the DX11 renderer]
SDL_Renderer * renderer = SDL_CreateRenderer(pWindow->pWindow, rendererIndex, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC | SDL_RENDERER_TARGETTEXTURE);
SDL_Texture* rtTexture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_RGBA8888, SDL_TEXTUREACCESS_TARGET, 200, 200);
SDL_SetRenderTarget(renderer, rtTexture);
if(SDL_GetRenderTarget(renderer) != rtTexture)
printf("ERROR.");
This always produces:
ERROR.
The workaround I used is that I save the pointer to the render target texture I set on the renderer myself, and I don't use SDL_GetRenderTarget.
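In code, the workaround amounts to something like this trivial sketch (the wrapper name and the global are mine, not part of SDL):
// Track the current render target ourselves instead of asking SDL for it back.
static SDL_Texture* g_currentRenderTarget = nullptr;
int SetRenderTargetTracked(SDL_Renderer* renderer, SDL_Texture* texture)
{
int result = SDL_SetRenderTarget(renderer, texture);
if (result == 0)
g_currentRenderTarget = texture; // NULL still means "render to the window"
return result;
}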
EDIT:
I was curious why I didn't get the correct render target back, so I looked through SDL2's source code. I found out why (code snipped for clarity):
int
SDL_SetRenderTarget(SDL_Renderer *renderer, SDL_Texture *texture)
{
// CODE SNIPPED
/* texture == NULL is valid and means reset the target to the window */
if (texture) {
CHECK_TEXTURE_MAGIC(texture, -1);
if (renderer != texture->renderer) {
return SDL_SetError("Texture was not created with this renderer");
}
if (texture->access != SDL_TEXTUREACCESS_TARGET) {
return SDL_SetError("Texture not created with SDL_TEXTUREACCESS_TARGET");
}
// *** EMPHASIS MINE : This is the problem.
if (texture->native) {
/* Always render to the native texture */
texture = texture->native;
}
}
// CODE SNIPPED
renderer->target = texture;
// CODE SNIPPED
}
SDL_Texture *
SDL_GetRenderTarget(SDL_Renderer *renderer)
{
return renderer->target;
}
In short, the renderer saves the current render target in renderer->target, but not before converting the current texture to its native form. When we use SDL_GetRenderTarget, we're getting that native texture, which may or may not be different.
I am trying to render a texture that gets passed through a pixel shader.
Currently my shader is as follows:
float4 EffectProcess( float2 Tex : TEXCOORD0 ) : COLOR0
{
return float4(1,0,0,1);
}
technique MyTechnique
{
pass p0
{
VertexShader = null;
PixelShader = compile ps_2_0 EffectProcess();
}
}
As you can see, it is a very basic shader that forces the pixels to be red.
UINT uiPasses = 0;
res= g_lpEffect->Begin(&uiPasses, 0);
for (UINT uiPass = 0; uiPass < uiPasses; uiPass++)
{
res = g_lpEffect->BeginPass(uiPass);
res = sprite->Begin(D3DXSPRITE_SORT_TEXTURE);
res = sprite->Draw(tex, NULL, 0x0, 0x0, 0xFFFFFFFF);
res = sprite->End();
res = g_lpEffect->EndPass();
}
res = g_lpEffect->End();
The code above is how I am drawing the texture using the shader. I am not sure this is the correct way to do it, though, and have found very few resources on the subject.
The shader is created correctly and the texture as well; all calls return an HRESULT of S_OK. Yet when I run the code, the texture shows up perfectly, without being overwritten by red.
Both sprites and effects by default store the initial pipeline state and set up their own when Begin is called, then restore it when End is called. So I suspect that sprite->Begin(D3DXSPRITE_SORT_TEXTURE); disables effect processing and your pixel shader is never called. You may try passing something like D3DXSPRITE_DONOTMODIFY_RENDERSTATE into Begin to prevent it from modifying pipeline state, though this may break sprite rendering. It would be better to get rid of the sprite altogether and write your own sprite shaders (both vertex and pixel), because fixed-pipeline rendering is mostly deprecated these days.
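For reference, the flag-based attempt would only change the Begin call inside the loop from the question; whether the sprite still renders correctly with this flag set is something you would have to verify:
res = g_lpEffect->BeginPass(uiPass);
res = sprite->Begin(D3DXSPRITE_SORT_TEXTURE | D3DXSPRITE_DONOTMODIFY_RENDERSTATE);
res = sprite->Draw(tex, NULL, 0x0, 0x0, 0xFFFFFFFF);
res = sprite->End();
res = g_lpEffect->EndPass();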
I want to understand DXGI Desktop Duplication. I have read a lot, and this is code I copied from parts of the DesktopDuplication sample on the Microsoft website. My plan is to get the buffer or array from the desktop image, because I want to make a new texture for another program. I hope somebody can explain what I need to do to get it.
void DesktopDublication::GetFrame(_Out_ FRAME_DATA* Data, _Out_ bool* Timeout)
{
IDXGIResource* DesktopResource = nullptr;
DXGI_OUTDUPL_FRAME_INFO FrameInfo;
// Get new frame
HRESULT hr = m_DeskDupl->AcquireNextFrame(500, &FrameInfo, &DesktopResource);
if (hr == DXGI_ERROR_WAIT_TIMEOUT)
{
// No new frame arrived within the timeout; report it and bail out early
*Timeout = true;
return;
}
*Timeout = false;
if (FAILED(hr))
{
}
// If still holding old frame, destroy it
if (m_AcquiredDesktopImage)
{
m_AcquiredDesktopImage->Release();
m_AcquiredDesktopImage = nullptr;
}
// QI for IDXGIResource
hr = DesktopResource->QueryInterface(__uuidof(ID3D11Texture2D), reinterpret_cast<void **>(&m_AcquiredDesktopImage));
DesktopResource->Release();
DesktopResource = nullptr;
if (FAILED(hr))
{
}
// Get metadata
if (FrameInfo.TotalMetadataBufferSize)
{
// Old buffer too small
if (FrameInfo.TotalMetadataBufferSize > m_MetaDataSize)
{
if (m_MetaDataBuffer)
{
delete[] m_MetaDataBuffer;
m_MetaDataBuffer = nullptr;
}
m_MetaDataBuffer = new (std::nothrow) BYTE[FrameInfo.TotalMetadataBufferSize];
if (!m_MetaDataBuffer)
{
m_MetaDataSize = 0;
Data->MoveCount = 0;
Data->DirtyCount = 0;
}
m_MetaDataSize = FrameInfo.TotalMetadataBufferSize;
}
UINT BufSize = FrameInfo.TotalMetadataBufferSize;
// Get move rectangles
hr = m_DeskDupl->GetFrameMoveRects(BufSize, reinterpret_cast<DXGI_OUTDUPL_MOVE_RECT*>(m_MetaDataBuffer), &BufSize);
if (FAILED(hr))
{
Data->MoveCount = 0;
Data->DirtyCount = 0;
}
Data->MoveCount = BufSize / sizeof(DXGI_OUTDUPL_MOVE_RECT);
BYTE* DirtyRects = m_MetaDataBuffer + BufSize;
BufSize = FrameInfo.TotalMetadataBufferSize - BufSize;
// Get dirty rectangles
hr = m_DeskDupl->GetFrameDirtyRects(BufSize, reinterpret_cast<RECT*>(DirtyRects), &BufSize);
if (FAILED(hr))
{
Data->MoveCount = 0;
Data->DirtyCount = 0;
}
Data->DirtyCount = BufSize / sizeof(RECT);
Data->MetaData = m_MetaDataBuffer;
}
Data->Frame = m_AcquiredDesktopImage;
Data->FrameInfo = FrameInfo;
}
If I'm understanding you correctly, you want to get the current desktop image, duplicate it into a private texture, and then render that private texture onto your window. I would start by reading up on Direct3D 11 and learning how to render a scene, as you will need D3D to do anything with the texture object you get from DXGI. This, this, and this can get you started on D3D11. I would also spend some time reading through the source of the sample you copied your code from, as it completely explains how to do this. Here is the link to the full source code for that sample.
To actually get the texture data and render it out, you need to do the following:
1). Create a D3D11 Device object and a Device Context.
2). Write and compile a Vertex and Pixel shader for the graphics card, then load them into your application.
3). Create an Input Layout object and set it to the device.
4). Initialize the required Blend, Depth-Stencil, and Rasterizer states for the device.
5). Create a Texture object and a Shader Resource View object.
6). Acquire the Desktop Duplication texture using the above code.
7). Use CopyResource to copy the data into your texture.
8). Render that texture to the screen.
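A minimal sketch of steps 6 and 7 might look like this ('duplication' is an instance of the class from the question, and 'context'/'privateTexture' are assumed to be a device context and a texture created with the same size and format as the desktop image):
FRAME_DATA data = {};
bool timeout = false;
duplication.GetFrame(&data, &timeout); // step 6: the method shown in the question
if (!timeout && data.Frame != nullptr)
{
context->CopyResource(privateTexture, data.Frame); // step 7
// remember to release the frame on the duplication interface once you are done with it
}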
This will capture all data displayed on one of the desktops to your texture. It does not do processing on the dirty rects of the desktop. It does not do processing on moved regions. This is bare bones 'capture the desktop and display it elsewhere' code.
If you want to get more in depth, read the linked resources and study the sample code, as the sample basically does what you're asking for.
Since tacking this onto my last answer didn't feel quite right, I decided to create a second.
If you want to read the desktop data to a file, you need a D3D11 Device object, a texture object with the D3D11_USAGE_STAGING flag set, and a method of converting the RGBA pixel data of the desktop texture to whatever it is you want. The basic procedure is a simplified version of the one in my original answer:
1). Create a D3D11 Device object and a Device Context.
2). Create a Staging Texture with the same format as the Desktop Texture.
3). Use CopyResource to copy the Desktop Texture into your Staging Texture.
4). Use ID3D11DeviceContext::Map() to get a pointer to the data contained in the Staging Texture.
Make sure you know how Map works and make sure you can write out image files from a single binary stream. There may also be padding in the image buffer, so be aware you may also need to filter that out. Additionally, make sure you Unmap the buffer instead of calling free, as the buffer given to you almost certainly does not belong to the CRT.
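Here is a minimal sketch of that read-back path, assuming 'device', 'context', and 'desktopTexture' already exist and with error checking omitted:
// Create a staging copy of the desktop texture that the CPU is allowed to read.
D3D11_TEXTURE2D_DESC desc = {};
desktopTexture->GetDesc(&desc);
desc.Usage = D3D11_USAGE_STAGING;
desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
desc.BindFlags = 0;
desc.MiscFlags = 0;
ID3D11Texture2D* staging = nullptr;
device->CreateTexture2D(&desc, nullptr, &staging);
context->CopyResource(staging, desktopTexture);
// Map it and walk the rows; RowPitch may be larger than Width * 4 (that is the padding mentioned above).
D3D11_MAPPED_SUBRESOURCE mapped = {};
context->Map(staging, 0, D3D11_MAP_READ, 0, &mapped);
for (UINT y = 0; y < desc.Height; ++y)
{
const BYTE* row = static_cast<const BYTE*>(mapped.pData) + y * mapped.RowPitch;
// write desc.Width * 4 bytes of BGRA data from 'row' to your output stream here
}
context->Unmap(staging, 0);
staging->Release();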
We are working with the Kinect to track faces for a school project. We have set up Visual Studio 2012, and all the test programs are working correctly. However, when we try to run the code below it gives us an error. After many attempts to fix the code, we still get the following error:
"The application was unable to start correctly (0xc000007b). Click OK to close the application."
The good thing is that it finally builds. The bad thing is that the compiler doesn't throw any errors; all we get is this vague message when we run it.
We are completely lost and we hope that someone can help us or point us into the right direction. Thanks in advance for helping us.
The code:
#include "stdafx.h"
#include <iostream>
#include <Windows.h>
#include <NuiApi.h>
#include <FaceTrackLib.h>
#include <NuiSensor.h>
using namespace std;
HANDLE rgbStream;
HANDLE depthStream;
INuiSensor* sensor;
#define width 640
#define height 480
bool initKinect() {
// Get a working kinect sensor
int numSensors;
if (NuiGetSensorCount(&numSensors) < 0 || numSensors < 1) return false;
if (NuiCreateSensorByIndex(0, &sensor) < 0) return false;
// Initialize sensor
sensor->NuiInitialize(NUI_INITIALIZE_FLAG_USES_DEPTH | NUI_INITIALIZE_FLAG_USES_COLOR);
sensor->NuiImageStreamOpen(
NUI_IMAGE_TYPE_COLOR, // Depth camera or rgb camera?
NUI_IMAGE_RESOLUTION_640x480, // Image resolution
0, // Image stream flags, e.g. near mode
2, // Number of frames to buffer
NULL, // Event handle
&rgbStream);
// --------------- END CHANGED CODE -----------------
return true;
}
BYTE* dataEnd;
USHORT* dataEndD;
void getKinectDataD(){
NUI_IMAGE_FRAME imageFrame;
NUI_LOCKED_RECT LockedRect;
if (sensor->NuiImageStreamGetNextFrame(rgbStream, 0, &imageFrame) < 0) return;
INuiFrameTexture* texture = imageFrame.pFrameTexture;
texture->LockRect(0, &LockedRect, NULL, 0);
const USHORT* curr = (const USHORT*)LockedRect.pBits;
const USHORT* dataEnding = curr + (width*height);
if (LockedRect.Pitch != 0)
{
const BYTE* curr = (const BYTE*)LockedRect.pBits;
dataEnd = (BYTE*)(curr + (width*height) * 4);
}
while (curr < dataEnding) {
// Get depth in millimeters
USHORT depth = NuiDepthPixelToDepth(*curr++);
dataEndD = (USHORT*)depth;
// Draw a grayscale image of the depth:
// B,G,R are all set to depth%256, alpha set to 1.
}
texture->UnlockRect(0);
sensor->NuiImageStreamReleaseFrame(rgbStream, &imageFrame);
}
// This example assumes that the application provides
// void* cameraFrameBuffer, a buffer for an image, and that there is a method
// to fill the buffer with data from a camera, for example
// cameraObj.ProcessIO(cameraFrameBuffer)
int main(){
initKinect();
// Create an instance of a face tracker
IFTFaceTracker* pFT = FTCreateFaceTracker();
if (!pFT)
{
// Handle errors
}
// Initialize cameras configuration structures.
// IMPORTANT NOTE: resolutions and focal lengths must be accurate, since it affects tracking precision!
// It is better to use enums defined in NuiAPI.h
// Video camera config with width, height, focal length in pixels
// NUI_CAMERA_COLOR_NOMINAL_FOCAL_LENGTH_IN_PIXELS focal length is computed for 640x480 resolution
// If you use different resolutions, multiply this focal length by the scaling factor
FT_CAMERA_CONFIG videoCameraConfig = { 640, 480, NUI_CAMERA_COLOR_NOMINAL_FOCAL_LENGTH_IN_PIXELS };
// Depth camera config with width, height, focal length in pixels
// NUI_CAMERA_COLOR_NOMINAL_FOCAL_LENGTH_IN_PIXELS focal length is computed for 320x240 resolution
// If you use different resolutions, multiply this focal length by the scaling factor
FT_CAMERA_CONFIG depthCameraConfig = { 320, 240, NUI_CAMERA_DEPTH_NOMINAL_FOCAL_LENGTH_IN_PIXELS };
// Initialize the face tracker
HRESULT hr = pFT->Initialize(&videoCameraConfig, &depthCameraConfig, NULL, NULL);
if (FAILED(hr))
{
// Handle errors
}
// Create a face tracking result interface
IFTResult* pFTResult = NULL;
hr = pFT->CreateFTResult(&pFTResult);
if (FAILED(hr))
{
// Handle errors
}
// Prepare image interfaces that hold RGB and depth data
IFTImage* pColorFrame = FTCreateImage();
IFTImage* pDepthFrame = FTCreateImage();
if (!pColorFrame || !pDepthFrame)
{
// Handle errors
}
// Attach created interfaces to the RGB and depth buffers that are filled with
// corresponding RGB and depth frame data from Kinect cameras
pColorFrame->Attach(640, 480, dataEnd, FTIMAGEFORMAT_UINT8_R8G8B8, 640 * 3);
pDepthFrame->Attach(320, 240, dataEndD, FTIMAGEFORMAT_UINT16_D13P3, 320 * 2);
// You can also use Allocate() method in which case IFTImage interfaces own their memory.
// In this case use CopyTo() method to copy buffers
FT_SENSOR_DATA sensorData;
sensorData.ZoomFactor = 1.0f; // Not used must be 1.0
bool isFaceTracked = false;
// Track a face
while (true)
{
// Call Kinect API to fill videoCameraFrameBuffer and depthFrameBuffer with RGB and depth data
getKinectDataD();
// Check if we are already tracking a face
if (!isFaceTracked)
{
// Initiate face tracking.
// This call is more expensive and searches the input frame for a face.
hr = pFT->StartTracking(&sensorData, NULL, NULL, pFTResult);
if (SUCCEEDED(hr))
{
isFaceTracked = true;
}
else
{
// No faces found
isFaceTracked = false;
}
}
else
{
// Continue tracking. It uses a previously known face position.
// This call is less expensive than StartTracking()
hr = pFT->ContinueTracking(&sensorData, NULL, pFTResult);
if (FAILED(hr))
{
// Lost the face
isFaceTracked = false;
}
}
// Do something with pFTResult like visualize the mask, drive your 3D avatar,
// recognize facial expressions
}
// Clean up
pFTResult->Release();
pColorFrame->Release();
pDepthFrame->Release();
pFT->Release();
return 0;
}
We figured it out: we were indeed using the wrong DLL, and it runs without errors now. But we ran into another problem: we have no clue how to use pFTResult and retrieve the face angles with "getFaceRect". Does somebody know how?