C++ Capture Full screen in real time - c++

I'm developing a software that should capture what is happening on the screen to make some processing. One of the requirements is that the software has to run at least 30FPS.
I've tried several options and I'm going to show you two, but none of them fully works for my needs:
(1) Using Direct X - The problem with this approach is that I cannot run it at 30 FPS (I get between 15 and 20):
DirectXScreenCapturer.h:
#include "IScreenCapturer.h"
#include <Wincodec.h> // we use WIC for saving images
#include <d3d9.h> // DirectX 9 header
#pragma comment(lib, "d3d9.lib") // link to DirectX 9 library
// Screen capturer built on the legacy Direct3D 9 GetFrontBufferData path.
// Portable across Windows versions, but the GPU-to-system-memory readback is
// slow (the surrounding text reports 15-20 FPS at desktop resolution).
class DirectXScreenCapturer : public IScreenCapturer
{
public:
DirectXScreenCapturer();
~DirectXScreenCapturer();
// Fills the out-parameter with the current screen contents; returns true on success.
virtual bool CaptureScreen(cv::Mat&) override;
private:
IDirect3D9* _d3d; // D3D9 factory object (raw owning COM pointer)
IDirect3DDevice9* _device; // device used only for front-buffer readback
IDirect3DSurface9* _surface; // system-memory surface receiving the screen
cv::Mat _screen; // frame header sized to the desktop mode in the ctor
};
DirectXScreenCapturer.cpp:
#include "DirectXScreenCapturer.h"
// Creates the D3D9 device and a system-memory surface matching the current
// desktop display mode, used as the destination for GetFrontBufferData.
// NOTE(review): none of the HRESULTs below are checked; any failure leaves the
// object in a state where CaptureScreen dereferences null pointers.
DirectXScreenCapturer::DirectXScreenCapturer() : _d3d(NULL), _device(NULL), _surface(NULL)
{
HRESULT hr = S_OK;
D3DPRESENT_PARAMETERS parameters = { 0 };
D3DDISPLAYMODE mode;
D3DLOCKED_RECT rc;
LPBYTE *shots = nullptr; // NOTE(review): unused local, never assigned or freed
// init D3D and get screen size
_d3d = Direct3DCreate9(D3D_SDK_VERSION);
_d3d->GetAdapterDisplayMode(D3DADAPTER_DEFAULT, &mode);
parameters.Windowed = TRUE;
parameters.BackBufferCount = 1;
parameters.BackBufferHeight = mode.Height;
parameters.BackBufferWidth = mode.Width;
parameters.SwapEffect = D3DSWAPEFFECT_DISCARD;
parameters.hDeviceWindow = NULL;
// create device & capture surface
hr = _d3d->CreateDevice(D3DADAPTER_DEFAULT, _D3DDEVTYPE::D3DDEVTYPE_HAL, NULL, D3DCREATE_HARDWARE_VERTEXPROCESSING | D3DCREATE_DISABLE_PSGP_THREADING | D3DCREATE_PUREDEVICE, &parameters, &_device);
hr = _device->CreateOffscreenPlainSurface(mode.Width, mode.Height, D3DFMT_A8R8G8B8, D3DPOOL_SYSTEMMEM, &_surface, nullptr);
// compute the required buffer size
hr = _surface->LockRect(&rc, NULL, 0);
hr = _surface->UnlockRect();
// allocate screenshots buffers
// NOTE(review): rc.pBits comes from a LockRect that was just UnlockRect()ed,
// so _screen wraps a pointer that is formally no longer valid. It appears to
// work only because D3DPOOL_SYSTEMMEM keeps the allocation alive; readers
// should lock the surface again before touching the pixels.
_screen = cv::Mat(mode.Height, mode.Width, CV_8UC4, rc.pBits);
}
// Releases the D3D9 COM objects acquired in the constructor.
// The original destructor was empty, leaking the surface, the device and the
// IDirect3D9 factory for the lifetime of the process.
DirectXScreenCapturer::~DirectXScreenCapturer()
{
    if (_surface) { _surface->Release(); _surface = NULL; }
    if (_device) { _device->Release(); _device = NULL; }
    if (_d3d) { _d3d->Release(); _d3d = NULL; }
}
// Captures the current front buffer into `result`; returns true on success.
//
// Fixes over the original:
//  * The original handed back `_screen`, a cv::Mat wrapping the pBits pointer
//    of a LockRect that the constructor had already unlocked (a dangling
//    mapping). Here the surface is locked for the duration of the read and
//    the pixels are deep-copied, so `result` owns valid data on return.
//  * HRESULTs are checked; failures return false instead of stale pixels.
bool DirectXScreenCapturer::CaptureScreen(cv::Mat& result)
{
    if (!_device || !_surface)
        return false;

    // Read the current screen into the system-memory surface.
    HRESULT hr = _device->GetFrontBufferData(0, _surface);
    if (FAILED(hr))
        return false;

    // Map the surface and copy the pixels out while the lock is held.
    D3DLOCKED_RECT rc;
    hr = _surface->LockRect(&rc, NULL, D3DLOCK_READONLY);
    if (FAILED(hr))
        return false;

    // _screen carries the desktop dimensions computed in the constructor;
    // rc.Pitch accounts for any row padding in the surface.
    cv::Mat frame(_screen.rows, _screen.cols, CV_8UC4, rc.pBits, rc.Pitch);
    frame.copyTo(result); // deep copy: result stays valid after UnlockRect
    _surface->UnlockRect();
    return true;
}
(2) I've tried DXGI with Direct X. This solution works really well in real time, but fails to capture the screen when another application is running in full screen, so the software doesn't work if I'm watching movies, playing games, etc:
DXGIScreenCapturer.h
#include "IScreenCapturer.h"
#include <DXGI1_2.h>
#pragma comment(lib, "Dxgi.lib")
#include <D3D11.h>
#pragma comment(lib, "D3D11.lib")
// Screen capturer built on the DXGI Desktop Duplication API (Windows 8+).
// Much faster than the D3D9 path because frames stay on the GPU until they
// are copied into a single staging texture; pre-Win10 it cannot duplicate
// exclusive-fullscreen applications (the limitation discussed in the text).
class DXGIScreenCapturer : public IScreenCapturer
{
public:
DXGIScreenCapturer();
~DXGIScreenCapturer();
// Sets up device, duplication and staging texture; returns true on success.
bool Init();
// Acquires one duplicated frame into the out-parameter; returns false when
// no new frame (or only a mouse update) is available.
virtual bool CaptureScreen(cv::Mat&) override;
private:
ID3D11Device* _lDevice; // D3D11 device (raw owning COM pointer)
ID3D11DeviceContext* _lImmediateContext; // immediate context for CopyResource/Map
IDXGIOutputDuplication* _lDeskDupl; // desktop duplication session
ID3D11Texture2D* _lAcquiredDesktopImage; // most recently acquired GPU frame
DXGI_OUTPUT_DESC _lOutputDesc; // description of the duplicated output
DXGI_OUTDUPL_DESC _lOutputDuplDesc; // mode (width/height/format) of the duplication
D3D11_MAPPED_SUBRESOURCE _resource; // last CPU mapping of the staging texture
ID3D11Texture2D* currTexture; // CPU-readable staging copy of the frame
cv::Mat _result; // cached frame header handed out by CaptureScreen
};
DXGIScreenCapturer.cpp
#include "DXGIScreenCapturer.h"
// Constructs the capturer and eagerly initializes the duplication pipeline.
//
// Fix over the original: the COM pointer members were never zero-initialized,
// so if Init() bailed out part-way through, the remaining members held
// indeterminate values and any later Release()/use was undefined behavior.
// NOTE(review): Init()'s result is still discarded here to preserve the
// original interface; callers may invoke Init() again to retry after failure.
DXGIScreenCapturer::DXGIScreenCapturer()
    : _lDevice(nullptr)
    , _lImmediateContext(nullptr)
    , _lDeskDupl(nullptr)
    , _lAcquiredDesktopImage(nullptr)
    , _resource()
    , currTexture(nullptr)
{
    Init();
}
// Releases every COM object owned by the capturer (the original destructor
// was empty and leaked the device, context, duplication session and textures).
// Pointers that were never assigned are expected to be null; each release is
// guarded so a partially-initialized instance tears down safely.
DXGIScreenCapturer::~DXGIScreenCapturer()
{
    if (_lAcquiredDesktopImage) { _lAcquiredDesktopImage->Release(); _lAcquiredDesktopImage = nullptr; }
    if (currTexture) { currTexture->Release(); currTexture = nullptr; }
    if (_lDeskDupl) { _lDeskDupl->Release(); _lDeskDupl = nullptr; }
    if (_lImmediateContext) { _lImmediateContext->Release(); _lImmediateContext = nullptr; }
    if (_lDevice) { _lDevice->Release(); _lDevice = nullptr; }
}
bool DXGIScreenCapturer::Init() {
// Feature levels supported
D3D_FEATURE_LEVEL gFeatureLevels[] = {
D3D_FEATURE_LEVEL_11_0,
D3D_FEATURE_LEVEL_10_1,
D3D_FEATURE_LEVEL_10_0,
D3D_FEATURE_LEVEL_9_1
};
UINT gNumFeatureLevels = ARRAYSIZE(gFeatureLevels);
D3D_FEATURE_LEVEL lFeatureLevel;
HRESULT hr(E_FAIL);
hr = D3D11CreateDevice(nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, D3D11_CREATE_DEVICE_FLAG::D3D11_CREATE_DEVICE_SINGLETHREADED, gFeatureLevels, gNumFeatureLevels, D3D11_SDK_VERSION, &_lDevice, &lFeatureLevel, &_lImmediateContext);
if (FAILED(hr))
return false;
if (!_lDevice)
return false;
// Get DXGI device
IDXGIDevice* lDxgiDevice;
hr = _lDevice->QueryInterface(__uuidof(IDXGIDevice), reinterpret_cast<void**>(&lDxgiDevice));
if (FAILED(hr))
return false;
// Get DXGI adapter
IDXGIAdapter* lDxgiAdapter;
hr = lDxgiDevice->GetParent(__uuidof(IDXGIAdapter), reinterpret_cast<void**>(&lDxgiAdapter));
lDxgiDevice->Release();
lDxgiDevice = nullptr;
if (FAILED(hr))
return false;
UINT Output = 0;
// Get output
IDXGIOutput* lDxgiOutput;
hr = lDxgiAdapter->EnumOutputs(Output, &lDxgiOutput);
if (FAILED(hr))
return false;
lDxgiAdapter->Release();
lDxgiAdapter = nullptr;
hr = lDxgiOutput->GetDesc(&_lOutputDesc);
if (FAILED(hr))
return false;
// QI for Output 1
IDXGIOutput1* lDxgiOutput1;
hr = lDxgiOutput->QueryInterface(__uuidof(lDxgiOutput1), reinterpret_cast<void**>(&lDxgiOutput1));
lDxgiOutput->Release();
lDxgiOutput = nullptr;
if (FAILED(hr))
return false;
// Create desktop duplication
hr = lDxgiOutput1->DuplicateOutput(_lDevice, &_lDeskDupl);
if (FAILED(hr))
return false;
lDxgiOutput1->Release();
lDxgiOutput1 = nullptr;
// Create GUI drawing texture
_lDeskDupl->GetDesc(&_lOutputDuplDesc);
// Create CPU access texture
D3D11_TEXTURE2D_DESC desc;
desc.Width = _lOutputDuplDesc.ModeDesc.Width;
desc.Height = _lOutputDuplDesc.ModeDesc.Height;
desc.Format = _lOutputDuplDesc.ModeDesc.Format;
desc.ArraySize = 1;
desc.BindFlags = 0;
desc.MiscFlags = 0;
desc.SampleDesc.Count = 1;
desc.SampleDesc.Quality = 0;
desc.MipLevels = 1;
desc.CPUAccessFlags = D3D11_CPU_ACCESS_FLAG::D3D11_CPU_ACCESS_READ;
desc.Usage = D3D11_USAGE::D3D11_USAGE_STAGING;
hr = _lDevice->CreateTexture2D(&desc, NULL, &currTexture);
if (!currTexture)
{
hr = _lDeskDupl->ReleaseFrame();
return false;
}
while (!CaptureScreen(_result));
_result = cv::Mat(desc.Height, desc.Width, CV_8UC4, _resource.pData);
return true;
}
// Acquires the next duplicated desktop frame and copies its pixels into
// `output` as a BGRA (CV_8UC4) cv::Mat. Returns false when no new frame is
// available within the timeout or the frame carried only a mouse update.
//
// Fixes over the original:
//  * the IDXGIResource was leaked on the mouse-only-update early return;
//  * the acquired ID3D11Texture2D was never released (one leak per frame);
//  * `output` aliased a pointer obtained from a Map() that had already been
//    Unmap()ed — the pixels are now deep-copied while the mapping is valid,
//    honoring RowPitch, so the returned Mat owns its data.
bool DXGIScreenCapturer::CaptureScreen(cv::Mat& output)
{
    IDXGIResource* lDesktopResource = nullptr;
    DXGI_OUTDUPL_FRAME_INFO lFrameInfo;
    HRESULT hr = _lDeskDupl->AcquireNextFrame(999, &lFrameInfo, &lDesktopResource);
    if (FAILED(hr))
        return false;
    // Not interested in just mouse updates, which can happen much faster than
    // 60fps if you really shake the mouse.
    if (lFrameInfo.LastPresentTime.HighPart == 0)
    {
        lDesktopResource->Release();
        _lDeskDupl->ReleaseFrame();
        return false;
    }
    // QI for ID3D11Texture2D
    hr = lDesktopResource->QueryInterface(__uuidof(ID3D11Texture2D), reinterpret_cast<void **>(&_lAcquiredDesktopImage));
    lDesktopResource->Release();
    lDesktopResource = nullptr;
    if (FAILED(hr))
    {
        _lDeskDupl->ReleaseFrame();
        return false;
    }
    // Copy the GPU frame into the CPU staging texture, then drop the frame
    // texture (only the staging copy is needed from here on).
    _lImmediateContext->CopyResource(currTexture, _lAcquiredDesktopImage);
    _lAcquiredDesktopImage->Release();
    _lAcquiredDesktopImage = nullptr;
    UINT subresource = D3D11CalcSubresource(0, 0, 0);
    hr = _lImmediateContext->Map(currTexture, subresource, D3D11_MAP_READ, 0, &_resource);
    if (SUCCEEDED(hr))
    {
        // RowPitch may exceed Width*4; wrap the mapped memory with its stride
        // and deep-copy before unmapping so `output` stays valid afterwards.
        cv::Mat mapped(_lOutputDuplDesc.ModeDesc.Height, _lOutputDuplDesc.ModeDesc.Width,
                       CV_8UC4, _resource.pData, _resource.RowPitch);
        mapped.copyTo(output);
        _lImmediateContext->Unmap(currTexture, subresource);
    }
    _lDeskDupl->ReleaseFrame();
    return SUCCEEDED(hr);
}
Please note that IScreenCapturer is just an interface to quickly swap implementations, and also note that the returned result is a cv::Mat object (OpenCV does the rest of the processing).
Any help on this issue? I'm still trying to figure out a solution that can capture the screen at least 30 FPS and that can capture the whole screen even when an app is running at full screen.

Related

Capture from desktop (VFR) to mp4 - how to deal with frame rate?

I'm putting frames from the Desktop Duplication API through a Media Foundation H264 encoder transform (in this case, NVIDIA's hardware accelerated encoder) and then into a SinkWriter to put the frames into an MP4 container. This whole process works quite well and is extremely fast.
The problem
The video is in slow motion, so to speak. I have the frame rates set everywhere to 60 fps, and the sample time and durations are based on that 60 fps. What is happening is that I am providing way more frames than 60 per second to the SinkWriter. I don't quite know how the MP4 format and video players work but I assume it is simply looking at the frame duration (16.7ms for 60fps) and since there are way more frames than 60, it appears slowed down.
I've had a very hard time finding out how to 'properly' limit the frames being provided to the encoder/sink writer. If I simply Sleep for 15ms or so, the video appears fine but I realize that's not the way to do it - it was just for testing and to confirm my theory. I've tried using the Frame Rate Converter DSP but it gives me an E_UNEXPECTED because I don't think it expects a 'live' source.
Essentially I think I need to do a variable frame rate to constant frame rate conversion.
My question
How would you normally deal with this issue? How do you do this? Are there ways to do it with a 'live' source in Media Foundation? Or is a manual implementation required (e.g. calculating, dropping frames if faster or duplicating them if slower, etc)?
Code provided below;
#define WIN32_LEAN_AND_MEAN
#include <iostream>
#include <mfapi.h>
#include <d3d11.h>
#include <d3d11_4.h>
#include <dxgi1_5.h>
#include <atlcomcli.h>
#include <mftransform.h>
#include <cassert>
#include <mfidl.h>
#include <mfreadwrite.h>
#include <wmcodecdsp.h>
#include <evr.h>
void SetupEncoder(ID3D11Device*, IMFTransform**);
void SetupFrameRateConverter(ID3D11Device*, IMFTransform**);
void SetupSinkWriter(IMFSinkWriter**);
void InitializeBuffer(IMFTransform*, MFT_OUTPUT_DATA_BUFFER*, UINT32);
// Entry point: duplicates a desktop output via DXGI, feeds each frame to an
// asynchronous hardware H.264 encoder MFT (event-driven NeedInput/HaveOutput
// loop), and muxes the encoded samples into output.mp4 through a sink writer.
// NOTE(review): most HRESULTs below are captured but never checked.
int main()
{
SetProcessDpiAwarenessContext(DPI_AWARENESS_CONTEXT_PER_MONITOR_AWARE_V2);
IDXGIFactory1* dxgiFactory;
auto hr = CreateDXGIFactory1(__uuidof(IDXGIFactory1), (void**)(&dxgiFactory));
IDXGIAdapter* adapter = NULL;
dxgiFactory->EnumAdapters(0, &adapter);
DXGI_ADAPTER_DESC desc = {};
adapter->GetDesc(&desc);
printf("GPU %d: %S (Vendor %04x Device %04x)\n", 0, desc.Description, desc.VendorId, desc.DeviceId);
IDXGIOutput* output;
// NOTE(review): output index 1 (the second monitor) is hard-coded here —
// confirm that is intentional; index 0 is the primary output.
adapter->EnumOutputs(1, &output);
DXGI_OUTPUT_DESC outputDesc = {};
output->GetDesc(&outputDesc);
printf("Output %S\n", outputDesc.DeviceName);
IDXGIOutput5* dxgiOutput5;
output->QueryInterface(&dxgiOutput5);
// Set up D3D11
D3D_FEATURE_LEVEL featureLevels[] =
{
D3D_FEATURE_LEVEL_11_1,
D3D_FEATURE_LEVEL_11_0,
D3D_FEATURE_LEVEL_10_1,
D3D_FEATURE_LEVEL_10_0,
};
ID3D11Device* device;
D3D_FEATURE_LEVEL levelChosen;
ID3D11DeviceContext* deviceContext;
auto result = D3D11CreateDevice(adapter, D3D_DRIVER_TYPE_UNKNOWN, NULL, D3D11_CREATE_DEVICE_BGRA_SUPPORT, featureLevels, _countof(featureLevels), D3D11_SDK_VERSION, &device, &levelChosen, &deviceContext);
// Multithread protection is required because Media Foundation worker threads
// touch the device concurrently with this thread.
ID3D11Multithread* multithread;
device->QueryInterface(&multithread);
multithread->SetMultithreadProtected(true);
// Set up output duplication
DXGI_FORMAT formats[] =
{
DXGI_FORMAT_B8G8R8A8_UNORM
};
IDXGIOutputDuplication* duplication;
result = dxgiOutput5->DuplicateOutput1(device, 0, _countof(formats), formats, &duplication);
IMFTransform* encoder, * fpsConverter;
IMFSinkWriter* sinkWriter;
SetupEncoder(device, &encoder);
SetupFrameRateConverter(device, &fpsConverter);
SetupSinkWriter(&sinkWriter);
// Allocate buffers
IMFMediaType* outputType;
fpsConverter->GetOutputCurrentType(0, &outputType);
MFT_OUTPUT_DATA_BUFFER buffer;
DWORD status;
UINT32 uiFrameSize = 0;
hr = outputType->GetUINT32(MF_MT_SAMPLE_SIZE, &uiFrameSize);
InitializeBuffer(fpsConverter, &buffer, uiFrameSize);
// Event generator for async MFT
IMFMediaEventGenerator* eventGenerator;
encoder->QueryInterface(&eventGenerator);
// Start up
result = encoder->ProcessMessage(MFT_MESSAGE_COMMAND_FLUSH, NULL);
result = encoder->ProcessMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, NULL);
result = encoder->ProcessMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM, NULL);
// Timestamps are in 100-ns units; frameDuration assumes a fixed 60 fps even
// though AcquireNextFrame delivers frames at a variable rate — this mismatch
// is the cause of the slow-motion output described in the question.
long startTime = 0;
long frameDuration = (long)((1 / 60.f) * 10000000);
UINT frameCounter = 0;
while (frameCounter < 1000)
{
// Blocking wait for the encoder to ask for input or offer output.
IMFMediaEvent* mediaEvent;
eventGenerator->GetEvent(0, &mediaEvent);
MediaEventType eventType;
mediaEvent->GetType(&eventType);
if (eventType == METransformNeedInput)
{
// Grab frame first
DXGI_OUTDUPL_FRAME_INFO frameInfo;
IDXGIResource* screenResource;
duplication->AcquireNextFrame(10000, &frameInfo, &screenResource);
ID3D11Texture2D* texture;
screenResource->QueryInterface(&texture);
// Verify correct screen for now
D3D11_TEXTURE2D_DESC d;
texture->GetDesc(&d);
assert(d.Width == 1920);
// Create sample for it
IMFSample* sample;
IMFMediaBuffer* mediaBuffer;
result = MFCreateVideoSampleFromSurface(NULL, &sample);
result = MFCreateDXGISurfaceBuffer(IID_ID3D11Texture2D, texture, 0, TRUE, &mediaBuffer);
result = sample->AddBuffer(mediaBuffer);
////////////////////////
// Does not work, E_UNEXPECTED
// Put it through the FPS converter
/*result = fpsConverter->ProcessInput(0, sample, 0);
if (FAILED(result))
break;
result = fpsConverter->ProcessOutput(0, 1, &buffer, &status);*/
///////////////////////
sample->SetSampleDuration(frameDuration);
sample->SetSampleTime(startTime);
startTime += frameDuration;
result = encoder->ProcessInput(0, sample, 0);
sample->Release();
mediaBuffer->Release();
++frameCounter;
// Important, do not forget to release frame
duplication->ReleaseFrame();
}
else if (eventType == METransformHaveOutput)
{
// Drain one encoded sample and hand it to the MP4 sink writer.
MFT_OUTPUT_DATA_BUFFER encodingOutputBuffer;
encodingOutputBuffer.dwStreamID = 0;
encodingOutputBuffer.pSample = nullptr;
encodingOutputBuffer.dwStatus = 0;
encodingOutputBuffer.pEvents = nullptr;
result = encoder->ProcessOutput(0, 1, &encodingOutputBuffer, 0);
// Now write to sink
sinkWriter->WriteSample(0, encodingOutputBuffer.pSample);
if (encodingOutputBuffer.pSample)
encodingOutputBuffer.pSample->Release();
if (encodingOutputBuffer.pEvents)
encodingOutputBuffer.pEvents->Release();
}
}
// Orderly teardown: drain the encoder, finalize the MP4, release COM objects.
encoder->ProcessMessage(MFT_MESSAGE_NOTIFY_END_OF_STREAM, NULL);
encoder->ProcessMessage(MFT_MESSAGE_NOTIFY_END_STREAMING, NULL);
encoder->ProcessMessage(MFT_MESSAGE_COMMAND_FLUSH, NULL);
result = sinkWriter->Finalize();
sinkWriter->Release();
duplication->Release();
adapter->Release();
device->Release();
}
// Enumerates hardware H.264 encoder MFTs, activates the best match, attaches
// the DXGI device manager (so the encoder can consume GPU surfaces directly),
// and configures H.264 output / NV12 input media types at 1920x1080 ~60fps.
// On return *encoderOut owns the configured, not-yet-streaming transform.
//
// Fixes over the original:
//  * the MFTEnumEx activation array is CoTaskMem-allocated and must be freed
//    with CoTaskMemFree (it was leaked);
//  * the string from GetAllocatedString is caller-freed (it was leaked);
//  * removed the unused IMFAttributes `ptr` object (created, never used or
//    released);
//  * local references to attributes/media types are released after use.
void SetupEncoder(ID3D11Device* device, IMFTransform** encoderOut)
{
    MFStartup(MF_VERSION, MFSTARTUP_FULL);
    UINT token;
    IMFDXGIDeviceManager* deviceManager;
    MFCreateDXGIDeviceManager(&token, &deviceManager);
    deviceManager->ResetDevice(device, token);
    MFT_REGISTER_TYPE_INFO outputType;
    outputType.guidMajorType = MFMediaType_Video;
    outputType.guidSubtype = MFVideoFormat_H264;
    IMFActivate** activates = NULL;
    UINT count = 0;
    MFTEnumEx(MFT_CATEGORY_VIDEO_ENCODER, MFT_ENUM_FLAG_HARDWARE | MFT_ENUM_FLAG_SORTANDFILTER, NULL, &outputType, &activates, &count);
    IMFTransform* encoder;
    activates[0]->ActivateObject(IID_PPV_ARGS(&encoder));
    // Release the activation objects AND the CoTaskMem array holding them.
    for (UINT32 i = 0; i < count; i++)
    {
        activates[i]->Release();
    }
    CoTaskMemFree(activates);
    IMFAttributes* attribs;
    encoder->GetAttributes(&attribs);
    // Required: unlock the async MFT and request low-latency operation.
    attribs->SetUINT32(MF_TRANSFORM_ASYNC_UNLOCK, 1);
    attribs->SetUINT32(MF_LOW_LATENCY, 1);
    LPWSTR friendlyName = 0;
    UINT friendlyNameLength;
    attribs->GetAllocatedString(MFT_FRIENDLY_NAME_Attribute, &friendlyName, &friendlyNameLength);
    printf("Using encoder %S", friendlyName);
    CoTaskMemFree(friendlyName); // caller owns strings from GetAllocatedString
    attribs->Release();
    // NOTE(review): deviceManager is intentionally not released here — the
    // encoder references it, and it must outlive the encoding session.
    auto result = encoder->ProcessMessage(MFT_MESSAGE_SET_D3D_MANAGER, reinterpret_cast<ULONG_PTR>(deviceManager));
    DWORD inputStreamId, outputStreamId;
    encoder->GetStreamIDs(1, &inputStreamId, 1, &outputStreamId);
    // Set up output media type (must be set before the input type on encoders).
    IMFMediaType* mediaType;
    MFCreateMediaType(&mediaType);
    mediaType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
    mediaType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264);
    mediaType->SetUINT32(MF_MT_AVG_BITRATE, 10240000);
    mediaType->SetUINT32(MF_MT_INTERLACE_MODE, 2); // 2 == MFVideoInterlace_Progressive
    mediaType->SetUINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, 1);
    MFSetAttributeSize(mediaType, MF_MT_FRAME_SIZE, 1920, 1080);
    MFSetAttributeRatio(mediaType, MF_MT_FRAME_RATE, 60000, 1001);
    result = encoder->SetOutputType(outputStreamId, mediaType, 0);
    mediaType->Release();
    // Set up input media type from the encoder's own suggestion.
    IMFMediaType* suggestedInputType;
    result = encoder->GetInputAvailableType(inputStreamId, 0, &suggestedInputType);
    suggestedInputType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
    suggestedInputType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_NV12);
    MFSetAttributeSize(suggestedInputType, MF_MT_FRAME_SIZE, 1920, 1080);
    MFSetAttributeRatio(suggestedInputType, MF_MT_FRAME_RATE, 60000, 1001);
    result = encoder->SetInputType(inputStreamId, suggestedInputType, 0);
    suggestedInputType->Release();
    *encoderOut = encoder;
}
// Creates and configures the Frame Rate Converter DSP, intended to turn the
// variable-rate duplication frames into constant 60fps ARGB32 frames.
// NOTE(review): the `device` parameter is unused — this DMO runs in system
// memory, so GPU samples from MFCreateDXGISurfaceBuffer may be why
// ProcessInput later fails with E_UNEXPECTED (see the commented-out call in
// main); confirm by feeding it CPU-backed samples.
void SetupFrameRateConverter(ID3D11Device* device, IMFTransform** fpsConverterTransformOut)
{
// Set up DSP
IMFTransform* fpsConverter;
CoCreateInstance(CLSID_CFrameRateConvertDmo, NULL, CLSCTX_INPROC_SERVER, IID_IMFTransform, reinterpret_cast<void**>(&fpsConverter));
// Set up fps input type
IMFMediaType* mediaType;
MFCreateMediaType(&mediaType);
UINT32 imageSize;
MFCalculateImageSize(MFVideoFormat_ARGB32, 1920, 1080, &imageSize);
mediaType->SetUINT32(MF_MT_SAMPLE_SIZE, imageSize);
mediaType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
mediaType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_ARGB32);
MFSetAttributeSize(mediaType, MF_MT_FRAME_SIZE, 1920, 1080);
// NOTE(review): the input type is set WITHOUT MF_MT_FRAME_RATE — the same
// IMFMediaType object is then mutated and reused as the output type, so only
// the output carries the 60000/1001 rate. Verify the DSP accepts this.
auto result = fpsConverter->SetInputType(0, mediaType, 0);
// Set up fps output type
MFSetAttributeRatio(mediaType, MF_MT_FRAME_RATE, 60000, 1001);
result = fpsConverter->SetOutputType(0, mediaType, 0);
// Start up FPS
fpsConverter->ProcessMessage(MFT_MESSAGE_NOTIFY_BEGIN_STREAMING, NULL);
fpsConverter->ProcessMessage(MFT_MESSAGE_NOTIFY_START_OF_STREAM, NULL);
*fpsConverterTransformOut = fpsConverter;
}
// Creates a sink writer targeting output.mp4, registers one H.264 video
// stream, and starts the writing session. Because the input type set on the
// writer is the same compressed H.264 type as the stream type, the writer
// performs no transcoding — it just muxes the already-encoded samples.
void SetupSinkWriter(IMFSinkWriter** sinkWriterOut)
{
IMFAttributes* attribs;
MFCreateAttributes(&attribs, 0);
attribs->SetUINT32(MF_LOW_LATENCY, 1);
attribs->SetUINT32(MF_READWRITE_ENABLE_HARDWARE_TRANSFORMS, 1);
attribs->SetGUID(MF_TRANSCODE_CONTAINERTYPE, MFTranscodeContainerType_MPEG4);
// Throttling is disabled so the writer never blocks the capture loop; the
// caller is therefore responsible for pacing frames itself.
attribs->SetUINT32(MF_SINK_WRITER_DISABLE_THROTTLING, 1);
IMFSinkWriter* sinkWriter;
MFCreateSinkWriterFromURL(L"output.mp4", NULL, attribs, &sinkWriter);
// Set up input type
IMFMediaType* mediaType;
MFCreateMediaType(&mediaType);
mediaType->SetGUID(MF_MT_MAJOR_TYPE, MFMediaType_Video);
mediaType->SetGUID(MF_MT_SUBTYPE, MFVideoFormat_H264);
mediaType->SetUINT32(MF_MT_AVG_BITRATE, 10240000);
MFSetAttributeSize(mediaType, MF_MT_FRAME_SIZE, 1920, 1080);
MFSetAttributeRatio(mediaType, MF_MT_FRAME_RATE, 60000, 1001);
MFSetAttributeRatio(mediaType, MF_MT_PIXEL_ASPECT_RATIO, 1, 1);
mediaType->SetUINT32(MF_MT_INTERLACE_MODE, 2); // 2 == MFVideoInterlace_Progressive
mediaType->SetUINT32(MF_MT_ALL_SAMPLES_INDEPENDENT, 0);
DWORD streamIndex;
auto result = sinkWriter->AddStream(mediaType, &streamIndex);
result = sinkWriter->SetInputMediaType(streamIndex, mediaType, NULL);
sinkWriter->BeginWriting();
*sinkWriterOut = sinkWriter;
}
// Pre-allocates the output sample for a transform whose output stream does
// not provide its own samples: creates an IMFSample backed by a memory
// buffer of `frameSize` bytes and stores it (AddRef'd) in buffer->pSample.
// `buffer` is always zeroed, so on any failure it is left empty but valid.
//
// Fix over the original: Release() was called unconditionally on
// pMediaBuffer/pOutputSample, dereferencing a null pointer whenever
// MFCreateSample or MFCreateMemoryBuffer failed; the releases are now
// null-guarded.
void InitializeBuffer(IMFTransform* transform, MFT_OUTPUT_DATA_BUFFER* buffer, const UINT32 frameSize)
{
    MFT_OUTPUT_STREAM_INFO outputStreamInfo;
    DWORD outputStreamId = 0;
    ZeroMemory(&outputStreamInfo, sizeof(outputStreamInfo));
    ZeroMemory(buffer, sizeof(*buffer));
    auto hr = transform->GetOutputStreamInfo(outputStreamId, &outputStreamInfo);
    if (FAILED(hr))
        return; // same as the original: leave `buffer` zeroed
    if ((outputStreamInfo.dwFlags & MFT_OUTPUT_STREAM_PROVIDES_SAMPLES) == 0 &&
        (outputStreamInfo.dwFlags & MFT_OUTPUT_STREAM_CAN_PROVIDE_SAMPLES) == 0)
    {
        IMFSample* pOutputSample = NULL;
        IMFMediaBuffer* pMediaBuffer = NULL;
        hr = MFCreateSample(&pOutputSample);
        if (SUCCEEDED(hr)) {
            hr = MFCreateMemoryBuffer(frameSize, &pMediaBuffer);
        }
        if (SUCCEEDED(hr)) {
            hr = pOutputSample->AddBuffer(pMediaBuffer);
        }
        if (SUCCEEDED(hr)) {
            // Keep one reference alive in the caller's buffer.
            buffer->pSample = pOutputSample;
            buffer->pSample->AddRef();
        }
        if (pMediaBuffer)
            pMediaBuffer->Release();
        if (pOutputSample)
            pOutputSample->Release();
    }
    else
    {
        std::cout << "Stream provides samples";
    }
}

Error passing CoreWindow as function parameter

I am new to UWP and WinRT, I already knew DirectX11 and Win32 and I am quite interested in learning UWP and WinRT.
DXMainResources is a class to use Direct3D and Direct2D, and the most important thing when creating resources is the window (CoreWindow); however, when passing the window as a function parameter, many errors appear, such as:
C2065 error 'bEnableDepthStencilBuffer': undeclared identifier
Error C2065 'window': undeclared identifier
Error C2065 'window_': undeclared identifier
Faced with these errors, if I remove the CoreWindow window parameter the program compiles, but the window as a parameter is necessary, since I need its height and width to create the swap chain and the texture for the depth stencil view.
Is there a way to pass the window as a parameter?
I'm using DirectX 11.3 interfaces and the WinRT version I'm using is 2.0.201026.4
#pragma once
#include "pch.h"
#include <d3d11_3.h>
#include <d2d1_3.h>
#include <dxgi1_4.h>
#include <dwrite_3.h>
#include <array>
using namespace D2D1;
// Null-safe COM release helper: releases *ppInterface (if non-null) and
// nulls the caller's pointer so double-release cannot occur.
template<class Interface> void TSafeRelease(Interface** ppInterface)
{
    Interface* raw = *ppInterface;
    if (raw == nullptr)
        return;
    *ppInterface = nullptr;
    raw->Release();
}
// Owns the core D3D11.3 / D2D1.3 / DXGI / DirectWrite objects for a UWP
// CoreWindow-based renderer: device, swap chain, render-target and
// depth-stencil views, D2D target bitmap and a default text format.
// NOTE(review): the C2065 errors described in the surrounding text occur when
// <winrt/Windows.UI.Core.h> is not included before this header — without it
// the CoreWindow type (and hence the whole parameter list) is undeclared.
class DXMainResources
{
public:
// All members start null/zero; CreateResources() does the real work.
DXMainResources()
{
d3dDev = nullptr;
d3dDevContext = nullptr;
d3dRTView = nullptr;
d3dDSView = nullptr;
d2dDev = nullptr;
d2dDevContext = nullptr;
d2dTargetBitmap = nullptr;
dxgiFactory = nullptr;
dxgiDev = nullptr;
dxgiDefaultAdapter = nullptr;
dxgiSwapChain = nullptr;
dwriteFactory = nullptr;
dwriteTextFmt = nullptr;
featureLevel = static_cast<D3D_FEATURE_LEVEL>(0);
dxgiSwapChainFmt = DXGI_FORMAT_UNKNOWN;
dxgiDepthStencilFmt = DXGI_FORMAT_UNKNOWN;
viewPort = D3D11_VIEWPORT();
}
// NOTE(review): destructor releases nothing; every raw COM member leaks
// unless the owner calls TSafeRelease on them externally.
~DXMainResources()
{
}
// Creates device, swap chain and (optionally) D3D render/depth targets for
// the given CoreWindow. bOnlyDirect2D skips the D3D11 target setup;
// bEnableDepthStencilBuffer controls creation of the depth-stencil view.
void CreateResources(CoreWindow const& window, bool bOnlyDirect2D = false, bool bEnableDepthStencilBuffer = true
, DXGI_FORMAT dxgiSwapChainFmt_ = DXGI_FORMAT_B8G8R8A8_UNORM, DXGI_FORMAT dxgiDepthStencilFmt_ = DXGI_FORMAT_D24_UNORM_S8_UINT
, DXGI_SWAP_EFFECT swapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL)
{
dxgiSwapChainFmt = dxgiSwapChainFmt_;
dxgiDepthStencilFmt = dxgiDepthStencilFmt_;
CoreWindow window_ = window; // local copy of the WinRT projection handle
std::array<D3D_FEATURE_LEVEL, 9> featureLevels =
{
D3D_FEATURE_LEVEL_12_1,
D3D_FEATURE_LEVEL_12_0,
D3D_FEATURE_LEVEL_11_1,
D3D_FEATURE_LEVEL_11_0,
D3D_FEATURE_LEVEL_10_1,
D3D_FEATURE_LEVEL_10_0,
D3D_FEATURE_LEVEL_9_3,
D3D_FEATURE_LEVEL_9_2,
D3D_FEATURE_LEVEL_9_1,
};
// NOTE(review): the DEBUG flag is unconditional — device creation fails on
// machines without the D3D11 SDK layers installed.
UINT deviceFlags = D3D11_CREATE_DEVICE_DEBUG | D3D11_CREATE_DEVICE_BGRA_SUPPORT;
HRESULT hr = D3D11CreateDevice(nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, deviceFlags, featureLevels.data(), static_cast<UINT>(std::size(featureLevels))
, D3D11_SDK_VERSION, reinterpret_cast<ID3D11Device**>(&d3dDev), &featureLevel, nullptr);
hr = d3dDev->QueryInterface(__uuidof(IDXGIDevice3), reinterpret_cast<void**>(&dxgiDev));
hr = dxgiDev->GetAdapter(reinterpret_cast<IDXGIAdapter**>(&dxgiDefaultAdapter));
hr = dxgiDefaultAdapter->GetParent(__uuidof(IDXGIFactory4), reinterpret_cast<void**>(&dxgiFactory));
DXGI_SWAP_CHAIN_DESC1 swapChainDesc;
ZeroMemory(&swapChainDesc, sizeof(DXGI_SWAP_CHAIN_DESC1));
swapChainDesc.BufferCount = 2;
swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swapChainDesc.Format = dxgiSwapChainFmt_;
swapChainDesc.SampleDesc.Count = 1;
swapChainDesc.SwapEffect = swapEffect;
// NOTE(review): for C++/WinRT the documented way to get the window's
// IUnknown is winrt::get_unknown(window_); a plain reinterpret_cast of the
// projection object may not be equivalent — verify.
hr = dxgiFactory->CreateSwapChainForCoreWindow(d3dDev, reinterpret_cast<IUnknown*>(window_)
, &swapChainDesc, nullptr, reinterpret_cast<IDXGISwapChain1**>(&dxgiSwapChain));
if (bOnlyDirect2D)
{
// D2D-only path: bind a D2D bitmap target directly to the back buffer.
IDXGISurface2* dxgiBackBufferSurface = nullptr;
hr = dxgiSwapChain->GetBuffer(0, __uuidof(IDXGISurface2), reinterpret_cast<void**>(&dxgiBackBufferSurface));
D2D1_CREATION_PROPERTIES creationProps = CreationProperties(D2D1_THREADING_MODE_SINGLE_THREADED, D2D1_DEBUG_LEVEL_INFORMATION, D2D1_DEVICE_CONTEXT_OPTIONS_NONE);
D2D1_BITMAP_PROPERTIES1 bmpProps = BitmapProperties1(D2D1_BITMAP_OPTIONS_CANNOT_DRAW | D2D1_BITMAP_OPTIONS_TARGET
, PixelFormat(dxgiSwapChainFmt_, D2D1_ALPHA_MODE_IGNORE));
hr = D2D1CreateDevice(dxgiDev, creationProps, reinterpret_cast<ID2D1Device**>(&d2dDev));
hr = d2dDev->CreateDeviceContext(creationProps.options, &d2dDevContext);
hr = d2dDevContext->CreateBitmapFromDxgiSurface(dxgiBackBufferSurface, bmpProps, &d2dTargetBitmap);
d2dDevContext->SetTarget(d2dTargetBitmap);
TSafeRelease(&dxgiBackBufferSurface);
}
else
{
// Full path: same D2D setup as above (NOTE(review): duplicated verbatim —
// a candidate for extraction into a helper), plus D3D render target,
// optional depth-stencil, and viewport sized to the window bounds.
IDXGISurface2* dxgiBackBufferSurface = nullptr;
hr = dxgiSwapChain->GetBuffer(0, __uuidof(IDXGISurface2), reinterpret_cast<void**>(&dxgiBackBufferSurface));
D2D1_CREATION_PROPERTIES creationProps = CreationProperties(D2D1_THREADING_MODE_SINGLE_THREADED, D2D1_DEBUG_LEVEL_INFORMATION, D2D1_DEVICE_CONTEXT_OPTIONS_NONE);
D2D1_BITMAP_PROPERTIES1 bmpProps = BitmapProperties1(D2D1_BITMAP_OPTIONS_CANNOT_DRAW | D2D1_BITMAP_OPTIONS_TARGET
, PixelFormat(dxgiSwapChainFmt_, D2D1_ALPHA_MODE_IGNORE));
hr = D2D1CreateDevice(dxgiDev, creationProps, reinterpret_cast<ID2D1Device**>(&d2dDev));
hr = d2dDev->CreateDeviceContext(creationProps.options, &d2dDevContext);
hr = d2dDevContext->CreateBitmapFromDxgiSurface(dxgiBackBufferSurface, bmpProps, &d2dTargetBitmap);
d2dDevContext->SetTarget(d2dTargetBitmap);
TSafeRelease(&dxgiBackBufferSurface);
d3dDev->GetImmediateContext3(&d3dDevContext);
ID3D11Texture2D1* d3dBackBufferTex = nullptr;
hr = dxgiSwapChain->GetBuffer(0, __uuidof(ID3D11Texture2D1), reinterpret_cast<void**>(&d3dBackBufferTex));
hr = d3dDev->CreateRenderTargetView1(d3dBackBufferTex, nullptr, &d3dRTView);
TSafeRelease(&d3dBackBufferTex);
if (bEnableDepthStencilBuffer)
{
ID3D11Texture2D1* d3dDepthStencilTex = nullptr;
D3D11_TEXTURE2D_DESC1 depthStencilTexDesc;
ZeroMemory(&depthStencilTexDesc, sizeof(D3D11_TEXTURE2D_DESC1));
depthStencilTexDesc.ArraySize = 1;
depthStencilTexDesc.BindFlags = D3D11_BIND_DEPTH_STENCIL;
depthStencilTexDesc.Format = dxgiDepthStencilFmt_;
depthStencilTexDesc.Height = window_.Bounds().Height;
depthStencilTexDesc.MipLevels = 1;
depthStencilTexDesc.SampleDesc.Count = 1;
depthStencilTexDesc.Width = window_.Bounds().Width;
hr = d3dDev->CreateTexture2D1(&depthStencilTexDesc, nullptr, &d3dDepthStencilTex);
hr = d3dDev->CreateDepthStencilView(d3dDepthStencilTex, nullptr, &d3dDSView);
d3dDevContext->OMSetRenderTargets(1, reinterpret_cast<ID3D11RenderTargetView**>(&d3dRTView), d3dDSView);
TSafeRelease(&d3dDepthStencilTex);
}
else
{
d3dDevContext->OMSetRenderTargets(1, reinterpret_cast<ID3D11RenderTargetView**>(&d3dRTView), nullptr);
}
// NOTE(review): Bounds() is in DIPs, not pixels — on a scaled display the
// viewport/depth buffer will not match the swap chain's pixel size.
viewPort.Height = window_.Bounds().Height;
viewPort.MaxDepth = 1.0f;
viewPort.Width = window_.Bounds().Width;
d3dDevContext->RSSetViewports(1, &viewPort);
}
hr = DWriteCreateFactory(DWRITE_FACTORY_TYPE_SHARED, __uuidof(IDWriteFactory3), reinterpret_cast<IUnknown**>(&dwriteFactory));
hr = dwriteFactory->CreateTextFormat(L"Segoe", nullptr, DWRITE_FONT_WEIGHT_NORMAL, DWRITE_FONT_STYLE_NORMAL, DWRITE_FONT_STRETCH_NORMAL, 11.0f
, L"en-US", reinterpret_cast<IDWriteTextFormat**>(&dwriteTextFmt));
}
// Accessors return non-owning raw pointers; lifetime is managed by this class.
ID3D11Device3* GetD3D11Device() const { return d3dDev; }
ID3D11DeviceContext3* GetD3D11DeviceContext() const { return d3dDevContext; }
ID3D11RenderTargetView1* GetD3D11RenderTargetView() const { return d3dRTView; }
ID3D11DepthStencilView* GetD3D11DepthStencilView() const { return d3dDSView; }
ID2D1Device2* GetD2DDevice() const { return d2dDev; }
ID2D1DeviceContext2* GetD2DDeviceContext() const { return d2dDevContext; }
ID2D1Bitmap1* GetD2DTargetBitmap() const { return d2dTargetBitmap; }
IDXGIFactory4* GetDXGIFactory() const { return dxgiFactory; }
IDXGIDevice3* GetDXGIDevice() const { return dxgiDev; }
IDXGIAdapter3* GetDXGIDefaultAdapter() const { return dxgiDefaultAdapter; }
IDXGISwapChain3* GetDXGISwapChain() const { return dxgiSwapChain; }
IDWriteFactory* GetDWriteFactory() const { return dwriteFactory; }
IDWriteTextFormat3* GetDWriteTextFormat() const { return dwriteTextFmt; }
D3D_FEATURE_LEVEL GetD3DDeviceFeatureLevel() { return featureLevel; }
DXGI_FORMAT GetDXGISwapChainFormat() { return dxgiSwapChainFmt; }
DXGI_FORMAT GetDXGIDepthStencilFormat() { return dxgiDepthStencilFmt; }
D3D11_VIEWPORT GetD3D11ViewPort() { return viewPort; }
private:
ID3D11Device3* d3dDev;
ID3D11DeviceContext3* d3dDevContext;
ID3D11RenderTargetView1* d3dRTView;
ID3D11DepthStencilView* d3dDSView;
ID2D1Device2* d2dDev;
ID2D1DeviceContext2* d2dDevContext;
ID2D1Bitmap1* d2dTargetBitmap;
IDXGIFactory4* dxgiFactory;
IDXGIDevice3* dxgiDev;
IDXGIAdapter3* dxgiDefaultAdapter;
IDXGISwapChain3* dxgiSwapChain;
IDWriteFactory* dwriteFactory;
IDWriteTextFormat3* dwriteTextFmt;
D3D_FEATURE_LEVEL featureLevel;
DXGI_FORMAT dxgiSwapChainFmt;
DXGI_FORMAT dxgiDepthStencilFmt;
D3D11_VIEWPORT viewPort;
};
Refer to the Important in the document, please add the following #include statement and using statement, then try to compile your project.
#include <winrt/Windows.UI.Core.h>
using namespace winrt::Windows::UI::Core;

Direct X: How to display an image loaded in a texture?

I'm new to DirectX and have been reading tons of tutorials and samples, however I'm unable to find any documentation on how to directly display an image that is loaded into a Texture2D on the screen. Almost all tutorials I've seen deal with 3D graphics, shaders, etc. However, I just want to display the contents of the texture.
Here's what I have so far:
DeviceResources.cpp:
#include "DeviceResources.h"
#include "Renderer.h"
// Default constructor; all real setup happens in CreateDeviceResources().
DeviceResources::DeviceResources()
{
}
// Creates the D3D11 device, a flip-model 10-bit swap chain for `hwnd`
// configured for HDR10 (PQ/BT.2020 color space), binds OpenCV's OpenCL
// interop to the device, and builds the initial back-buffer RTV/viewport.
//
// Fixes over the original:
//  * the feature-level array was ordered lowest-first; D3D11CreateDeviceAndSwapChain
//    walks the array in order, so it always produced an 11_0 device even on
//    12_x-capable hardware. Levels are now listed highest-first, with the
//    documented E_INVALIDARG retry for OS versions that reject 12_x entries.
//  * the creation HRESULT was ignored, crashing on the null device below.
HRESULT DeviceResources::CreateDeviceResources(HWND hwnd)
{
    D3D_FEATURE_LEVEL levels[] = {
        D3D_FEATURE_LEVEL_12_1,
        D3D_FEATURE_LEVEL_12_0,
        D3D_FEATURE_LEVEL_11_1,
        D3D_FEATURE_LEVEL_11_0
    };
    UINT flags = D3D11_CREATE_DEVICE_BGRA_SUPPORT;
    DXGI_SWAP_CHAIN_DESC swap_chain_desc;
    ZeroMemory(&swap_chain_desc, sizeof(DXGI_SWAP_CHAIN_DESC));
    swap_chain_desc.Windowed = TRUE;
    swap_chain_desc.BufferCount = 2;
    swap_chain_desc.BufferDesc.Format = DXGI_FORMAT_R10G10B10A2_UNORM; // 10-bit, required for HDR10
    swap_chain_desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
    swap_chain_desc.SampleDesc.Count = 1;
    swap_chain_desc.SampleDesc.Quality = 0;
    swap_chain_desc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
    swap_chain_desc.OutputWindow = hwnd;
    Microsoft::WRL::ComPtr<ID3D11Device> device;
    Microsoft::WRL::ComPtr<ID3D11DeviceContext> context;
    Microsoft::WRL::ComPtr<IDXGISwapChain> swapChain;
    HRESULT hr = D3D11CreateDeviceAndSwapChain(
        nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, flags,
        levels, ARRAYSIZE(levels), D3D11_SDK_VERSION,
        &swap_chain_desc, swapChain.GetAddressOf(), device.GetAddressOf(),
        &m_feature_level, context.GetAddressOf());
    if (hr == E_INVALIDARG)
    {
        // Pre-Win10 runtimes reject D3D_FEATURE_LEVEL_12_x: retry without them.
        hr = D3D11CreateDeviceAndSwapChain(
            nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, flags,
            &levels[2], ARRAYSIZE(levels) - 2, D3D11_SDK_VERSION,
            &swap_chain_desc, swapChain.GetAddressOf(), device.GetAddressOf(),
            &m_feature_level, context.GetAddressOf());
    }
    if (FAILED(hr))
        return hr;
    device.As(&m_device);
    context.As(&m_context);
    swapChain.As(&m_swapChain);
    // Let OpenCV's OpenCL backend share this D3D11 device.
    cv::directx::ocl::initializeContextFromD3D11Device(m_device.Get());
    auto hdr = Renderer::HDRMetadata();
    m_swapChain->SetHDRMetaData(DXGI_HDR_METADATA_TYPE_HDR10, sizeof(DXGI_HDR_METADATA_HDR10), &hdr);
    m_swapChain->SetColorSpace1(DXGI_COLOR_SPACE_RGB_FULL_G2084_NONE_P2020);
    hr = m_swapChain->GetBuffer(0, __uuidof(ID3D11Texture2D), static_cast<void**>(& m_backBuffer));
    if (FAILED(hr))
        return hr;
    m_backBuffer->GetDesc(&m_bbDesc);
    ZeroMemory(&m_viewport, sizeof(D3D11_VIEWPORT));
    m_viewport.Height = static_cast<float>(m_bbDesc.Height);
    m_viewport.Width = static_cast<float>(m_bbDesc.Width);
    m_viewport.MinDepth = 0;
    m_viewport.MaxDepth = 1;
    m_context->RSSetViewports(1, &m_viewport);
    return m_device->CreateRenderTargetView(m_backBuffer.Get(), nullptr, m_renderTargetView.GetAddressOf());
}
// Re-acquires the swap chain's back buffer after a resize/mode change and
// rebuilds the render-target view and viewport from its new description.
//
// Fixes over the original: HRESULTs are checked, and the RTV is created via
// ReleaseAndGetAddressOf() so a still-populated ComPtr is released first —
// CreateRenderTargetView into plain GetAddressOf() leaked the previous view
// on every resize cycle that skipped ReleaseBackBuffer().
HRESULT DeviceResources::ConfigureBackBuffer()
{
    HRESULT hr = m_swapChain->GetBuffer(0, __uuidof(ID3D11Texture2D), static_cast<void**>(& m_backBuffer));
    if (FAILED(hr))
        return hr;
    hr = m_device->CreateRenderTargetView(m_backBuffer.Get(), nullptr, m_renderTargetView.ReleaseAndGetAddressOf());
    if (FAILED(hr))
        return hr;
    m_backBuffer->GetDesc(&m_bbDesc);
    ZeroMemory(&m_viewport, sizeof(D3D11_VIEWPORT));
    m_viewport.Height = static_cast<float>(m_bbDesc.Height);
    m_viewport.Width = static_cast<float>(m_bbDesc.Width);
    m_viewport.MinDepth = 0;
    m_viewport.MaxDepth = 1;
    m_context->RSSetViewports(1, &m_viewport);
    return S_OK;
}
// Drops all references to the swap chain's back buffer so that
// IDXGISwapChain::ResizeBuffers can succeed (it fails while outstanding
// buffer references exist), then flushes the context so the deferred
// destruction happens immediately.
HRESULT DeviceResources::ReleaseBackBuffer()
{
m_renderTargetView.Reset();
m_backBuffer.Reset();
m_context->Flush();
return S_OK;
}
// Toggles fullscreen state, then performs the required resize dance:
// release back-buffer references, resize to the new target (0,0 = match the
// window), and rebuild the render target from the fresh buffer.
HRESULT DeviceResources::SetFullscreen(bool fullscreen)
{
m_swapChain->SetFullscreenState(fullscreen, nullptr);
ReleaseBackBuffer();
m_swapChain->ResizeBuffers(0, 0, 0, DXGI_FORMAT_UNKNOWN, 0);
ConfigureBackBuffer();
return S_OK;
}
// Presents the current frame, waiting for one vertical blank (vsync on).
void DeviceResources::Present()
{
m_swapChain->Present(1, 0);
}
// ComPtr members release their D3D/DXGI objects automatically.
DeviceResources::~DeviceResources()
= default;
Renderer.cpp:
#include "Renderer.h"
#include <utility>
#include <comdef.h>
#include <vector>
// Takes shared ownership of the device resources. The frame counter is
// initialized once in the member-initializer list (the original also
// redundantly re-assigned it in the body).
Renderer::Renderer(std::shared_ptr<DeviceResources> resources) : m_resources(std::move(resources)), m_frameCount(0)
{
}
// Placeholder: device-dependent resources (textures, shaders) would be
// created here; currently everything is built inline in Render().
HRESULT Renderer::CreateDeviceDependentResources()
{
return S_OK;
}
// Placeholder: window-size-dependent resources would be (re)created here.
HRESULT Renderer::CreateWindowSizeDependentResources()
{
return S_OK;
}
// Per-frame state update hook; intentionally empty -- all work currently
// happens in Render().
void Renderer::Update()
{
//
}
void Renderer::Render()
{
cv::Mat mat = cv::imread("C:/Users/Richard/Downloads/orig_cave_L.ppm", cv::IMREAD_ANYCOLOR | cv::IMREAD_ANYDEPTH);
cv::Mat as4channelMat(mat.size(), CV_MAKE_TYPE(mat.depth(), 4));
int conversion[] = { 0, 0, 1, 1, 2, 2, -1, 3 };
cv::mixChannels(&mat, 1, &as4channelMat, 1, conversion, 4);
D3D11_TEXTURE2D_DESC desc;
desc.Width = 3840;
desc.Height = 2160;
desc.MipLevels = desc.ArraySize = 1;
desc.Format = DXGI_FORMAT_R16G16B16A16_UNORM;
desc.SampleDesc.Count = 1;
desc.SampleDesc.Quality = 0;
desc.Usage = D3D11_USAGE_DEFAULT;
desc.BindFlags = 0;
desc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE | D3D11_CPU_ACCESS_READ;
desc.MiscFlags = 0;
ID3D11Texture2D* tex = nullptr;
auto hr = m_resources->GetDevice()->CreateTexture2D(&desc, nullptr, &tex);
if FAILED(hr)
{
_com_error err(hr);
auto errMsg = err.ErrorMessage();
}
try {
cv::directx::convertToD3D11Texture2D(as4channelMat, tex);
} catch (cv::Exception& e)
{
std::cerr << "ERROR: " << e.msg << std::endl;
throw e;
}
auto hr3 = m_resources->m_device->CreateShaderResourceView(tex.Get(), nullptr, m_texture.GetAddressOf());
if FAILED(hr3)
{
_com_error err(hr3);
auto errMsg = err.ErrorMessage();
}
std::unique_ptr<DirectX::SpriteBatch> m_spriteBatch;
DirectX::SimpleMath::Vector2 m_screenPos, m_origin;
m_spriteBatch = std::make_unique<DirectX::SpriteBatch>(m_resources->m_context.Get());
CD3D11_TEXTURE2D_DESC catDesc;
tex->GetDesc(&catDesc);
m_origin.x = float(catDesc.Width / 2);
m_origin.y = float(catDesc.Height / 2);
m_screenPos.x = m_resources->m_bbDesc.Width / 2.f;
m_screenPos.y = m_resources->m_bbDesc.Height / 2.f;
m_spriteBatch->Begin();
m_spriteBatch->Draw(
m_texture.Get(),
m_screenPos,
nullptr,
DirectX::Colors::White,
0.0f,
m_origin
);
m_spriteBatch->End();
}
// Returns the HDR10 metadata to hand to IDXGISwapChain4::SetHDRMetaData.
// BUG FIX: the original had an empty body -- flowing off the end of a
// non-void function is undefined behavior. Return a zero-initialized struct
// until real mastering-display values are wired up.
// TODO: fill in display primaries, white point, and min/max mastering
// luminance for the actual content.
DXGI_HDR_METADATA_HDR10 Renderer::HDRMetadata()
{
    return DXGI_HDR_METADATA_HDR10{};
}
// Nothing to clean up: m_resources is a shared_ptr and m_texture a ComPtr,
// so their destructors release everything (Rule of Zero).
Renderer::~Renderer() = default;
From my understanding, I have to somehow create a "Quad", apply the texture to it, and then display the quad itself. However, I am unsure how to do any of this and can't find any resources to help.
Edit: Given the recommendations, I have tried using DirectXTK, specifically SpriteBatch. I followed the relevant instructions in the documentation, however Draw doesn't seem to do / display anything. (In Renderer.cpp)

DirectX11 DXGI Capture Screen when in full screen

I'm currently trying to develop a personal project where I will light up LEDs according to what's happening on the screen of my computer.
I've tried several solutions to capture my screen, and DirectX11 with DXGI is the fastest way I found to have a good FPS Rate.
My only issue is the following: When in full screen (for example, watching Netflix through the Win10 app or playing any game in fullscreen), it seems that nothing is captured. I have two functions (one to set up and another one to capture a frame):
Setup Up Function:
// One-time setup for DXGI desktop duplication: creates the D3D11 device,
// walks device -> adapter -> output -> IDXGIOutput1, creates the duplication
// interface, and prepares the description for the CPU-readable staging
// texture that CaptureScreen() copies each frame into.
// Returns false on any failure.
bool DXGIScreenCapturer::Init() {
// NOTE(review): lresult is never used; candidate for removal.
int lresult(-1);
D3D_FEATURE_LEVEL lFeatureLevel;
HRESULT hr(E_FAIL);
// gFeatureLevels/gNumFeatureLevels are presumably file-scope globals -- not
// visible here; TODO confirm they exist.
hr = D3D11CreateDevice(
nullptr,
D3D_DRIVER_TYPE_HARDWARE,
nullptr,
0,
gFeatureLevels,
gNumFeatureLevels,
D3D11_SDK_VERSION,
&_lDevice,
&lFeatureLevel,
&_lImmediateContext);
if (FAILED(hr))
return false;
if (!_lDevice)
return false;
// Get DXGI device
ComPtr<IDXGIDevice> lDxgiDevice;
hr = _lDevice.As(&lDxgiDevice);
if (FAILED(hr))
return false;
// Get DXGI adapter
ComPtr<IDXGIAdapter> lDxgiAdapter;
hr = lDxgiDevice->GetParent(__uuidof(IDXGIAdapter), &lDxgiAdapter);
if (FAILED(hr))
return false;
lDxgiDevice.Reset();
// Output 0 = the primary monitor only; other outputs are not duplicated.
UINT Output = 0;
// Get output
ComPtr<IDXGIOutput> lDxgiOutput;
hr = lDxgiAdapter->EnumOutputs(Output, &lDxgiOutput);
if (FAILED(hr))
return false;
lDxgiAdapter.Reset();
hr = lDxgiOutput->GetDesc(&_lOutputDesc);
if (FAILED(hr))
return false;
// QI for Output 1 (IDXGIOutput1 provides DuplicateOutput)
ComPtr<IDXGIOutput1> lDxgiOutput1;
hr = lDxgiOutput.As(&lDxgiOutput1);
if (FAILED(hr))
return false;
lDxgiOutput.Reset();
// Create desktop duplication
// NOTE(review): DuplicateOutput can fail with E_ACCESSDENIED on protected
// content (e.g. DRM video), which may explain "nothing captured" for
// Netflix-style fullscreen playback -- verify the HRESULT in that scenario.
hr = lDxgiOutput1->DuplicateOutput(_lDevice.Get(), &_lDeskDupl);
if (FAILED(hr))
return false;
lDxgiOutput1.Reset();
// Create GUI drawing texture
_lDeskDupl->GetDesc(&_lOutputDuplDesc);
// Create CPU access texture: STAGING + CPU_READ so the GPU frame can be
// copied and then mapped for reading on the CPU.
_desc.Width = _lOutputDuplDesc.ModeDesc.Width;
_desc.Height = _lOutputDuplDesc.ModeDesc.Height;
_desc.Format = _lOutputDuplDesc.ModeDesc.Format;
std::cout << _desc.Width << "x" << _desc.Height << "\n\n\n";
_desc.ArraySize = 1;
_desc.BindFlags = 0;
_desc.MiscFlags = 0;
_desc.SampleDesc.Count = 1;
_desc.SampleDesc.Quality = 0;
_desc.MipLevels = 1;
_desc.CPUAccessFlags = D3D11_CPU_ACCESS_FLAG::D3D11_CPU_ACCESS_READ;
_desc.Usage = D3D11_USAGE::D3D11_USAGE_STAGING;
// Spin until the first real frame arrives (mouse-only updates are skipped
// by CaptureScreen). NOTE(review): this can loop forever on a static screen.
while (!CaptureScreen(_result));
// NOTE(review): this wraps _resource.pData in a cv::Mat, but CaptureScreen
// has already Unmap()ed and Release()d the texture that pData pointed into,
// so _result aliases freed memory (use-after-free). It also ignores
// _resource.RowPitch, which may differ from Width*4. The frame data should
// be deep-copied while the texture is still mapped instead.
_result = cv::Mat(_desc.Height, _desc.Width, CV_8UC4, _resource.pData);
return true;
}
And the capture function:
// Acquires the next desktop frame via DXGI desktop duplication and copies it
// into `output` as an 8-bit 4-channel (BGRA) image.
// Returns false when no new frame is available (timeout, mouse-only update)
// or on any D3D failure.
//
// BUG FIX: the original wrapped _resource.pData in a cv::Mat that outlived
// Unmap()/Release() of the texture it pointed into -- a use-after-free. The
// pixels are now deep-copied while the texture is still mapped, honoring the
// driver's RowPitch (which may exceed Width*4). The results of .As() and
// Map() were also silently ignored.
bool DXGIScreenCapturer::CaptureScreen(cv::Mat& output)
{
    ComPtr<IDXGIResource> lDesktopResource = nullptr;
    DXGI_OUTDUPL_FRAME_INFO lFrameInfo;
    HRESULT hr = _lDeskDupl->AcquireNextFrame(999, &lFrameInfo, &lDesktopResource);
    if (FAILED(hr))
        return false;
    // Skip mouse-only updates, which can arrive much faster than the actual
    // screen refresh when the cursor moves.
    if (lFrameInfo.LastPresentTime.HighPart == 0)
    {
        _lDeskDupl->ReleaseFrame();
        return false;
    }
    // QI for ID3D11Texture2D
    hr = lDesktopResource.As(&_lAcquiredDesktopImage);
    if (FAILED(hr)) // BUG FIX: result was previously ignored
    {
        _lDeskDupl->ReleaseFrame();
        return false;
    }
    // Copy the GPU frame into a CPU-readable staging texture.
    // NOTE(review): creating this texture every frame is wasteful; it could
    // be created once in Init() and reused.
    ID3D11Texture2D* currTexture = nullptr;
    hr = _lDevice->CreateTexture2D(&_desc, nullptr, &currTexture);
    if (FAILED(hr) || !currTexture)
    {
        _lDeskDupl->ReleaseFrame();
        return false;
    }
    _lImmediateContext->CopyResource(currTexture, _lAcquiredDesktopImage.Get());
    UINT subresource = D3D11CalcSubresource(0, 0, 0);
    hr = _lImmediateContext->Map(currTexture, subresource, D3D11_MAP_READ, 0, &_resource);
    if (FAILED(hr)) // BUG FIX: result was previously ignored
    {
        currTexture->Release();
        _lDeskDupl->ReleaseFrame();
        return false;
    }
    // Deep-copy while mapped; the temporary Mat is only a view of pData.
    cv::Mat mapped(_desc.Height, _desc.Width, CV_8UC4, _resource.pData, _resource.RowPitch);
    mapped.copyTo(_result);
    _lImmediateContext->Unmap(currTexture, subresource);
    currTexture->Release();
    _lDeskDupl->ReleaseFrame();
    output = _result;
    return true;
}

DirectX 11 CreateSwapChain() fails with error DXGI_ERROR_INVALID_CALL

Everytime I try to create the swapChain it throws this error.
After hours searching for a fix for this I found nothing that worked for me.
Here's the important part of the code:
// Creates the D3D11 device/context and an HWND-bound swap chain.
//
// BUG FIX: the original CreateSwapChain() failed with DXGI_ERROR_INVALID_CALL
// for two reasons, both fixed below:
//   1. Flip-model swap effects (DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL) require
//      BufferCount >= 2; it was 1.
//   2. DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY cannot be set on swap chains created
//      by an application (it is reserved for the desktop composition engine).
// Also fixed: Windowed was set to config.fullscreen (inverted), and the DXGI
// helper interfaces leaked on the early-return error paths.
bool Direct3D::Initialize(HWND hWnd)
{
    HRESULT hResult;
    ID3D11Device* pDevice = NULL;
    ID3D11DeviceContext* pDeviceContext = NULL;
    IDXGIDevice* pDXGIDevice = NULL;
    IDXGIAdapter* pAdapter = NULL;
    IDXGIFactory* pFactory = NULL;
    IDXGISwapChain* pSwapChain = NULL;
    D3D_FEATURE_LEVEL featureLevels[] = { // add feature levels to support here
        D3D_FEATURE_LEVEL_11_0
    };
#ifdef _DEBUG
    UINT deviceFlags = D3D11_CREATE_DEVICE_BGRA_SUPPORT | D3D11_CREATE_DEVICE_DEBUG;
#else
    UINT deviceFlags = D3D11_CREATE_DEVICE_BGRA_SUPPORT;
#endif
    // Create the device and device context. The adapter must be NULL when
    // D3D_DRIVER_TYPE_HARDWARE is used (NULL selects the default adapter).
    hResult = D3D11CreateDevice(NULL,
        D3D_DRIVER_TYPE_HARDWARE,
        NULL, // only non-NULL for D3D_DRIVER_TYPE_SOFTWARE
        deviceFlags,
        featureLevels,
        ARRAYSIZE(featureLevels),
        D3D11_SDK_VERSION,
        &pDevice,
        NULL,
        &pDeviceContext);
    if (FAILED(hResult))
    {
        return false;
    }
    // Walk up from the device to the factory that created it, so the swap
    // chain comes from the same factory.
    hResult = pDevice->QueryInterface(__uuidof(IDXGIDevice), (void**)&pDXGIDevice);
    if (FAILED(hResult))
    {
        CGE_SAFE_RELEASE(pDeviceContext); // BUG FIX: leaked on this path
        CGE_SAFE_RELEASE(pDevice);
        return false;
    }
    hResult = pDXGIDevice->GetAdapter(&pAdapter);
    if (FAILED(hResult))
    {
        CGE_SAFE_RELEASE(pDXGIDevice); // BUG FIX: leaked on this path
        CGE_SAFE_RELEASE(pDeviceContext);
        CGE_SAFE_RELEASE(pDevice);
        return false;
    }
    hResult = pAdapter->GetParent(__uuidof(IDXGIFactory), (void**)&pFactory);
    if (FAILED(hResult))
    {
        CGE_SAFE_RELEASE(pAdapter); // BUG FIX: leaked on this path
        CGE_SAFE_RELEASE(pDXGIDevice);
        CGE_SAFE_RELEASE(pDeviceContext);
        CGE_SAFE_RELEASE(pDevice);
        return false;
    }
    DXGI_MODE_DESC bufferDesc;
    ZeroMemory(&bufferDesc, sizeof(DXGI_MODE_DESC));
    bufferDesc.Width = 0;  // zero: take the size from the output window
    bufferDesc.Height = 0; // zero: take the size from the output window
    bufferDesc.RefreshRate.Numerator = config.refreshRate;
    bufferDesc.RefreshRate.Denominator = 1;
    bufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
    bufferDesc.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED;
    bufferDesc.Scaling = DXGI_MODE_SCALING_UNSPECIFIED;
    DXGI_SWAP_CHAIN_DESC swapChainDesc;
    ZeroMemory(&swapChainDesc, sizeof(DXGI_SWAP_CHAIN_DESC));
    swapChainDesc.BufferDesc = bufferDesc;
    swapChainDesc.SampleDesc.Count = 1; // flip model does not support MSAA
    swapChainDesc.SampleDesc.Quality = 0;
    swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
    swapChainDesc.BufferCount = 2;               // BUG FIX: flip model needs >= 2 buffers (was 1)
    swapChainDesc.OutputWindow = hWnd;
    swapChainDesc.Windowed = !config.fullscreen; // BUG FIX: was inverted (Windowed = fullscreen)
    swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
    swapChainDesc.Flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH; // BUG FIX: removed invalid DISPLAY_ONLY flag
    hResult = pFactory->CreateSwapChain(pDevice, &swapChainDesc, &pSwapChain);
    CGE_SAFE_RELEASE(pDXGIDevice);
    CGE_SAFE_RELEASE(pAdapter);
    CGE_SAFE_RELEASE(pFactory);
    if (FAILED(hResult))
    {
        CGE_SAFE_RELEASE(pDeviceContext);
        CGE_SAFE_RELEASE(pDevice);
        return false;
    }
    // NOTE(review): pDevice, pDeviceContext and pSwapChain are locals in this
    // excerpt -- presumably the full code stores them in members; confirm they
    // are not leaked when this is used verbatim.
    return true;
}
Looking at the documentation for CreateSwapChain() it seems to be that pSwapChain has to be not NULL, but that doesn't make sense in my opinion because I want to specify pSwapChain with CreateSwapChain().
Does anyone know a solution for this problem?
Your IDXGISwapChain should be associated with a SwapChainPanel XAML control (or an HWND if you are running a Win32 application). You can do the initialization like this:
hr = dxgiFactory2->CreateSwapChainForHwnd( g_pd3dDevice, g_hWnd, &sd, nullptr, nullptr, &g_pSwapChain1 );
if (SUCCEEDED(hr))
{
hr = g_pSwapChain1->QueryInterface( __uuidof(IDXGISwapChain), reinterpret_cast<void**>(&g_pSwapChain) );
}
This code is from microsoft Win32 DirectX sample.
https://code.msdn.microsoft.com/windowsdesktop/Direct3D-Tutorial-Win32-829979ef/view/Discussions#content
If you are running a WinRT application you can look through the DirectX and XAML application template.
You are passing in the address of your swap-chain pointer. This is so the create-device-and-swap-chain function can fill that pointer in with the created object. Here is an example.
// Loop through the driver types until one succeeds (hardware first,
// presumably falling back to WARP/reference -- depends on `drivers` order).
for (unsigned int i = 0; i < DriverCount; i++)
{
// Create our device and swap chain.
// &DX.pSwapChain passes the ADDRESS of an (initially null) swap-chain
// pointer so the API can fill it in -- the caller does not supply a valid
// swap chain up front.
DXERROR = D3D11CreateDeviceAndSwapChain(nullptr, drivers[i], nullptr,
Flag, levels, LevelsCount, D3D11_SDK_VERSION, &SwapDesc, &DX.pSwapChain,
&DX.pDevice, &DX.FeatureLevel, &DX.pImmediateContext);
if (SUCCEEDED(DXERROR))
{
// Remember which driver type actually worked and stop probing.
DX.DriverType = drivers[i];
break;
}
}