Capture Image from Secondary Monitor, Cropping, and Saving - C++

I'm trying to write an application that takes a screenshot of an area on one of the monitors and saves it to an image. The screenshot should be taken at an x,y coordinate with a given width and height. When the image is saved, it should be written at 0,0 with the same width and height. I have this working on my main monitor, but when I attempt to take the screenshot of my 2nd or 3rd monitor it doesn't work: I either get a black image or a file that, when opened, says it is an invalid format.
My code is a modified version of the following code, which takes a screenshot of the entire desktop.
https://github.com/GERD0GDU/dxgi_desktop_capture
Main monitor width/height:
2560 x 1440
Main monitor top-left x/y coordinates are 0,0, retrieved by calling:
POINT cursorPos;
GetCursorPos(&cursorPos);
Secondary monitor width/height:
1080 x 1920
Secondary monitor top-left x/y coordinates are 2560,-238.
For reference, the image coordinates that work on the main monitor are x:801, y:227, width:1756, height:1115.
For my 2nd monitor, which sits to the right of my main monitor and is rotated sideways (making it taller than wide), I'm passing in x:2907, y:-11, width:737, height:1595.
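For reference, the origin and rotation of each monitor can also be read directly from DXGI before duplicating: every IDXGIOutput reports its rectangle on the virtual desktop in DXGI_OUTPUT_DESC::DesktopCoordinates (which is where values like 2560,-238 come from) and its orientation in DXGI_OUTPUT_DESC::Rotation. A minimal sketch, separate from the sample code (ListOutputs is just an illustrative name):
#include <dxgi.h>
#include <atlbase.h>
#include <cstdio>
#pragma comment(lib, "dxgi.lib")

// Prints the virtual-desktop rectangle and rotation of every attached output.
void ListOutputs()
{
    CComPtr<IDXGIFactory1> factory;
    if (FAILED(CreateDXGIFactory1(IID_PPV_ARGS(&factory)))) return;

    CComPtr<IDXGIAdapter1> adapter;
    for (UINT a = 0; factory->EnumAdapters1(a, &adapter) != DXGI_ERROR_NOT_FOUND; ++a, adapter.Release())
    {
        CComPtr<IDXGIOutput> output;
        for (UINT o = 0; adapter->EnumOutputs(o, &output) != DXGI_ERROR_NOT_FOUND; ++o, output.Release())
        {
            DXGI_OUTPUT_DESC desc;
            if (SUCCEEDED(output->GetDesc(&desc)))
            {
                const RECT& rc = desc.DesktopCoordinates; // e.g. {2560, -238, 3640, 1682} for the 2nd monitor
                wprintf(L"%s: left=%ld top=%ld right=%ld bottom=%ld rotation=%d\n",
                        desc.DeviceName, rc.left, rc.top, rc.right, rc.bottom, (int)desc.Rotation);
            }
        }
    }
}
Note that DesktopCoordinates are virtual-desktop values and can be negative, while a duplication created for a single output works in that output's own space starting at 0,0.
My modified CaptureToFile method: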
HRESULT CDXGICapture::CaptureToFile(_In_ LPCWSTR lpcwOutputFileName, int x, int y, int width, int height)
{
AUTOLOCK();
if (!m_bInitialized) {
return D2DERR_NOT_INITIALIZED;
}
CHECK_POINTER_EX(m_ipDxgiOutputDuplication, E_INVALIDARG);
CHECK_POINTER_EX(lpcwOutputFileName, E_INVALIDARG);
HRESULT hr = S_OK;
hr = DXGICaptureHelper::IsRendererInfoValid(&m_rendererInfo);
if (FAILED(hr)) {
return hr;
}
// is valid?
hr = DXGICaptureHelper::GetContainerFormatByFileName(lpcwOutputFileName);
if (FAILED(hr)) {
return hr;
}
DXGI_OUTDUPL_FRAME_INFO FrameInfo;
CComPtr<IDXGIResource> ipDesktopResource;
CComPtr<ID3D11Texture2D> ipAcquiredDesktopImage;
CComPtr<ID2D1Bitmap> ipD2D1SourceBitmap;
// Get new frame
int lTryCount = 4;
do
{
Sleep(1);
hr = m_ipDxgiOutputDuplication->AcquireNextFrame(250, &FrameInfo, &ipDesktopResource);
if (SUCCEEDED(hr))
break;
if (hr == DXGI_ERROR_WAIT_TIMEOUT)
{
continue;
}
else if (FAILED(hr))
break;
} while (--lTryCount > 0);
if (hr == DXGI_ERROR_WAIT_TIMEOUT)
{
return S_FALSE;
}
else if (FAILED(hr))
{
return hr;
}
// QI for ID3D11Texture2D
hr = ipDesktopResource->QueryInterface(IID_PPV_ARGS(&ipAcquiredDesktopImage));
ipDesktopResource = nullptr;
CHECK_HR_RETURN(hr);
if (nullptr == ipAcquiredDesktopImage)
{
// release frame
m_ipDxgiOutputDuplication->ReleaseFrame();
return E_OUTOFMEMORY;
}
// Copy needed full part of desktop image
m_ipD3D11DeviceContext->CopyResource(m_ipCopyTexture2D, ipAcquiredDesktopImage);
// release frame
hr = m_ipDxgiOutputDuplication->ReleaseFrame();
CHECK_HR_RETURN(hr);
// create D2D1 source bitmap
hr = DXGICaptureHelper::CreateBitmap(m_ipD2D1RenderTarget, m_ipCopyTexture2D, &ipD2D1SourceBitmap);
CHECK_HR_RETURN(hr);
//try this
D2D1_RECT_F rcSource = D2D1::RectF(
(FLOAT)x,
(FLOAT)y,
(FLOAT)x + width,
(FLOAT)y+ height);
D2D1_RECT_F rcTarget = D2D1::RectF(
(FLOAT)0,
(FLOAT)0,
(FLOAT)width,
(FLOAT)height);
D2D1_POINT_2F ptTransformCenter = D2D1::Point2F(width / 2.0f, height / 2.0f);
// Apply the rotation transform to the render target.
D2D1::Matrix3x2F rotate = D2D1::Matrix3x2F::Rotation(
m_rendererInfo.RotationDegrees,
ptTransformCenter
);
D2D1::Matrix3x2F scale = D2D1::Matrix3x2F::Scale(
D2D1::SizeF(1, 1),
ptTransformCenter
);
// Priority: first rotate, after scale...
m_ipD2D1RenderTarget->SetTransform(rotate * scale);
m_ipD2D1RenderTarget->BeginDraw();
// clear background color
m_ipD2D1RenderTarget->Clear(D2D1::ColorF(D2D1::ColorF::White, 1.0f));
m_ipD2D1RenderTarget->DrawBitmap(ipD2D1SourceBitmap, rcTarget, 1.0f, D2D1_BITMAP_INTERPOLATION_MODE_LINEAR, rcSource);
//m_ipD2D1RenderTarget->SetTransform(D2D1::Matrix3x2F::Identity());
hr = m_ipD2D1RenderTarget->EndDraw();
if (FAILED(hr)) {
return hr;
}
hr = DXGICaptureHelper::SaveImageToFile(m_ipWICImageFactory, m_ipWICOutputBitmap, lpcwOutputFileName, width, height);
if (FAILED(hr)) {
return hr;
}
return S_OK;
} // CaptureToFile
My save-to-file method:
static
COM_DECLSPEC_NOTHROW
inline
HRESULT
SaveImageToFile(
_In_ IWICImagingFactory* pWICImagingFactory,
_In_ IWICBitmapSource* pWICBitmapSource,
_In_ LPCWSTR lpcwFileName,
_In_ unsigned int uiWidth,
_In_ unsigned int uiHeight
)
{
CHECK_POINTER_EX(pWICImagingFactory, E_INVALIDARG);
CHECK_POINTER_EX(pWICBitmapSource, E_INVALIDARG);
HRESULT hr = S_OK;
GUID guidContainerFormat;
hr = GetContainerFormatByFileName(lpcwFileName, &guidContainerFormat);
if (FAILED(hr)) {
return hr;
}
WICPixelFormatGUID format = GUID_WICPixelFormatDontCare;
CComPtr<IWICImagingFactory> ipWICImagingFactory(pWICImagingFactory);
CComPtr<IWICBitmapSource> ipWICBitmapSource(pWICBitmapSource);
CComPtr<IWICStream> ipStream;
CComPtr<IWICBitmapEncoder> ipEncoder;
CComPtr<IWICBitmapFrameEncode> ipFrameEncode;
// unsigned int uiWidth = 1756;
// unsigned int uiHeight = 1115;
hr = ipWICImagingFactory->CreateStream(&ipStream);
if (SUCCEEDED(hr)) {
hr = ipStream->InitializeFromFilename(lpcwFileName, GENERIC_WRITE);
}
if (SUCCEEDED(hr)) {
hr = ipWICImagingFactory->CreateEncoder(guidContainerFormat, NULL, &ipEncoder);
}
if (SUCCEEDED(hr))
{
hr = ipEncoder->Initialize(ipStream, WICBitmapEncoderNoCache);
}
if (SUCCEEDED(hr))
{
hr = ipEncoder->CreateNewFrame(&ipFrameEncode, NULL);
}
if (SUCCEEDED(hr))
{
hr = ipFrameEncode->Initialize(NULL);
}
if (SUCCEEDED(hr))
{
hr = ipFrameEncode->SetSize(uiWidth, uiHeight);
}
if (SUCCEEDED(hr))
{
hr = ipFrameEncode->SetPixelFormat(&format);
}
if (SUCCEEDED(hr))
{
hr = ipFrameEncode->WriteSource(ipWICBitmapSource, NULL);
}
if (SUCCEEDED(hr))
{
hr = ipFrameEncode->Commit();
}
if (SUCCEEDED(hr))
{
hr = ipEncoder->Commit();
}
return hr;
} // SaveImageToFile

The coordinates being passed in needed to be normalized to the monitor itself, without considering the other monitors; i.e., all x and y values needed to be relative to 0,0 at the top left of that monitor.
To get the coordinates for a monitor I used the following (Java/SWT), where the Control is the view I was trying to capture within my application; a C++ equivalent is sketched after this answer.
Monitor[] monitors = Display.getDefault().getMonitors();
Control control = getScreenCaptureControlArea();
Point p = control.toDisplay(0, 0);
for (Monitor monitor : monitors) {
Rectangle bounds = monitor.getBounds();
if (monitor.getBounds().contains(p.x, p.y)) {
setxCord(Math.abs(Math.abs(bounds.x) - Math.abs(p.x)));
setyCord(Math.abs(Math.abs(bounds.y) - Math.abs(p.y)));
setWidth(control.getSize().x);
setHeight(control.getSize().y);
break;
}
}
Note that this still doesn't work when the monitor is in portrait mode, as the saved image is incorrect. But since my application isn't meant to be used in portrait mode, it didn't matter much, so I didn't put any more effort into figuring out that problem.
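For anyone doing the same normalization on the C++ side, here is a rough equivalent using plain Win32 (MonitorFromPoint/GetMonitorInfo; the helper name ToMonitorLocal is just illustrative):
#include <windows.h>

// Converts a point given in virtual-desktop coordinates into coordinates
// relative to the top-left corner of the monitor that contains it.
// Returns false if the point is not on any monitor.
bool ToMonitorLocal(POINT virtualPt, POINT* localPt)
{
    HMONITOR mon = MonitorFromPoint(virtualPt, MONITOR_DEFAULTTONULL);
    if (!mon) return false;

    MONITORINFO mi = { sizeof(mi) };
    if (!GetMonitorInfo(mon, &mi)) return false;

    // rcMonitor is the monitor's rectangle in virtual-desktop coordinates,
    // e.g. {2560, -238, 3640, 1682} for the secondary monitor above.
    localPt->x = virtualPt.x - mi.rcMonitor.left;
    localPt->y = virtualPt.y - mi.rcMonitor.top;
    return true;
}
With the numbers from the question, x:2907, y:-11 on the secondary monitor (whose origin is 2560,-238) becomes x:347, y:227 in monitor-local coordinates.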

Related

GetPixel via DirectX 11 efficient implementation

I have this function that gets a pixel color from the screen with the GDI API:
RGBTRIPLE GetPixelColorGDI(int x, int y)
{
RGBTRIPLE rgb;
HDC dc = GetDC(NULL);
COLORREF color = GetPixel(dc, x, y);
ReleaseDC(NULL, dc);
rgb.rgbtRed = GetRValue(color);
rgb.rgbtGreen = GetGValue(color);
rgb.rgbtBlue = GetBValue(color);
return rgb;
}
The function works perfectly, but it is too slow.
For this reason I would like to implement the equivalent in DirectX 11 (I don't have much experience with DirectX in general), so please excuse me if it is incorrect; this is my first attempt:
/// <summary>
/// Get pixel color from screen (24 bit) via DirectX 11
/// </summary>
/// <param name="X"></param>
/// <param name="Y"></param>
/// <param name="pSwapChain"></param>
RGBTRIPLE GetPixelColorDX11(int X, int Y, IDXGISwapChain* pSwapChain)
{
RGBTRIPLE rgb;
HRESULT hr = 0;
using Texture2D = ID3D11Texture2D*;
IDXGIResource* backbufferptr = nullptr;
ID3D11Resource* SourceResource = nullptr;
Texture2D DestResource = nullptr;
ID3D11Device* device = nullptr;
ID3D11DeviceContext* context = nullptr;
D3D11_MAPPED_SUBRESOURCE MappedSubresource;
hr = pSwapChain->GetBuffer(0, __uuidof(IDXGIResource), (void**)&backbufferptr);
if (hr < 0) {
return;
}
hr = backbufferptr->QueryInterface(__uuidof(ID3D11Resource), (void**)&SourceResource);
if (hr < 0) {
return;
}
hr = pSwapChain->GetDevice(__uuidof(ID3D11Device), (void**)&device);
if (hr < 0) {
return;
}
DXGI_SWAP_CHAIN_DESC desc;
hr = pSwapChain->GetDesc(&desc);
if (hr < 0) {
return;
}
D3D11_TEXTURE2D_DESC TextureDesciption = { };
TextureDesciption.Format = desc.BufferDesc.Format;
TextureDesciption.Width = desc.BufferDesc.Width;
TextureDesciption.Height = desc.BufferDesc.Height;
TextureDesciption.MipLevels = 1;
TextureDesciption.ArraySize = 1;
TextureDesciption.SampleDesc.Count = 1;
TextureDesciption.Usage = D3D11_USAGE_STAGING;
TextureDesciption.BindFlags = 0;
TextureDesciption.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
TextureDesciption.MiscFlags = 0;
hr = device->CreateTexture2D(&TextureDesciption, nullptr, &DestResource);
if (hr < 0) {
return;
}
device->GetImmediateContext(&context);
if (!context) {
return;
}
context->CopyResource(DestResource, SourceResource);
context->Map(DestResource, 0, D3D11_MAP_READ, 0, &MappedSubresource);
COLORREF* pPixels = (COLORREF*)MappedSubresource.pData;
rgb.rgbtRed = (pPixels[0] >> 16) & 0xff;
rgb.rgbtGreen = (pPixels[0] >> 8) & 0xff;
rgb.rgbtBlue = pPixels[0] & 0xff;
return rgb;
}
What is missing in this code is where to set the X and Y pixel coordinates, because I don't know how to do it.
Apart from this, I'm not sure whether this approach is efficient enough to be faster than the GDI GetPixel version.
The idea (I'm not sure if it is possible) is to select only one pixel (x, y) from the source texture and read its color at pPixels[0].
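For reference, here is roughly how the single-pixel read could look: copy only the pixel (X, Y) from the back buffer into a 1x1 staging texture with CopySubresourceRegion, then map that tiny texture instead of the whole frame. This is only a sketch, assuming a non-multisampled 32-bit BGRA back buffer and no caching of the staging texture:
#include <d3d11.h>
#include <atlbase.h>
#include <windows.h> // RGBTRIPLE

// Reads the color of the single pixel (X, Y) from the swap chain's back buffer.
RGBTRIPLE GetPixelColorDX11(int X, int Y, IDXGISwapChain* pSwapChain)
{
    RGBTRIPLE rgb = {};

    CComPtr<ID3D11Texture2D> backBuffer;
    if (FAILED(pSwapChain->GetBuffer(0, IID_PPV_ARGS(&backBuffer)))) return rgb;

    CComPtr<ID3D11Device> device;
    CComPtr<ID3D11DeviceContext> context;
    backBuffer->GetDevice(&device);
    device->GetImmediateContext(&context);

    D3D11_TEXTURE2D_DESC desc = {};
    backBuffer->GetDesc(&desc);
    // A multisampled back buffer would first need ResolveSubresource; not handled here.

    // 1x1 CPU-readable staging texture in the same format as the back buffer.
    D3D11_TEXTURE2D_DESC stagingDesc = desc;
    stagingDesc.Width = 1;
    stagingDesc.Height = 1;
    stagingDesc.MipLevels = 1;
    stagingDesc.ArraySize = 1;
    stagingDesc.SampleDesc.Count = 1;
    stagingDesc.SampleDesc.Quality = 0;
    stagingDesc.Usage = D3D11_USAGE_STAGING;
    stagingDesc.BindFlags = 0;
    stagingDesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
    stagingDesc.MiscFlags = 0;

    CComPtr<ID3D11Texture2D> staging;
    if (FAILED(device->CreateTexture2D(&stagingDesc, nullptr, &staging))) return rgb;

    // Copy exactly one pixel from (X, Y).
    D3D11_BOX box = { (UINT)X, (UINT)Y, 0, (UINT)X + 1, (UINT)Y + 1, 1 };
    context->CopySubresourceRegion(staging, 0, 0, 0, 0, backBuffer, 0, &box);

    D3D11_MAPPED_SUBRESOURCE mapped = {};
    if (SUCCEEDED(context->Map(staging, 0, D3D11_MAP_READ, 0, &mapped)))
    {
        const BYTE* p = (const BYTE*)mapped.pData;
        // Assumes a BGRA layout (DXGI_FORMAT_B8G8R8A8_*); swap p[0] and p[2] for RGBA.
        rgb.rgbtBlue  = p[0];
        rgb.rgbtGreen = p[1];
        rgb.rgbtRed   = p[2];
        context->Unmap(staging, 0);
    }
    return rgb;
}
For the batch overload described in the update below, the same 1x1 staging texture could be created once and reused, issuing one CopySubresourceRegion plus Map per coordinate, which keeps the cost far below mapping the full back buffer every time.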
Update
Let me explain better what I need and why.
My hooked DLL hooks the DX11 Present and draws a rectangle, using a shader, that works as a "placeholder" on a game menu.
The drawing phase is very fast because it uses DX11, and there is no FPS drop.
To do this I use a library called "imgui".
Before drawing the rectangle I need to know in which position to place it, and this depends on the result of the current GetPixelColorGDI, which is slow.
The user presses an arrow key to move the rectangle to another position.
I call "GetPixelColorGDI" from 1 to 11 times for a single screen.
If the next slot is filled I call GetPixelColorGDI only once, but if the next 10 slots are empty I need to call GetPixelColorGDI 10 times.
So the best solution might be to create GetPixelColorDX11 like this:
RGBTRIPLE GetPixelColorDX11(int X, int Y, IDXGISwapChain* pSwapChain)
and this overload:
struct COORDINATE
{
int x;
int y;
};
std::vector<RGBTRIPLE> GetPixelColorDX11(const std::vector<COORDINATE>& Pixels, IDXGISwapChain* pSwapChain)
so this can cover all cases.
Here is the function that I use to initialize DirectX to draw the rectangles:
bool InitDirectX(IDXGISwapChain* pChain)
{
// Get swapchain
pSwapchain = pChain;
// Get device and context
HRESULT hr = pSwapchain->GetDevice(__uuidof(ID3D11Device), (PVOID*)&pDevice);
if (FAILED(hr))
{
std::cerr << "Failed to get device from swapchain" << std::endl;
return false;
}
pDevice->GetImmediateContext(&pContext);
// Get window from swapchain description
DXGI_SWAP_CHAIN_DESC swapchainDescription;
pSwapchain->GetDesc(&swapchainDescription);
hWindow = swapchainDescription.OutputWindow;
// Use SetWindowLongPtr to modify window behaviour and get input
wndProcHandlerOriginal = (WNDPROC)SetWindowLongPtr(hWindow, GWLP_WNDPROC, (LONG_PTR)hWndProc);
std::cout << "Successfully initialised DirectX - resolution " << swapchainDescription.BufferDesc.Width << "x" << swapchainDescription.BufferDesc.Height << std::endl;
// Update the screen resolution multiplier
UpdateResolutionMultiplier(swapchainDescription.BufferDesc.Width, swapchainDescription.BufferDesc.Height);
return true;
}

Transparent glitches while drawing GIF

I wrote a solution to draw GIFs, using the Direct2D GIF sample:
// somewhere in render loop
IWICBitmapFrameDecode* frame = nullptr;
IWICFormatConverter* converter = nullptr;
gifDecoder->GetFrame(current_frame, &frame);
if (frame)
{
d2dWICFactory->CreateFormatConverter(&converter);
if (converter)
{
ID2D1Bitmap* temp = nullptr;
converter->Initialize(frame, GUID_WICPixelFormat32bppPBGRA,
WICBitmapDitherTypeNone, NULL, 0.f, WICBitmapPaletteType::WICBitmapPaletteTypeCustom);
tar->CreateBitmapFromWicBitmap(converter, NULL, &temp);
scale->SetValue(D2D1_SCALE_PROP_SCALE, D2D1::Vector2F(1, 1));
scale->SetInput(0, temp);
target->DrawImage(scale);
SafeRelease(&temp);
}
}
SafeRelease(&frame);
SafeRelease(&converter);
Where gifDecoder is obtained this way:
d2dWICFactory->CreateDecoderFromFilename(pathToGif, NULL, GENERIC_READ, WICDecodeMetadataCacheOnLoad, &gifDecoder);
But in my program, with almost all GIFs, all frames except the first one have horizontal transparent lines for some reason. Obviously, when I view the same GIF in a browser or in another program, there are none of those holes.
I tried changing the pixel format and the palette type, but the glitches are still there.
What am I doing wrong?
Basic steps:
On image load, in the case of a GIF, do not cache all frames as ready-to-draw ID2D1Bitmaps; store only the image's decoder. Decode each frame, get its bitmap, and draw it every time. At minimum, frame bitmaps and metadata (again, per frame) can be cached during the first animation loop.
For each GIF, create a compatible render target with the size of the GIF screen (obtained from metadata) for composing.
Compose the animation on render: get the metadata for the current frame -> if the current frame has disposal set, clear the compose target -> draw the current frame to the compose target (even if disposal is set) -> draw the compose target to the render target.
Note that frames may have different sizes and even offsets.
Approximate code:
ID2D1DeviceContext *renderTarget;
class GIF
{
ID2D1BitmapRenderTarget *compose;
IWICBitmapDecoder *decoder; // it seems like precaching all frames costs too much time; it is faster to obtain each frame every time (maybe cache during the first loop)
float naturalWidth; // gif screen width
float naturalHeight; // gif screen height
int framesCount;
int currentFrame;
unsigned int lastFrameEpoch;
int x;
int y;
int width; // image width needed to be drawn
int height; // image height
float fw; // scale factor width;
float fh; // scale factor height
GIF(const wchar_t *path, int x, int y, int width, int height)
{
// Init, using WIC obtain here image's decoder, naturalWidth, naturalHeight
// framesCount and other optional stuff like loop information
// using IWICMetadataQueryReader obtained from the whole image,
// not from zero frame
compose = nullptr;
currentFrame = 0;
lastFrameEpoch = epoch(); // implement Your millisecond function
Resize(width, height);
}
void Resize(int width, int height)
{
this->width = width;
this->height = height;
// calculate scale factors
fw = width / naturalWidth;
fh = height / naturalHeight;
if(compose == nullptr)
{
renderTarget->CreateCompatibleRenderTarget(D2D1::SizeF(naturalWidth, naturalHeight), &compose);
compose->Clear(D2D1::ColorF(0,0,0,0));
}
}
void Render()
{
IWICBitmapFrameDecode* frame = nullptr;
decoder->GetFrame(currentFrame, &frame);
IWICFormatConverter* converter = nullptr;
d2dWICFactory->CreateFormatConverter(&converter);
converter->Initialize(frame, GUID_WICPixelFormat32bppPBGRA,
WICBitmapDitherTypeNone, NULL, 0.f, WICBitmapPaletteType::WICBitmapPaletteTypeCustom);
IWICMetadataQueryReader* reader = nullptr;
frame->GetMetadataQueryReader(&reader);
PROPVARIANT propValue;
PropVariantInit(&propValue);
char disposal = 0;
float l = 0, t = 0; // frame offsets (may have)
HRESULT hr = S_OK;
reader->GetMetadataByName(L"/grctlext/Delay", &propValue);
if (SUCCEEDED((propValue.vt == VT_UI2 ? S_OK : E_FAIL)))
{
UINT frameDelay = 0;
UIntMult(propValue.uiVal, 10, &frameDelay); // GIF delay is stored in 10 ms units
if (frameDelay < 16)
{
frameDelay = 16;
}
if(epoch() - lastFrameEpoch < frameDelay)
{
SafeRelease(&reader);
SafeRelease(&frame);
SafeRelease(&converter);
return;
}
}
if (SUCCEEDED(reader->GetMetadataByName(
L"/grctlext/Disposal",
&propValue)))
{
hr = (propValue.vt == VT_UI1) ? S_OK : E_FAIL;
if (SUCCEEDED(hr))
{
disposal = propValue.bVal;
}
}
PropVariantClear(&propValue);
{
hr = reader->GetMetadataByName(L"/imgdesc/Left", &propValue);
if (SUCCEEDED(hr))
{
hr = (propValue.vt == VT_UI2 ? S_OK : E_FAIL);
if (SUCCEEDED(hr))
{
l = static_cast<FLOAT>(propValue.uiVal);
}
PropVariantClear(&propValue);
}
}
{
hr = reader->GetMetadataByName(L"/imgdesc/Top", &propValue);
if (SUCCEEDED(hr))
{
hr = (propValue.vt == VT_UI2 ? S_OK : E_FAIL);
if (SUCCEEDED(hr))
{
t = static_cast<FLOAT>(propValue.uiVal);
}
PropVariantClear(&propValue);
}
}
ID2D1Bitmap* temp = nullptr;
renderTarget->CreateBitmapFromWicBitmap(converter, NULL, &temp);
compose->BeginDraw();
if (disposal == 2)
compose->Clear(D2D1::ColorF(0, 0, 0, 0));
auto ss = temp->GetSize();
compose->DrawBitmap(temp, D2D1::RectF(l, t, l + ss.width, t + ss.height));
compose->EndDraw();
ID2D1Bitmap* composition = nullptr;
compose->GetBitmap(&composition);
// You need to create a scale effect (ID2D1Effect) to have antialiased resizing
scale->SetValue(D2D1_SCALE_PROP_SCALE, D2D1::Vector2F(fw, fh));
scale->SetInput(0, composition);
auto p = D2D1::Point2F(x, y);
renderTarget->DrawImage(scale, p);
SafeRelease(&temp);
SafeRelease(&composition);
SafeRelease(&reader);
SafeRelease(&frame);
SafeRelease(&converter);
}
}*gif[100] = {nullptr};
inline void Render()
{
renderTarget->BeginDraw();
for(int i = 0; i < 100 && gif[i] != nullptr; i++)
gif[i]->Render();
renderTarget->EndDraw();
}

DirectX Full screen app capture

I have a problem with capturing the screen from a full-screen application (game). I made code to capture the screen based on the code from these links:
Capture screen using DirectX
DirectX Partial Screen Capture
But it only works when used somewhere on the desktop, not in a full-screen app. I tried changing "parameters.Windowed" to false, which didn't work, and tried some options within the CreateDevice function, but in full screen it cannot create the device (it stays null, and the HRESULT returned is 0x88760868, D3DERR_DEVICELOST).
Code:
void get_screen()
{
UINT adapter = D3DADAPTER_DEFAULT;
IDirect3D9 *d3d = nullptr;
IDirect3DDevice9 *device = nullptr;
IDirect3DSurface9 *surface = nullptr;
D3DPRESENT_PARAMETERS parameters = { 0 };
D3DDISPLAYMODE mode; //screen resolution
D3DLOCKED_RECT rc;
UINT pitch;
SYSTEMTIME st;
LPBYTE shots = nullptr;
HRESULT hr;
// init D3D and get screen size
d3d = Direct3DCreate9(D3D_SDK_VERSION);
d3d->GetAdapterDisplayMode(adapter, &mode);
parameters.Windowed = TRUE;
parameters.BackBufferFormat = D3DFMT_A8R8G8B8;
parameters.BackBufferHeight = mode.Height;
parameters.BackBufferWidth = mode.Width;
parameters.SwapEffect = D3DSWAPEFFECT_DISCARD;
// create device & capture surface
d3d->CreateDevice(adapter, D3DDEVTYPE_HAL, NULL, D3DCREATE_HARDWARE_VERTEXPROCESSING, &parameters, &device);
device->CreateOffscreenPlainSurface(mode.Width, mode.Height, D3DFMT_A8R8G8B8, D3DPOOL_SYSTEMMEM, &surface, nullptr);
// get the data
device->GetFrontBufferData(0, surface);
surface->LockRect(&rc, NULL, 0);
pitch = rc.Pitch;
shots = new BYTE[pitch * mode.Height];
CopyMemory(shots, rc.pBits, rc.Pitch * mode.Height);
surface->UnlockRect();
// save all screenshots
WCHAR file[100] = L"test.png";
save_screen(mode.Width, mode.Height, pitch, shots, file, GUID_ContainerFormatPng);
//cleanup:
surface->Release();
device->Release();
d3d->Release();
}

Capture screen of any Windows app?

I'm trying to write a Windows C++ program that will try to pick out a color of interest from whatever is currently being displayed on the screen.
I've tried following examples for GDI, Direct3D9, and Direct3D11 DXGI, and they all seem to work only for capturing the Windows desktop and/or my own application's output. When I launch a full-screen Direct3D game, I seem to end up with some flavor of blank pixel data.
It must be possible to accomplish this, or else OBS Studio, FRAPS, etc. would not work as transparently as they do.
I know I could try to reverse engineer OBS Studio, but does anybody have a more succinct C++ solution for capturing an arbitrary Windows application's video output as some kind of pixel buffer?
Edit: I should also mention that capture of regular desktop windows seems to work. It's fullscreen games that are giving me trouble.
Edit: A commenter requested my GDI code. Here is my GDI and D3D9 code. As you can see, I tried a few variations based on conflicting examples that I found:
std::wstring GetScreenColor(COLORREF& colorRef)
{
std::wstring retVal;
//const int desktopWidth(GetDeviceCaps(desktopHdc, HORZRES));
//const int desktopHeight(GetDeviceCaps(desktopHdc, VERTRES));
// const int desktopWidth(GetSystemMetrics(SM_CXVIRTUALSCREEN));
// const int desktopHeight(GetSystemMetrics(SM_CYVIRTUALSCREEN));
const int desktopWidth(GetSystemMetrics(SM_CXSCREEN));
const int desktopHeight(GetSystemMetrics(SM_CYSCREEN));
HWND desktopHwnd(GetDesktopWindow());
// HDC desktopHdc(GetDC(NULL));
HDC desktopHdc(GetDC(desktopHwnd));
HDC myHdc(CreateCompatibleDC(desktopHdc));
const HBITMAP desktopBitmap(CreateCompatibleBitmap(desktopHdc, desktopWidth, desktopHeight));
SelectObject(myHdc, desktopBitmap);
// BitBlt(myHdc, 0, 0, desktopWidth, desktopHeight, desktopHdc, 0, 0, SRCCOPY);
BitBlt(myHdc, 0, 0, desktopWidth, desktopHeight, desktopHdc, 0, 0, SRCCOPY | CAPTUREBLT);
//SelectObject(myHdc, hOld);
ReleaseDC(NULL, desktopHdc);
BITMAPINFO bitmapInfo = { 0 };
bitmapInfo.bmiHeader.biSize = sizeof(bitmapInfo.bmiHeader);
bitmapInfo.bmiHeader.biWidth = desktopWidth;
bitmapInfo.bmiHeader.biHeight = -desktopHeight;
bitmapInfo.bmiHeader.biPlanes = 1;
bitmapInfo.bmiHeader.biBitCount = 24;
bitmapInfo.bmiHeader.biCompression = BI_RGB;
bitmapInfo.bmiHeader.biSizeImage = 0;
// TODO: use a persistent buffer?
const unsigned long numPixels(desktopHeight * desktopWidth);
ColorBGRS* rawPixels(new ColorBGRS[numPixels]);
if (!GetDIBits(myHdc, desktopBitmap, 0, desktopHeight, rawPixels, &bitmapInfo, DIB_RGB_COLORS))
{
delete[] rawPixels;
ReleaseDC(desktopHwnd, desktopHdc);
DeleteDC(myHdc);
DeleteObject(desktopBitmap);
return L"GetDIBits() failed";
}
unsigned long redSum(0);
unsigned long greenSum(0);
unsigned long blueSum(0);
for (unsigned long index(0); index < numPixels; ++index)
{
blueSum += rawPixels[index].blue;
greenSum += rawPixels[index].green;
redSum += rawPixels[index].red;
}
const unsigned long redAverage(redSum / numPixels);
const unsigned long blueAverage(blueSum / numPixels);
const unsigned long greenAverage(greenSum / numPixels);
colorRef = RGB(redAverage, greenAverage, blueAverage);
delete[] rawPixels;
ReleaseDC(desktopHwnd, desktopHdc);
DeleteDC(myHdc);
DeleteObject(desktopBitmap);
return std::wstring();
}
std::wstring GetScreenColor2(COLORREF& colorRef)
{
IDirect3D9* d3d9(Direct3DCreate9(D3D_SDK_VERSION));
if (!d3d9)
{
return L"Direct3DCreate9() failed";
}
D3DDISPLAYMODE d3dDisplayMode;
if (FAILED(d3d9->GetAdapterDisplayMode(D3DADAPTER_DEFAULT, &d3dDisplayMode)))
{
return L"GetAdapterDisplayMode() failed";
}
D3DPRESENT_PARAMETERS d3dPresentParams;
ZeroMemory(&d3dPresentParams, sizeof(D3DPRESENT_PARAMETERS));
d3dPresentParams.Windowed = TRUE;
d3dPresentParams.Flags = D3DPRESENTFLAG_LOCKABLE_BACKBUFFER;
d3dPresentParams.BackBufferFormat = d3dDisplayMode.Format;
d3dPresentParams.BackBufferCount = 1;
d3dPresentParams.BackBufferHeight = d3dDisplayMode.Height;
d3dPresentParams.BackBufferWidth = d3dDisplayMode.Width;
d3dPresentParams.MultiSampleType = D3DMULTISAMPLE_NONE;
d3dPresentParams.SwapEffect = D3DSWAPEFFECT_DISCARD;
//d3dPresentParams.SwapEffect = D3DSWAPEFFECT_COPY;
d3dPresentParams.hDeviceWindow = NULL; //hWnd;
d3dPresentParams.PresentationInterval = D3DPRESENT_INTERVAL_DEFAULT;
d3dPresentParams.FullScreen_RefreshRateInHz = D3DPRESENT_RATE_DEFAULT;
IDirect3DDevice9* d3d9Device(0);
if (FAILED(d3d9->CreateDevice(D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, d3dPresentParams.hDeviceWindow, D3DCREATE_SOFTWARE_VERTEXPROCESSING, &d3dPresentParams, &d3d9Device)))
{
d3d9->Release();
return L"CreateDevice() failed";
}
IDirect3DSurface9* d3d9Surface(0);
// if (FAILED(d3d9Device->GetBackBuffer(0, 0, D3DBACKBUFFER_TYPE_MONO, &d3d9Surface))) return L"GetBackBuffer() failed";
if (FAILED(d3d9Device->CreateOffscreenPlainSurface(d3dDisplayMode.Width, d3dDisplayMode.Height, D3DFMT_A8R8G8B8, D3DPOOL_SCRATCH, &d3d9Surface, NULL)))
// if (FAILED(d3d9Device->CreateOffscreenPlainSurface(d3dDisplayMode.Width, d3dDisplayMode.Height, D3DFMT_A8R8G8B8, D3DPOOL_SYSTEMMEM, &d3d9Surface, NULL)))
{
d3d9Device->Release();
d3d9->Release();
return L"CreateOffscreenPlainSurface() failed";
}
if (FAILED(d3d9Device->GetFrontBufferData(0, d3d9Surface)))
{
d3d9Surface->Release();
d3d9Device->Release();
d3d9->Release();
return L"GetFrontBufferData() failed";
}
D3DLOCKED_RECT d3dLockedRect;
if (FAILED(d3d9Surface->LockRect(&d3dLockedRect, 0, D3DLOCK_NO_DIRTY_UPDATE |
D3DLOCK_NOSYSLOCK |
D3DLOCK_READONLY)))
{
d3d9Surface->UnlockRect();
d3d9Surface->Release();
d3d9Device->Release();
d3d9->Release();
return L"LockRect() failed";
}
const unsigned long numPixels(d3dDisplayMode.Height * d3dDisplayMode.Width);
BYTE* rawPixels((BYTE*)(d3dLockedRect.pBits));
colorRef = RGB(*(rawPixels + 2), *(rawPixels + 1), *(rawPixels));
d3d9Surface->UnlockRect();
d3d9Surface->Release();
d3d9Device->Release();
d3d9->Release();
return std::wstring();
}
Since Windows 8 there is the Desktop Duplication API, which is able to record full-screen applications like games. I have recently made this library for one of my projects that you could use for reference. In your case, though, you need to get the raw pixel data from the textures instead of writing them to video or images (a sketch of that readback step follows the code below).
Edit: added a small example of reinitialization on lost access.
{
CComPtr<ID3D11Device> pDevice;
CComPtr<IDXGIOutputDuplication> pDeskDupl;
//(...)create devices and duplication interface etc here..
InitializeDesktopDupl(pDevice, &pDeskDupl, &OutputDuplDesc);
while(true) //capture loop gist.
{
IDXGIResource *pDesktopResource = nullptr;
DXGI_OUTDUPL_FRAME_INFO FrameInfo;
RtlZeroMemory(&FrameInfo, sizeof(FrameInfo));
// Get new frame
HRESULT hr = pDeskDupl->AcquireNextFrame(
99,//timeout in ms
&FrameInfo,
&pDesktopResource);
if (hr == DXGI_ERROR_ACCESS_LOST) {
pDeskDupl->ReleaseFrame();
pDeskDupl.Release();
if (pDesktopResource) { pDesktopResource->Release(); pDesktopResource = nullptr; }
hr = InitializeDesktopDupl(pDevice, &pDeskDupl, &OutputDuplDesc);
if(FAILED(hr)){
//Check if everything is OK before continuing
}
}
}
}
HRESULT InitializeDesktopDupl(ID3D11Device *pDevice, IDXGIOutputDuplication **ppDesktopDupl, DXGI_OUTDUPL_DESC *pOutputDuplDesc)
{
*ppDesktopDupl = NULL;
// Get DXGI device
CComPtr<IDXGIDevice> pDxgiDevice;
CComPtr<IDXGIOutputDuplication> pDeskDupl = NULL;
DXGI_OUTDUPL_DESC OutputDuplDesc;
HRESULT hr = pDevice->QueryInterface(IID_PPV_ARGS(&pDxgiDevice));
if (FAILED(hr)) { return hr; }
// Get DXGI adapter
CComPtr<IDXGIAdapter> pDxgiAdapter;
hr = pDxgiDevice->GetParent(
__uuidof(IDXGIAdapter),
reinterpret_cast<void**>(&pDxgiAdapter));
pDxgiDevice.Release();
if (FAILED(hr)) { return hr; }
// Get output
CComPtr<IDXGIOutput> pDxgiOutput;
hr = pDxgiAdapter->EnumOutputs(
m_DisplayOutput,
&pDxgiOutput);
if (FAILED(hr)) { return hr; }
pDxgiAdapter.Release();
CComPtr<IDXGIOutput1> pDxgiOutput1;
hr = pDxgiOutput->QueryInterface(IID_PPV_ARGS(&pDxgiOutput1));
if (FAILED(hr)) { return hr; }
pDxgiOutput.Release();
// Create desktop duplication
hr = pDxgiOutput1->DuplicateOutput(
pDevice,
&pDeskDupl);
if (FAILED(hr)) { return hr; }
pDxgiOutput1.Release();
// Get the output duplication description
pDeskDupl->GetDesc(&OutputDuplDesc);
*ppDesktopDupl = pDeskDupl;
(*ppDesktopDupl)->AddRef();
*pOutputDuplDesc = OutputDuplDesc;
return hr;
}
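Since the goal here is raw pixel data rather than a video file, the acquired frame can be copied into a staging texture and mapped on the CPU. A rough sketch of that readback step, meant to sit inside the capture loop above after a successful AcquireNextFrame (pDevice, pDesktopResource and pDeskDupl are the variables from the snippet above):
// Turn an acquired frame into CPU-readable pixels.
CComPtr<ID3D11Texture2D> pAcquiredTexture;
if (SUCCEEDED(pDesktopResource->QueryInterface(IID_PPV_ARGS(&pAcquiredTexture))))
{
    D3D11_TEXTURE2D_DESC desc = {};
    pAcquiredTexture->GetDesc(&desc);

    // Same size/format as the desktop image, but CPU-readable.
    D3D11_TEXTURE2D_DESC stagingDesc = desc;
    stagingDesc.Usage = D3D11_USAGE_STAGING;
    stagingDesc.BindFlags = 0;
    stagingDesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
    stagingDesc.MiscFlags = 0;

    CComPtr<ID3D11Texture2D> pStaging;
    CComPtr<ID3D11DeviceContext> pContext;
    pDevice->GetImmediateContext(&pContext);
    if (SUCCEEDED(pDevice->CreateTexture2D(&stagingDesc, nullptr, &pStaging)))
    {
        pContext->CopyResource(pStaging, pAcquiredTexture);

        D3D11_MAPPED_SUBRESOURCE mapped = {};
        if (SUCCEEDED(pContext->Map(pStaging, 0, D3D11_MAP_READ, 0, &mapped)))
        {
            // mapped.pData points at the pixels (typically 32-bit BGRA);
            // each row starts mapped.RowPitch bytes after the previous one.
            BYTE* pixels = static_cast<BYTE*>(mapped.pData);
            (void)pixels; // ...read or copy the pixels you need here...
            pContext->Unmap(pStaging, 0);
        }
    }
}
pDesktopResource->Release();
pDeskDupl->ReleaseFrame();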

Creating a D3D9 Hardware Overlay Issue

I want to overlay a D3D game using a D3D hardware overlay. I'm creating the overlay with this function (source):
HRESULT CreateHWOverlay(HWND hwnd, IDirect3D9Ex *pD3D, IDirect3DDevice9Ex **ppDevice)
{
*ppDevice = NULL;
D3DCAPS9 caps;
ZeroMemory(&caps, sizeof(caps));
HRESULT hr = pD3D->GetDeviceCaps(
D3DADAPTER_DEFAULT,
D3DDEVTYPE_HAL,
&caps
);
if (FAILED(hr))
{
return hr;
}
// Check if overlay is supported.
if (!(caps.Caps & D3DCAPS_OVERLAY))
{
return D3DERR_UNSUPPORTEDOVERLAY;
}
D3DOVERLAYCAPS overlayCaps = { 0 };
IDirect3DDevice9Ex *pDevice = NULL;
IDirect3D9ExOverlayExtension *pOverlay = NULL;
// Check specific overlay capabilities.
hr = pD3D->QueryInterface(IID_PPV_ARGS(&pOverlay));
if (SUCCEEDED(hr))
{
hr = pOverlay->CheckDeviceOverlayType(
D3DADAPTER_DEFAULT,
D3DDEVTYPE_HAL,
1920,
1200,
D3DFMT_X8R8G8B8,
NULL,
D3DDISPLAYROTATION_IDENTITY,
&overlayCaps
);
}
// Create the overlay.
if (SUCCEEDED(hr))
{
DWORD flags = D3DCREATE_FPU_PRESERVE |
D3DCREATE_MULTITHREADED |
D3DCREATE_SOFTWARE_VERTEXPROCESSING;
D3DPRESENT_PARAMETERS pp = { 0 };
pp.BackBufferWidth = 1920;//overlayCaps.MaxOverlayDisplayWidth;
pp.BackBufferHeight = 1200;//overlayCaps.MaxOverlayDisplayHeight;
pp.BackBufferFormat = D3DFMT_X8R8G8B8;
pp.SwapEffect = D3DSWAPEFFECT_OVERLAY;
pp.hDeviceWindow = hwnd;
pp.Windowed = TRUE;
pp.Flags = D3DPRESENTFLAG_VIDEO;
pp.FullScreen_RefreshRateInHz = D3DPRESENT_RATE_DEFAULT;
pp.PresentationInterval = D3DPRESENT_INTERVAL_ONE;
hr = pD3D->CreateDeviceEx(D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL,
NULL, flags, &pp, NULL, &pDevice);
}
if (SUCCEEDED(hr))
{
(*ppDevice) = pDevice;
(*ppDevice)->AddRef();
}
SafeRelease(&pD3D);
SafeRelease(&pDevice);
SafeRelease(&pOverlay);
return hr;
}
Painting is done here:
while(true)
{
//Sleep(10);
D3DRECT rect = {50,50,200,200};
g_pDeviceEx->BeginScene();
RECT r = {0,0,1920,1200};
RECT xrect = {200, 200, 500, 300};
g_Font->DrawTextA(0, "D3D HARDWARE OVERLAY", -1, &xrect, DT_LEFT, D3DCOLOR_ARGB(255,255,0,0));
g_pDeviceEx->Clear(1, &rect, D3DCLEAR_TARGET, D3DCOLOR_ARGB(255, 0,255,0), 50.0,0);
g_pDeviceEx->EndScene();
g_pDeviceEx->PresentEx(&r, &r, hWnd, NULL, 0);
}
The overlay is only displayed when I change the game's resolution, while the screen is black. As soon as you can see the world, the text and the rect are gone. What am I doing wrong? And yes, I'm running in windowed mode.
The post is 2 months old, but the answer is:
g_pDeviceEx->Clear erases the back buffer with the color (255, 0, 255, 0); put the Clear call before BeginScene (see the reordered loop below).
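In other words, in the loop above the Clear issued after DrawTextA repaints part of the back buffer after the frame has already been drawn; the fix is to clear at the start of the frame. A minimal reordering of the same loop (same globals g_pDeviceEx, g_Font and hWnd as in the question; Clear is legal outside BeginScene/EndScene):
while (true)
{
    D3DRECT rect = { 50, 50, 200, 200 };
    RECT r = { 0, 0, 1920, 1200 };
    RECT xrect = { 200, 200, 500, 300 };

    // Clear first, then draw, so the drawing survives until PresentEx.
    g_pDeviceEx->Clear(1, &rect, D3DCLEAR_TARGET, D3DCOLOR_ARGB(255, 0, 255, 0), 1.0f, 0);
    g_pDeviceEx->BeginScene();
    g_Font->DrawTextA(0, "D3D HARDWARE OVERLAY", -1, &xrect, DT_LEFT, D3DCOLOR_ARGB(255, 255, 0, 0));
    g_pDeviceEx->EndScene();
    g_pDeviceEx->PresentEx(&r, &r, hWnd, NULL, 0);
}
Cheers.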