How to access pixel data from ID3D11Texture2D? - c++

I'm using the Windows Desktop Duplication API to build my own mirroring protocol.
I have this piece of code:
// Get new frame
HRESULT hr = m_DeskDupl->AcquireNextFrame(500, &FrameInfo, &DesktopResource);
if (hr == DXGI_ERROR_WAIT_TIMEOUT)
{
*Timeout = true;
return DUPL_RETURN_SUCCESS;
}
Here is the FRAME_DATA structure that holds the acquired frame:
typedef struct _FRAME_DATA {
ID3D11Texture2D* Frame;
DXGI_OUTDUPL_FRAME_INFO FrameInfo;
_Field_size_bytes_((MoveCount * sizeof(DXGI_OUTDUPL_MOVE_RECT)) + (DirtyCount * sizeof(RECT))) BYTE* MetaData;
UINT DirtyCount;
UINT MoveCount;
} FRAME_DATA;
I would like to extract the pixel buffer from ID3D11Texture2D* Frame;
How can I extract it into a BYTE* or unsigned char* buffer as an RGB sequence?
Thank you!

You need to create a second texture of the same size with CPU read access using ID3D11Device::CreateTexture2D. Then copy the whole frame, or just the updated parts, to this texture on the GPU using ID3D11DeviceContext::CopyResource or ID3D11DeviceContext::CopySubresourceRegion (it is possible to retrieve which parts were updated using IDXGIOutputDuplication::GetFrameDirtyRects and IDXGIOutputDuplication::GetFrameMoveRects). Finally, map the second texture to make it accessible by the CPU using ID3D11DeviceContext::Map, which gives you a D3D11_MAPPED_SUBRESOURCE struct containing a pointer to the buffer with the frame data and its size; that is what you are looking for.
Microsoft provides a rather detailed Desktop Duplication API usage sample implementing all the steps mentioned above.
There is also a straightforward sample demonstrating how to save ID3D11Texture2D data to a file.
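For the CopySubresourceRegion path, here is a minimal sketch of copying only the dirty regions (hypothetical names context, stagingTex and acquiredTex; FrameInfo and m_DeskDupl are from the question's snippet; error handling trimmed):
if (FrameInfo.TotalMetadataBufferSize > 0)
{
    // TotalMetadataBufferSize is large enough for all move and dirty rect metadata of this frame.
    std::vector<BYTE> metaData(FrameInfo.TotalMetadataBufferSize);
    UINT dirtyBytes = 0;
    HRESULT hr2 = m_DeskDupl->GetFrameDirtyRects((UINT)metaData.size(),
        reinterpret_cast<RECT*>(metaData.data()), &dirtyBytes);
    if (SUCCEEDED(hr2))
    {
        const RECT* dirtyRects = reinterpret_cast<const RECT*>(metaData.data());
        for (UINT i = 0; i < dirtyBytes / sizeof(RECT); ++i)
        {
            const RECT& r = dirtyRects[i];
            // D3D11_BOX fields are left, top, front, right, bottom, back.
            D3D11_BOX box = { (UINT)r.left, (UINT)r.top, 0, (UINT)r.right, (UINT)r.bottom, 1 };
            // stagingTex must be a D3D11_USAGE_STAGING texture with D3D11_CPU_ACCESS_READ;
            // acquiredTex is the ID3D11Texture2D queried from DesktopResource.
            context->CopySubresourceRegion(stagingTex, 0, r.left, r.top, 0, acquiredTex, 0, &box);
        }
    }
}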

Hi, here is code that should help with your requirement. The output will be in the UCHAR buffer g_iMageBuffer.
//Variable Declaration
IDXGIOutputDuplication* lDeskDupl;
IDXGIResource* lDesktopResource = nullptr;
DXGI_OUTDUPL_FRAME_INFO lFrameInfo;
ID3D11Texture2D* lAcquiredDesktopImage;
ID3D11Texture2D* lDestImage;
ID3D11DeviceContext* lImmediateContext;
UCHAR* g_iMageBuffer = nullptr;
//Screen capture start here
hr = lDeskDupl->AcquireNextFrame(20, &lFrameInfo, &lDesktopResource);
// QueryInterface for ID3D11Texture2D
hr = lDesktopResource->QueryInterface(IID_PPV_ARGS(&lAcquiredDesktopImage));
lDesktopResource->Release();
// Copy the acquired frame into the CPU-readable staging texture
lImmediateContext->CopyResource(lDestImage, lAcquiredDesktopImage);
lAcquiredDesktopImage->Release();
lDeskDupl->ReleaseFrame();
// Copy GPU Resource to CPU
D3D11_TEXTURE2D_DESC desc;
lDestImage->GetDesc(&desc);
D3D11_MAPPED_SUBRESOURCE resource;
UINT subresource = D3D11CalcSubresource(0, 0, 0);
lImmediateContext->Map(lDestImage, subresource, D3D11_MAP_READ_WRITE, 0, &resource);
std::unique_ptr<BYTE[]> pBuf(new BYTE[resource.RowPitch * desc.Height]);
// lOutputDuplDesc is the DXGI_OUTDUPL_DESC obtained earlier from IDXGIOutputDuplication::GetDesc
UINT lBmpRowPitch = lOutputDuplDesc.ModeDesc.Width * 4;
BYTE* sptr = reinterpret_cast<BYTE*>(resource.pData);
BYTE* dptr = pBuf.get() + resource.RowPitch*desc.Height - lBmpRowPitch;
UINT lRowPitch = std::min<UINT>(lBmpRowPitch, resource.RowPitch);
for (size_t h = 0; h < lOutputDuplDesc.ModeDesc.Height; ++h)
{
memcpy_s(dptr, lBmpRowPitch, sptr, lRowPitch);
sptr += resource.RowPitch;
dptr -= lBmpRowPitch;
}
lImmediateContext->Unmap(lDestImage, subresource);
long g_captureSize = lBmpRowPitch * desc.Height;
g_iMageBuffer = new UCHAR[g_captureSize];
//Copying to UCHAR buffer
memcpy(g_iMageBuffer, pBuf.get(), g_captureSize);
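Note that the snippet never shows how lDestImage was created. A sketch of a compatible CPU-readable staging texture, assuming lDevice is the ID3D11Device and lOutputDuplDesc is the DXGI_OUTDUPL_DESC from IDXGIOutputDuplication::GetDesc, could look like this:
D3D11_TEXTURE2D_DESC lStagingDesc = {};
lStagingDesc.Width = lOutputDuplDesc.ModeDesc.Width;
lStagingDesc.Height = lOutputDuplDesc.ModeDesc.Height;
lStagingDesc.Format = lOutputDuplDesc.ModeDesc.Format;   // usually DXGI_FORMAT_B8G8R8A8_UNORM
lStagingDesc.ArraySize = 1;
lStagingDesc.MipLevels = 1;
lStagingDesc.SampleDesc.Count = 1;
lStagingDesc.Usage = D3D11_USAGE_STAGING;                 // CPU-accessible copy target
lStagingDesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ | D3D11_CPU_ACCESS_WRITE; // matches the Map(READ_WRITE) above
lStagingDesc.BindFlags = 0;
lStagingDesc.MiscFlags = 0;
hr = lDevice->CreateTexture2D(&lStagingDesc, nullptr, &lDestImage);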

Related

Rendering frames from a webcam to a DirectX 11 texture

I am having difficulty updating a DirectX 11 texture with the image data from a webcam frame buffer in memory. I've managed to create a texture from a single frame in the buffer but as the buffer is overwritten with the next frame the texture doesn't update. So I'm left with a snap shot image rather than a live stream which I'm after.
I am trying to use the Map/Unmap methods for updating an ID3D11Texture2D resource because that is supposedly more efficient than using the UpdateSubresource method. I haven't managed to get either to work. I'm new to DirectX and I just can't find a good explanation anywhere on how to accomplish this.
Create texture here:
bool CreateCamTexture(ID3D11ShaderResourceView** out_srv, RGBQUAD* ptrimg, int* image_width, int* image_height)
{
ZeroMemory(&desc, sizeof(desc));
desc.Width = *image_width;
desc.Height = *image_height;
desc.MipLevels = 1;
desc.ArraySize = 1;
desc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
desc.SampleDesc.Count = 1;
desc.Usage = D3D11_USAGE_DYNAMIC;
desc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
desc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
std::cout << ptrimg << std::endl;
subResource.pSysMem = ptrimg;
subResource.SysMemPitch = desc.Width * 4;
subResource.SysMemSlicePitch = 0;
g_pd3dDevice->CreateTexture2D(&desc, &subResource, &pTexture);
ZeroMemory(&srvDesc, sizeof(srvDesc));
srvDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
srvDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
srvDesc.Texture2D.MipLevels = desc.MipLevels;
srvDesc.Texture2D.MostDetailedMip = 0;
g_pd3dDevice->CreateShaderResourceView(pTexture, &srvDesc, out_srv);
if (pTexture != NULL) {
pTexture->Release();
}
else
{
std::cout << "pTexture is NULL ShaderResourceView not created" << std::endl;
}
return true;
}
bool CreateDeviceD3D(HWND hWnd)
{
// Setup swap chain
DXGI_SWAP_CHAIN_DESC sd;
ZeroMemory(&sd, sizeof(sd));
sd.BufferCount = 2;
sd.BufferDesc.Width = 0;
sd.BufferDesc.Height = 0;
sd.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
sd.BufferDesc.RefreshRate.Numerator = 60;
sd.BufferDesc.RefreshRate.Denominator = 1;
sd.Flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH;
sd.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
sd.OutputWindow = hWnd;
sd.SampleDesc.Count = 1;
sd.SampleDesc.Quality = 0;
sd.Windowed = TRUE;
sd.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
UINT createDeviceFlags = 0;
createDeviceFlags |= D3D11_CREATE_DEVICE_DEBUG;
D3D_FEATURE_LEVEL featureLevel;
const D3D_FEATURE_LEVEL featureLevelArray[2] = { D3D_FEATURE_LEVEL_11_0, D3D_FEATURE_LEVEL_10_0, };
Attempting to Map/Unmap texture:
void UpdateCamTexture() {
D3D11_MAPPED_SUBRESOURCE mappedResource;
ZeroMemory(&mappedResource, sizeof(D3D11_MAPPED_SUBRESOURCE));
g_pd3dDeviceContext->Map(
pTexture,
0, //0,
D3D11_MAP_WRITE_DISCARD,
0,
&mappedResource);
memcpy(mappedResource.pData, listener_instance.pImgData, sizeof(listener_instance.pImgData));
// Reenable GPU access to the vertex buffer data.
g_pd3dDeviceContext->Unmap(pTexture, 0);
std::cout << "texture updated" << std::endl;
}
I don't get an error, the image is just black. I don't have debug layer enabled though.
Calling sizeof on the pointer listener_instance.pImgData is not what you want, since it returns the size of the pointer type (8 on x64) and not the size of the array the pointer points to. Calling memcpy with just the image data size in bytes is also not a completely correct solution. See here for more details.
I will copy the answer from there just in case it's deleted.
Maximus Minimus's answer:
Check the returned pitch from your map call - you're assuming it's width * 4 (for 32-bit RGBA) but it may not be (particularly if your texture is not a power of 2 or its width is not a multiple of 4).
You can only memcpy the entire block in one operation if the pitch is equal to width * the number of bytes in the format. Otherwise you must memcpy one row at a time.
Sample code, excuse C-isms:
// assumes that src and dst are 32-bit RGBA data
unsigned *src; // this comes from whatever your input is
unsigned *dst = (unsigned *) msr.pData; // msr is a D3D11_MAPPED_SUBRESOURCE derived from ID3D11DeviceContext::Map
// width and height come from ID3D11Texture2D::GetDesc
for (int i = 0; i < height; i++)
{
memcpy (dst, src, width * 4); // copy one row at a time because msr.RowPitch may be != (width * 4)
dst += msr.RowPitch >> 2; // msr.RowPitch is in bytes so for 32-bit data we divide by 4 (or downshift by 2, same thing)
src += width; // assumes pitch of source data is equal to width * 4
}
You can, of course, also include a test for if (msr.RowPitch == width * 4) and do a single memcpy of the entire thing if it's true.
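Applied to the UpdateCamTexture function from the question, the row-by-row copy could look roughly like this (a sketch; camWidth and camHeight are assumed to hold the dimensions passed to CreateCamTexture, and pTexture must still be a live reference, which means it must not be released inside CreateCamTexture as the question's code does):
void UpdateCamTexture() {
    D3D11_MAPPED_SUBRESOURCE mappedResource;
    HRESULT hr = g_pd3dDeviceContext->Map(pTexture, 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedResource);
    if (FAILED(hr))
        return;
    const BYTE* src = reinterpret_cast<const BYTE*>(listener_instance.pImgData);
    BYTE* dst = reinterpret_cast<BYTE*>(mappedResource.pData);
    const UINT srcPitch = camWidth * 4;                // tightly packed 32-bit source rows
    for (int y = 0; y < camHeight; ++y)
    {
        memcpy(dst, src, srcPitch);                    // one row at a time; RowPitch may differ
        src += srcPitch;
        dst += mappedResource.RowPitch;
    }
    g_pd3dDeviceContext->Unmap(pTexture, 0);
}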

How to Render a Bitmap from BITMAPINFOHEADER and BYTE using Direct2D

I am trying to create a C++ application which captures the bitmap from the magnifier and renders it using Direct2D.
I currently have the code to save the bitmap from the magnifier to a file, but what I need is to draw that bitmap to my window using Direct2D instead of saving it to a file.
The magnifier returns the image as a struct in the form of MAGIMAGEHEADER, and I was able to obtain a BITMAPINFOHEADER and the bytes from it. I need to render it to a window using Direct2D.
Here is the code used to obtain the BITMAPINFOHEADER and bytes from the Magnification API:
BOOL MagImageScaling(HWND hwnd, void *srcdata, MAGIMAGEHEADER srcheader, void *destdata, MAGIMAGEHEADER destheader,RECT unclipped, RECT clipped, HRGN dirty)
{
// Setup the bitmap info header
bmif.biSize = sizeof(BITMAPINFOHEADER);
bmif.biHeight = srcheader.height;
bmif.biWidth = srcheader.width;
bmif.biSizeImage = srcheader.cbSize;
bmif.biPlanes = 1;
bmif.biBitCount = (WORD)(bmif.biSizeImage / bmif.biHeight / bmif.biWidth * 8);
bmif.biCompression = BI_RGB;
// Prepare the buffer
if (pData != NULL)
{
delete pData;
pData = NULL;
}
pData = (BYTE*)malloc(bmif.biSizeImage);
memcpy(pData, srcdata, bmif.biSizeImage);
// The data bit is in top->bottom order, so we convert it to bottom->top order
LONG lineSize = bmif.biWidth * bmif.biBitCount / 8;
BYTE* pLineData = new BYTE[lineSize];
BYTE* pStart;
BYTE* pEnd;
LONG lineStart = 0;
LONG lineEnd = bmif.biHeight - 1;
while (lineStart < lineEnd)
{
// Get the address of the swap line
pStart = pData + (lineStart * lineSize);
pEnd = pData + (lineEnd * lineSize);
// Swap the top with the bottom
memcpy(pLineData, pStart, lineSize);
memcpy(pStart, pEnd, lineSize);
memcpy(pEnd, pLineData, lineSize);
// Adjust the line index
lineStart++;
lineEnd--;
}
delete pLineData;
// Set the flag to say that the callback function is finished
bCallbacked = TRUE;
return TRUE;
}
Here the variable bmif is a BITMAPINFOHEADER and pData holds the bytes.
Is there any way to achieve this?
If you have the HBITMAP handle, you can do this:
Get the size of your image using: ::GetObject(hBmp, sizeof(BITMAP), &bmpSizeInfo);
fill a BITMAPINFO like this:
memset(&bmpData, 0, sizeof(BITMAPINFO));
bmpData.bmiHeader.biSize = sizeof(bmpData.bmiHeader);
bmpData.bmiHeader.biHeight = -bmpSizeInfo.bmHeight;
bmpData.bmiHeader.biWidth = bmpSizeInfo.bmWidth;
bmpData.bmiHeader.biPlanes = bmpSizeInfo.bmPlanes;
bmpData.bmiHeader.biBitCount = bmpSizeInfo.bmBitsPixel;
create enough heap memory to hold the data for your bitmap:
pBuff = new char[bmpSizeInfo.bmWidth * bmpSizeInfo.bmHeight * 4];
Get the bitmap data like this:
::GetDIBits(hDc, hBmp, 0, bmpSizeInfo.bmHeight, (void*)pBuff, &bmpData, DIB_RGB_COLORS);
Create a D2D1_BITMAP_PROPERTIES and fill it like this:
bmpPorp.dpiX = 0.0f;
bmpPorp.dpiY = 0.0f;
bmpPorp.pixelFormat.format = DXGI_FORMAT_B8G8R8A8_UNORM;
bmpPorp.pixelFormat.alphaMode = D2D1_ALPHA_MODE_IGNORE;
Using your render target, turn the data into an ID2D1Bitmap:
pRT->CreateBitmap(bmpSize, pBuff, 4 * bmpSizeInfo.bmWidth, bmpPorp, &pBmpFromH);
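Put together, the steps above could look roughly like this (a sketch; hBmp, hDc, and pRT are assumed to be your HBITMAP, a device context, and your ID2D1RenderTarget):
BITMAP bmpSizeInfo = {};
::GetObject(hBmp, sizeof(BITMAP), &bmpSizeInfo);
BITMAPINFO bmpData;
memset(&bmpData, 0, sizeof(BITMAPINFO));
bmpData.bmiHeader.biSize = sizeof(bmpData.bmiHeader);
bmpData.bmiHeader.biHeight = -bmpSizeInfo.bmHeight;       // negative height = top-down rows
bmpData.bmiHeader.biWidth = bmpSizeInfo.bmWidth;
bmpData.bmiHeader.biPlanes = bmpSizeInfo.bmPlanes;
bmpData.bmiHeader.biBitCount = bmpSizeInfo.bmBitsPixel;
char* pBuff = new char[bmpSizeInfo.bmWidth * bmpSizeInfo.bmHeight * 4];
::GetDIBits(hDc, hBmp, 0, bmpSizeInfo.bmHeight, (void*)pBuff, &bmpData, DIB_RGB_COLORS);
D2D1_BITMAP_PROPERTIES bmpPorp = {};
bmpPorp.dpiX = 0.0f;                                      // 0 lets Direct2D pick the default DPI
bmpPorp.dpiY = 0.0f;
bmpPorp.pixelFormat.format = DXGI_FORMAT_B8G8R8A8_UNORM;
bmpPorp.pixelFormat.alphaMode = D2D1_ALPHA_MODE_IGNORE;
D2D1_SIZE_U bmpSize = D2D1::SizeU(bmpSizeInfo.bmWidth, bmpSizeInfo.bmHeight);
ID2D1Bitmap* pBmpFromH = nullptr;
pRT->CreateBitmap(bmpSize, pBuff, 4 * bmpSizeInfo.bmWidth, bmpPorp, &pBmpFromH);
delete[] pBuff;                                           // the D2D bitmap owns its own copy of the pixels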
Here is how you can use the magnifier's bitmap with Direct2D. You don't need the BITMAPINFOHEADER since the magnifier's format is the same as DXGI_FORMAT_B8G8R8A8_UNORM:
BOOL MagImageScaling(HWND hwnd, void* srcdata, MAGIMAGEHEADER srcheader, void* destdata, MAGIMAGEHEADER destheader, RECT unclipped, RECT clipped, HRGN dirty)
{
// note: all this (dc, surface, target) can be created only once as long as the D3D device isn't reset
ComPtr<ID2D1DeviceContext> dc;
HR(d2Device->CreateDeviceContext(D2D1_DEVICE_CONTEXT_OPTIONS_NONE, dc.GetAddressOf()));
ComPtr<IDXGISurface2> surface;
HR(swapChain->GetBuffer(0, IID_PPV_ARGS(&surface)));
ComPtr<ID2D1Bitmap1> target;
HR(dc->CreateBitmapFromDxgiSurface(surface.Get(), NULL, target.GetAddressOf()));
dc->SetTarget(target.Get());
D2D1_BITMAP_PROPERTIES properties = {};
properties.pixelFormat.alphaMode = D2D1_ALPHA_MODE_PREMULTIPLIED;
// note: this is ok as srcheader.format (GUID_WICPixelFormat32bppRGBA) is compatible
properties.pixelFormat.format = DXGI_FORMAT_B8G8R8A8_UNORM;
D2D1_SIZE_U size = {};
size.width = srcheader.width;
size.height = srcheader.height;
ComPtr<ID2D1Bitmap> bitmap;
HR(dc->CreateBitmap(size, properties, bitmap.GetAddressOf()));
HR(bitmap->CopyFromMemory(NULL, srcdata, srcheader.stride));
dc->BeginDraw();
// note: we don't call this because we draw on the whole render target
//dc->Clear();
dc->DrawBitmap(bitmap.Get());
HR(dc->EndDraw());
HR(swapChain->Present(1, 0));
return TRUE;
}

DXGI Screen capture distorted image

Not only does the fps drop from 60 to 20-21, but the image also looks distorted, like this. The second image is what it should look like.
What it looks like
What it should look like
if (captureVideo == 1) {
pNewTexture = NULL;
// Use the IDXGISwapChain::GetBuffer API to retrieve a swap chain surface ( use the uuid ID3D11Texture2D for the result type ).
pSwapChain->GetBuffer( 0, __uuidof( ID3D11Texture2D ), reinterpret_cast< void** >( &pSurface ) );
/* The swap chain buffers are not mapable, so I need to copy it to a staging resource. */
pSurface->GetDesc( &description ); //Use ID3D11Texture2D::GetDesc to retrieve the surface description
// Patch it with a D3D11_USAGE_STAGING usage and a cpu access flag of D3D11_CPU_ACCESS_READ
description.BindFlags = 0;
description.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
description.Usage = D3D11_USAGE_STAGING;
// Create a temporary surface ID3D11Device::CreateTexture2D
HRESULT hr = pDevice->CreateTexture2D( &description, NULL, &pNewTexture );
if( pNewTexture )
{
// Copy to the staging surface ID3D11DeviceContext::CopyResource
pContext->CopyResource( pNewTexture, pSurface );
// Now I have a ID3D11Texture2D with the content of your swap chain buffer that allow you to use the ID3D11DeviceContext::Map API to read it on the CPU
D3D11_MAPPED_SUBRESOURCE resource;
pContext->Map( pNewTexture, D3D11CalcSubresource( 0, 0, 0), D3D11_MAP_READ, 0, &resource );
const int pitch = w << 2;
const unsigned char* source = static_cast< const unsigned char* >( resource.pData );
unsigned char* dest = static_cast< unsigned char* >(m_lpBits);
for( int i = 0; i < h; ++i )
{
memcpy( dest, source, w * 4 );
source += pitch;
dest += pitch;
}
AppendNewFrame(w, h, m_lpBits,24);
pContext->Unmap( pNewTexture, 0);
pNewTexture->Release();
}
}
The code snippet, even though incomplete, shows several potential problems:
The 24 in the AppendNewFrame line suggests that you are treating the data as 24-bit RGB, while your data is 32-bit RGB. Such a mismatch matches the artifacts exhibited on the attached images;
Pitch/stride is taken as an assumed default (w << 2), while the effectively used value is available in the D3D11_MAPPED_SUBRESOURCE structure (RowPitch), and you should be using it.
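In other words, advance the source pointer by the pitch that Map reported rather than by an assumed w * 4, and report the real bit depth downstream. A corrected version of the copy loop might look like this (keeping the question's variable names; whether AppendNewFrame should receive 32 depends on what that function expects):
const unsigned char* source = static_cast<const unsigned char*>(resource.pData);
unsigned char* dest = static_cast<unsigned char*>(m_lpBits);
const int destPitch = w * 4;                 // destination rows are tightly packed 32-bit pixels
for (int i = 0; i < h; ++i)
{
    memcpy(dest, source, destPitch);
    source += resource.RowPitch;             // the pitch actually used by the mapped texture
    dest += destPitch;
}
AppendNewFrame(w, h, m_lpBits, 32);          // the data is 32-bit, not 24-bit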

Convert IMFSample* to ID3D11ShaderResourceView*

I am new to DirectX and I am trying to do a simple application that reads a video and displays it on a quad.
I read the video using Windows Media Foundation (IMFSourceReader), which sends me a callback when a sample is decoded (IMFSample).
I want to convert this IMFSample* to an ID3D11ShaderResourceView* in order to use it as a texture to draw my quad; however, the conversion fails.
Here is what I do (I removed non relevant error checks):
HRESULT SourceReaderCB::OnReadSample(HRESULT hrStatus, DWORD dwStreamIndex, DWORD dwStreamFlags, LONGLONG llTimestamp, IMFSample *pSample)
{
...
DWORD NumBuffers = 0;
hr = pSample->GetBufferCount(&NumBuffers);
if (FAILED(hr) || NumBuffers < 1)
{
...
}
IMFMediaBuffer* SourceMediaPtr = nullptr;
hr = pSample->GetBufferByIndex(0, &SourceMediaPtr);
if (FAILED(hr))
{
...
}
ComPtr<IMFMediaBuffer> _pInputBuffer = SourceMediaPtr;
ComPtr<IMF2DBuffer2> _pInputBuffer2D2;
bool isVideoFrame = (_pInputBuffer.As(&_pInputBuffer2D2) == S_OK);
if (isVideoFrame)
{
IMFDXGIBuffer* pDXGIBuffer = NULL;
ID3D11Texture2D* pSurface = NULL;
hr = _pInputBuffer->QueryInterface(__uuidof(IMFDXGIBuffer), (LPVOID*)&pDXGIBuffer);
if (FAILED(hr))
{
SafeRelease(&SourceMediaPtr);
goto done;
}
hr = pDXGIBuffer->GetResource(__uuidof(ID3D11Texture2D), (LPVOID*)&pSurface);
if (FAILED(hr))
{
...
}
ID3D11ShaderResourceView* resourceView;
if (pSurface)
{
D3D11_TEXTURE2D_DESC textureDesc;
pSurface->GetDesc(&textureDesc);
D3D11_SHADER_RESOURCE_VIEW_DESC shaderResourceViewDesc;
shaderResourceViewDesc.Format = DXGI_FORMAT_R8_UNORM;
shaderResourceViewDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
shaderResourceViewDesc.Texture2D.MostDetailedMip = 0;
shaderResourceViewDesc.Texture2D.MipLevels = 1;
ID3D11ShaderResourceView* resourceView;
hr = d3d11device->CreateShaderResourceView(pSurface, &shaderResourceViewDesc, &resourceView);
if (FAILED(hr))
{
... // CODE FAILS HERE
}
...
}
}
}
My first issue is that I set shaderResourceViewDesc.Format to DXGI_FORMAT_R8_UNORM, which will probably just give me a red image (I will have to investigate this later).
The second and blocking issue I am facing is that the conversion of ID3D11Texture2D to ID3D11ShaderResourceView fails with the following error message:
ID3D11Device::CreateShaderResourceView: A ShaderResourceView cannot be created of a Resource that did not specify the D3D11_BIND_SHADER_RESOURCE BindFlag. [ STATE_CREATION ERROR #129: CREATESHADERRESOURCEVIEW_INVALIDRESOURCE]
I understand that there is a flag missing at the creation of the texture that prevents me from doing what I want, but as the data buffer is created by WMF, I am not sure what I am supposed to do to fix this issue.
Thanks for your help
I see your code, and I can say that your approach is wrong - no offense. Firstly, the video decoder creates a plain texture - in your situation a Direct3D 11 texture - and it is a regular texture, not a shader resource, so it cannot be used in shader code. In my view, there are two ways to resolve your task:
1. Research "Walkthrough: Using MF to render video in a Direct3D app" - this link presents the approach from "Walkthrough: Using Microsoft Media Foundation for Windows Phone 8". From your code I see that you are trying to write a solution for Windows Store / UWP, and the Windows Phone code is workable there. It needs MediaEnginePlayer - the MediaEnginePlayer class serves as a helper class that wraps the MF APIs;
2. Find Windows-classic-samples on GitHub and locate DX11VideoRenderer in it - this is the full code of a Media Foundation renderer with Direct3D 11. It includes a very good example of using the Direct3D 11 video processor, which blits the regular decoder video texture onto the rendering texture of the swap chain:
2.1. Get rendering texture from Swap Chain:
// Get Backbuffer
hr = m_pSwapChain1->GetBuffer(0, __uuidof(ID3D11Texture2D), (void**)&pDXGIBackBuffer);
if (FAILED(hr))
{
break;
}
2.2. Create from rendering texture output view of video processor:
//
// Create Output View of Output Surfaces.
//
D3D11_VIDEO_PROCESSOR_OUTPUT_VIEW_DESC OutputViewDesc;
ZeroMemory( &OutputViewDesc, sizeof( OutputViewDesc ) );
if (m_b3DVideo && m_bStereoEnabled)
{
OutputViewDesc.ViewDimension = D3D11_VPOV_DIMENSION_TEXTURE2DARRAY;
}
else
{
OutputViewDesc.ViewDimension = D3D11_VPOV_DIMENSION_TEXTURE2D;
}
OutputViewDesc.Texture2D.MipSlice = 0;
OutputViewDesc.Texture2DArray.MipSlice = 0;
OutputViewDesc.Texture2DArray.FirstArraySlice = 0;
if (m_b3DVideo && 0 != m_vp3DOutput)
{
OutputViewDesc.Texture2DArray.ArraySize = 2; // STEREO
}
QueryPerformanceCounter(&lpcStart);
hr = m_pDX11VideoDevice->CreateVideoProcessorOutputView(pDXGIBackBuffer, m_pVideoProcessorEnum, &OutputViewDesc, &pOutputView);
2.3. Create from regular decoder video texture input view for video processor:
D3D11_VIDEO_PROCESSOR_INPUT_VIEW_DESC InputLeftViewDesc;
ZeroMemory( &InputLeftViewDesc, sizeof( InputLeftViewDesc ) );
InputLeftViewDesc.FourCC = 0;
InputLeftViewDesc.ViewDimension = D3D11_VPIV_DIMENSION_TEXTURE2D;
InputLeftViewDesc.Texture2D.MipSlice = 0;
InputLeftViewDesc.Texture2D.ArraySlice = dwLeftViewIndex;
hr = m_pDX11VideoDevice->CreateVideoProcessorInputView(pLeftTexture2D, m_pVideoProcessorEnum, &InputLeftViewDesc, &pLeftInputView);
if (FAILED(hr))
{
break;
}
2.4. Do blitting of regular decoder video texture on rendering texture from Swap Chain:
D3D11_VIDEO_PROCESSOR_STREAM StreamData;
ZeroMemory( &StreamData, sizeof( StreamData ) );
StreamData.Enable = TRUE;
StreamData.OutputIndex = 0;
StreamData.InputFrameOrField = 0;
StreamData.PastFrames = 0;
StreamData.FutureFrames = 0;
StreamData.ppPastSurfaces = NULL;
StreamData.ppFutureSurfaces = NULL;
StreamData.pInputSurface = pLeftInputView;
StreamData.ppPastSurfacesRight = NULL;
StreamData.ppFutureSurfacesRight = NULL;
if (m_b3DVideo && MFVideo3DSampleFormat_MultiView == m_vp3DOutput && pRightTexture2D)
{
StreamData.pInputSurfaceRight = pRightInputView;
}
hr = pVideoContext->VideoProcessorBlt(m_pVideoProcessor, pOutputView, 0, 1, &StreamData );
if (FAILED(hr))
{
break;
}
Yes, these are sections of complex code, and understanding them requires studying the whole DX11VideoRenderer project - it will take a considerable amount of time.
Regards,
Evgeny Pereguda
The debug output suggests that the texture is not compatible, as it was created without the D3D11_BIND_SHADER_RESOURCE flag (specified in the BindFlags field of the D3D11_TEXTURE2D_DESC structure).
You are reading a texture already created by a Media Foundation primitive. In some cases you can alter the creation flags, but the general case is that you need to create a compatible texture on your own, copy the data between the textures, and then call CreateShaderResourceView with your texture as the argument rather than the original texture.
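A minimal sketch of that approach, reusing the names from the question (d3d11context stands in for whatever ID3D11DeviceContext you have; this is untested and simplified - decoded frames are often NV12 array slices, in which case you would copy the specific subresource with CopySubresourceRegion instead of CopyResource):
D3D11_TEXTURE2D_DESC textureDesc;
pSurface->GetDesc(&textureDesc);
textureDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;  // the flag the decoder's texture lacks
textureDesc.Usage = D3D11_USAGE_DEFAULT;
textureDesc.CPUAccessFlags = 0;
textureDesc.MiscFlags = 0;
ID3D11Texture2D* pShaderTexture = nullptr;
HRESULT hrCopy = d3d11device->CreateTexture2D(&textureDesc, nullptr, &pShaderTexture);
if (SUCCEEDED(hrCopy))
{
    // GPU-side copy of the decoded frame into the shader-compatible texture.
    d3d11context->CopyResource(pShaderTexture, pSurface);
    // Create the view against the copy, reusing shaderResourceViewDesc from the question.
    hrCopy = d3d11device->CreateShaderResourceView(pShaderTexture, &shaderResourceViewDesc, &resourceView);
}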

DirectX Partial Screen Capture

I am trying to create a program that will capture a full-screen DirectX application, look for a specific set of pixels on the screen and, if it finds them, draw an image on the screen.
I have been able to set up the application to capture the screen with the DirectX libraries, using the code from the answer to this question: Capture screen using DirectX.
In that example the code saves to the hard drive using the WIC libraries. I would rather manipulate the pixels instead of saving them.
After I have captured the screen and have an LPBYTE of the entire screen's pixels, I am unsure how to crop it to the region I want and then how to manipulate the pixel array. Is it just a multi-dimensional byte array?
The way I think I should do it is:
Capture screen to IWIC bitmap (done).
Convert IWIC bitmap to ID2D1 bitmap using ID2D1RenderTarget::CreateBitmapFromWicBitmap
Create new ID2D1::Bitmap to store partial image.
Copy a region of the ID2D1 bitmap to the new bitmap using ID2D1Bitmap::CopyFromBitmap (see the sketch after this list).
Render back onto screen using ID2D1 .
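For steps 3 and 4, a sketch could look like this (hypothetical names: pRenderTarget is the ID2D1RenderTarget, pFullBitmap is the bitmap from CreateBitmapFromWicBitmap, and left/top/width/height describe the crop region; both bitmaps must share the same pixel format and DPI):
ID2D1Bitmap* pRegion = nullptr;
D2D1_BITMAP_PROPERTIES props = D2D1::BitmapProperties(
    D2D1::PixelFormat(DXGI_FORMAT_B8G8R8A8_UNORM, D2D1_ALPHA_MODE_PREMULTIPLIED));
HRESULT hr = pRenderTarget->CreateBitmap(D2D1::SizeU(width, height), nullptr, 0, props, &pRegion);
if (SUCCEEDED(hr))
{
    D2D1_POINT_2U destPoint = D2D1::Point2U(0, 0);
    D2D1_RECT_U srcRect = D2D1::RectU(left, top, left + width, top + height);
    // Copy the wanted region out of the full-screen bitmap into the smaller one.
    hr = pRegion->CopyFromBitmap(&destPoint, pFullBitmap, &srcRect);
}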
Any help on any of this would be so much appreciated.
Here is a modified version of the original code that only captures a portion of the screen into a buffer, and also gives back the stride. Then it browses all the pixels and dumps their colors, as a sample usage of the returned buffer.
In this sample, the buffer is allocated by the function, so you must free it once you've used it:
// sample usage
int main()
{
LONG left = 10;
LONG top = 10;
LONG width = 100;
LONG height = 100;
LPBYTE buffer;
UINT stride;
RECT rc = { left, top, left + width, top + height };
Direct3D9TakeScreenshot(D3DADAPTER_DEFAULT, &buffer, &stride, &rc);
// In 32bppPBGRA format, each pixel is represented by 4 bytes
// with one byte each for blue, green, red, and the alpha channel, in that order.
// But don't forget this is all modulo endianness ...
// So, on Intel architecture, if we read a pixel from memory
// as a DWORD, it's reversed (ARGB). The macros below handle that.
// browse every pixel by line
for (int h = 0; h < height; h++)
{
LPDWORD pixels = (LPDWORD)(buffer + h * stride);
for (int w = 0; w < width; w++)
{
DWORD pixel = pixels[w];
wprintf(L"#%02X#%02X#%02X#%02X\n", GetBGRAPixelAlpha(pixel), GetBGRAPixelRed(pixel), GetBGRAPixelGreen(pixel), GetBGRAPixelBlue(pixel));
}
}
// get pixel at 50, 50 in the buffer, as #ARGB
DWORD pixel = GetBGRAPixel(buffer, stride, 50, 50);
wprintf(L"#%02X#%02X#%02X#%02X\n", GetBGRAPixelAlpha(pixel), GetBGRAPixelRed(pixel), GetBGRAPixelGreen(pixel), GetBGRAPixelBlue(pixel));
SavePixelsToFile32bppPBGRA(width, height, stride, buffer, L"test.png", GUID_ContainerFormatPng);
LocalFree(buffer);
return 0;
}
#define GetBGRAPixelBlue(p) (LOBYTE(p))
#define GetBGRAPixelGreen(p) (HIBYTE(p))
#define GetBGRAPixelRed(p) (LOBYTE(HIWORD(p)))
#define GetBGRAPixelAlpha(p) (HIBYTE(HIWORD(p)))
#define GetBGRAPixel(b,s,x,y) (((LPDWORD)(((LPBYTE)b) + y * s))[x])
HRESULT Direct3D9TakeScreenshot(UINT adapter, LPBYTE *pBuffer, UINT *pStride, const RECT *pInputRc = nullptr)
{
if (!pBuffer || !pStride) return E_INVALIDARG;
HRESULT hr = S_OK;
IDirect3D9 *d3d = nullptr;
IDirect3DDevice9 *device = nullptr;
IDirect3DSurface9 *surface = nullptr;
D3DPRESENT_PARAMETERS parameters = { 0 };
D3DDISPLAYMODE mode;
D3DLOCKED_RECT rc;
*pBuffer = NULL;
*pStride = 0;
// init D3D and get screen size
d3d = Direct3DCreate9(D3D_SDK_VERSION);
HRCHECK(d3d->GetAdapterDisplayMode(adapter, &mode));
LONG width = pInputRc ? (pInputRc->right - pInputRc->left) : mode.Width;
LONG height = pInputRc ? (pInputRc->bottom - pInputRc->top) : mode.Height;
parameters.Windowed = TRUE;
parameters.BackBufferCount = 1;
parameters.BackBufferHeight = height;
parameters.BackBufferWidth = width;
parameters.SwapEffect = D3DSWAPEFFECT_DISCARD;
parameters.hDeviceWindow = NULL;
// create device & capture surface (note it needs desktop size, not our capture size)
HRCHECK(d3d->CreateDevice(adapter, D3DDEVTYPE_HAL, NULL, D3DCREATE_SOFTWARE_VERTEXPROCESSING, &parameters, &device));
HRCHECK(device->CreateOffscreenPlainSurface(mode.Width, mode.Height, D3DFMT_A8R8G8B8, D3DPOOL_SYSTEMMEM, &surface, nullptr));
// get pitch/stride to compute the required buffer size
HRCHECK(surface->LockRect(&rc, pInputRc, 0));
*pStride = rc.Pitch;
HRCHECK(surface->UnlockRect());
// allocate buffer
*pBuffer = (LPBYTE)LocalAlloc(0, *pStride * height);
if (!*pBuffer)
{
hr = E_OUTOFMEMORY;
goto cleanup;
}
// get the data
HRCHECK(device->GetFrontBufferData(0, surface));
// copy it into our buffer
HRCHECK(surface->LockRect(&rc, pInputRc, 0));
CopyMemory(*pBuffer, rc.pBits, rc.Pitch * height);
HRCHECK(surface->UnlockRect());
cleanup:
if (FAILED(hr))
{
if (*pBuffer)
{
LocalFree(*pBuffer);
*pBuffer = NULL;
}
*pStride = 0;
}
RELEASE(surface);
RELEASE(device);
RELEASE(d3d);
return hr;
}