MapColorFrameToDepthFrame (Unhandled Exception, Access Violation) - c++

I tried to align the depth image with the color image using the MapColorFrameToDepthFrame function, but this call throws an unhandled exception (access violation):
pMapper->MapColorFrameToDepthFrame(
    NUI_IMAGE_TYPE_COLOR,
    NUI_IMAGE_RESOLUTION_640x480,
    NUI_IMAGE_RESOLUTION_640x480,
    640 * 480,
    (NUI_DEPTH_IMAGE_PIXEL*)LockedRect.Pitch,
    640 * 480,
    depthPoints);
Here is the code of the Nui_GotDepthAlert:
bool CSkeletalViewerApp::Nui_GotDepthAlert( ){
NUI_IMAGE_FRAME imageFrame;
bool processedFrame = true;
HRESULT hr = m_pNuiSensor->NuiImageStreamGetNextFrame(
m_pDepthStreamHandle,
0,
&imageFrame );
if ( FAILED( hr ) )
{
return false;
}
m_depthTimeStamp = imageFrame.liTimeStamp;
INuiFrameTexture * pTexture = imageFrame.pFrameTexture;
NUI_LOCKED_RECT LockedRect;
pTexture->LockRect( 0, &LockedRect, NULL, 0 );
if ( 0 != LockedRect.Pitch )
{
INuiCoordinateMapper* pMapper;
NUI_DEPTH_IMAGE_POINT* depthPoints;
depthPoints = new NUI_DEPTH_IMAGE_POINT[640 * 480];
m_pNuiSensor->NuiGetCoordinateMapper(&pMapper);
//NUI_DEPTH_IMAGE_PIXEL* pdepthpixel = (NUI_DEPTH_IMAGE_PIXEL*)LockedRect.Pitch;
pMapper->MapColorFrameToDepthFrame(
NUI_IMAGE_TYPE_COLOR,
NUI_IMAGE_RESOLUTION_640x480,
NUI_IMAGE_RESOLUTION_640x480,
640 * 480,
(NUI_DEPTH_IMAGE_PIXEL*)LockedRect.Pitch,
640 * 480,
depthPoints);
//memcpy(m_depthD16, LockedRect.pBits, LockedRect.size);
DWORD frameWidth, frameHeight;
NuiImageResolutionToSize( imageFrame.eResolution, frameWidth, frameHeight );
// draw the bits to the bitmap
BYTE * rgbrun = m_depthRGBX;
const USHORT * pBufferRun = (const USHORT *)LockedRect.pBits;
depthData = (USHORT *)LockedRect.pBits;
// end pixel is start + width*height - 1
const USHORT * pBufferEnd = pBufferRun + (frameWidth * frameHeight);
assert( frameWidth * frameHeight * g_BytesPerPixel <= ARRAYSIZE(m_depthRGBX) );
USHORT depth;
USHORT* depth1=(USHORT *)LockedRect.pBits;
USHORT realDepth;
while ( pBufferRun < pBufferEnd )//&& pDepth < depthEnd)
{
/**depthValues = pDepth->depth;
depthValues++;*/
//USHORT depth = *pBufferRun;
depth = *pBufferRun;
USHORT realDepth = NuiDepthPixelToDepth(depth);
USHORT player = NuiDepthPixelToPlayerIndex(depth);
// transform 13-bit depth information into an 8-bit intensity appropriate
// for display (we disregard information in most significant bit)
BYTE intensity = static_cast<BYTE>(~(realDepth >> 4));
// tint the intensity by dividing by per-player values
*(rgbrun++) = intensity >> g_IntensityShiftByPlayerB[player];
*(rgbrun++) = intensity >> g_IntensityShiftByPlayerG[player];
*(rgbrun++) = intensity >> g_IntensityShiftByPlayerR[player];
// no alpha information, skip the last byte
++rgbrun;
++pBufferRun;
}
m_pDrawDepth->Draw( m_depthRGBX, frameWidth * frameHeight * g_BytesPerPixel );
}
else
{
processedFrame = false;
OutputDebugString( L"Buffer length of received texture is bogus\r\n" );
}
pTexture->UnlockRect( 0 );
if(m_pDepthStreamHandle != NULL)
m_pNuiSensor->NuiImageStreamReleaseFrame( m_pDepthStreamHandle, &imageFrame );
return processedFrame;
}
Could someone tell me how to fix this problem?
Thank you

Hi there,
I was having some similar problems; after struggling with it for a long time, I finally found the trick.
The first problem I found in your code is in the MapColorFrameToDepthFrame call: the fifth parameter should be LockedRect.pBits, not LockedRect.Pitch.
The second problem is how you obtain pTexture:
INuiFrameTexture* pTexture = imageFrame.pFrameTexture;
will not work. You have to use
m_pNuiSensor->NuiImageFrameGetDepthImagePixelFrameTexture(m_hDepthStreamHandle, &imageFrame, &bNearMode, &pTexture)
Remember to check whether this call succeeded; I had to dig deep to find out why.
The third problem is that you have to call
delete[] depthPoints;
after use to release its memory, because you allocated it with the new operator.
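Putting those three fixes together, the depth-handling part of Nui_GotDepthAlert might look roughly like the sketch below. It reuses the member names from the question (m_pNuiSensor, m_pDepthStreamHandle, imageFrame) and abbreviates error handling, so treat it as an outline rather than drop-in code:
BOOL bNearMode = FALSE;
INuiFrameTexture* pDepthPixelTexture = nullptr;
HRESULT hr = m_pNuiSensor->NuiImageFrameGetDepthImagePixelFrameTexture(
    m_pDepthStreamHandle, &imageFrame, &bNearMode, &pDepthPixelTexture);
if ( FAILED( hr ) )
    return false;

NUI_LOCKED_RECT depthLockedRect;
pDepthPixelTexture->LockRect( 0, &depthLockedRect, NULL, 0 );
if ( depthLockedRect.Pitch != 0 )
{
    INuiCoordinateMapper* pMapper = nullptr;
    m_pNuiSensor->NuiGetCoordinateMapper( &pMapper );

    NUI_DEPTH_IMAGE_POINT* depthPoints = new NUI_DEPTH_IMAGE_POINT[640 * 480];
    hr = pMapper->MapColorFrameToDepthFrame(
        NUI_IMAGE_TYPE_COLOR,
        NUI_IMAGE_RESOLUTION_640x480,
        NUI_IMAGE_RESOLUTION_640x480,
        640 * 480,
        (NUI_DEPTH_IMAGE_PIXEL*)depthLockedRect.pBits,  // pBits, not Pitch
        640 * 480,
        depthPoints );

    // ... use depthPoints here ...

    delete[] depthPoints;   // pairs with the new[] above
    pMapper->Release();
}
pDepthPixelTexture->UnlockRect( 0 );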
After all of this, my code is finally working. I hope it will be helpful for you.
Thanks.

Related

Legacy C++ code not displaying bitmap under Windows 10

I have to get some legacy (XP) MFC code running under Windows 10. It runs fine under Windows 7. I haven't tried any other versions.
The idea is that there is a buffer in memory that contains the image pixel values in RGB.
Then to display the image to the screen it is transformed into a bitmap and then copied to the DC.
This is the function that does the display of the object to the screen.
It seems to work, but then nothing gets shown on the screen except a white box.
(The comments were already there! someone else must have had some fun with this as well!)
WORD* CFBuffer24::getBitmap(int nBitsPerPixel)
{
// check operation is valid
ASSERT(m_pFB[RED] && m_pFB[GREEN] && m_pFB[BLUE]);
if (nBitsPerPixel == 24)
{
if(NULL == m_pbm24)
m_pbm24 = new UCHAR[((m_fbSize.cx*3+3) & ~3)*m_fbSize.cy];
ASSERT(m_pbm24);
UCHAR* rptr = m_pFB[RED]->getPointer(0,0);
UCHAR* gptr = m_pFB[GREEN]->getPointer(0,0);
UCHAR* bptr = m_pFB[BLUE]->getPointer(0,0);
UCHAR* sptr = m_pbm24;
if (m_dGamma == 1.0)
{
for (int i = 0; i < m_fbSize.cx*m_fbSize.cy; i++, rptr++, gptr++, bptr++)
{
// Assumes 24bit display ie. B R G format
*sptr++ = *bptr;
*sptr++ = *gptr;
*sptr++ = *rptr;
}
}
else
{
UCHAR* wLUT = new UCHAR[256];
for (int i = 0; i < 256; i++)
{
int val = (int) (255.0 * pow((double) i / 255.0,1.0/m_dGamma) + 0.5);
if (val > 255)
val = 255;
wLUT[i] = UCHAR(val);
}
for (i = 0; i < m_fbSize.cx*m_fbSize.cy; i++, rptr++, gptr++, bptr++)
{
// Assumes 16bit display ie. 5R:6G:5B format
*sptr++ = wLUT[*bptr];
*sptr++ = wLUT[*gptr];
*sptr++ = wLUT[*rptr];
}
delete [] wLUT;
}
return((WORD*)m_pbm24);
}
//
if (nBitsPerPixel == 16)
{
if(NULL == m_pbm16)
m_pbm16= new WORD[((m_fbSize.cx*2+3) & ~3)*m_fbSize.cy];
ASSERT(NULL != m_pbm16);
UCHAR* rptr = m_pFB[RED]->getPointer(0,0);
UCHAR* gptr = m_pFB[GREEN]->getPointer(0,0);
UCHAR* bptr = m_pFB[BLUE]->getPointer(0,0);
WORD* sptr = m_pbm16;
if (m_dGamma == 1.0)
{
for (int i = 0; i < m_fbSize.cx*m_fbSize.cy; i++, rptr++, gptr++, bptr++)
{
// Assumes 16bit display ie. 5R:6G:5B format
*sptr++ = (WORD) ((((WORD)*bptr>>3)&0x001F) |
(((WORD)*gptr<<3)&0x07E0) |
(((WORD)*rptr<<8)&0xF800));
}
}
else
{
WORD* wLUT = new WORD[256];
for (int i = 0; i < 256; i++)
{
wLUT[i] = (WORD) (255.0 * pow((double) i / 255.0,1.0/m_dGamma) + 0.5);
if (wLUT[i] > 255)
wLUT[i] = 255;
}
for (i = 0; i < m_fbSize.cx*m_fbSize.cy; i++, rptr++, gptr++, bptr++)
{
// Assumes 16bit display ie. 5R:6G:5B format
*sptr++ = (WORD) (((wLUT[*bptr]>>3)&0x001F) |
((wLUT[*gptr]<<3)&0x07E0) |
((wLUT[*rptr]<<8)&0xF800));
}
delete [] wLUT;
}
return(m_pbm16);
}
// Bits per pixel must be wrong
return 0;
}
bool CImDisplay::CheckDisplaySize(CSize Imsize)
{
if (Imsize != m_Imsize) // resize the buffer
{
m_Imsize = Imsize;
// check the display capabilities
CDC* pDC;
pDC = m_pWnd->GetDC();
//Get the display capabilities
m_nBitPlanes = pDC->GetDeviceCaps( PLANES ); //Usually 1
m_nBitsPerPixel = pDC->GetDeviceCaps( BITSPIXEL );//Usually number of colours
m_pWnd->ReleaseDC(pDC);
if( m_nBitsPerPixel != 16 && m_nBitsPerPixel != 24 )
{
::MessageBox(::GetActiveWindow(),"Video mode is not compatable with\noutput resolution. Change Screen/Settings to 16 or 24 bits.","Unable to Display image", MB_ICONSTOP);
return false;
}
if (m_pBuf != 0)
delete [] m_pBuf;
m_pBuf = new UCHAR[m_Imsize.cx * m_Imsize.cy * m_nBitsPerPixel / 8];
if( !m_Map.CreateBitmap( m_Imsize.cx, m_Imsize.cy, m_nBitPlanes, m_nBitsPerPixel, m_pBuf ) )
::MessageBox(::GetActiveWindow(),"Can not create bitmap","Unable to Display image", MB_ICONSTOP);
}
return true;
}
void CImDisplay::Display(CFBuffer24* Image, CRect subRect)
{
CheckWindowPointer();
// Build the bastard bitmap
CSize imsize = Image->getBufferSize();
CheckDisplaySize(imsize);
// Write the Data to the Bit map
// Copies the image from our 24bit buffer into a bitmap and returns the pointer to the bitmap
DWORD dRet = m_Map.SetBitmapBits(imsize.cx*imsize.cy*m_nBitsPerPixel/8, Image->getBitmap(m_nBitsPerPixel) );
//Draw the bastard thing
CDC* pDC;
pDC = m_pWnd->GetDC();
CDC MemDC;
MemDC.CreateCompatibleDC( pDC );
CBitmap *pOldBitmap = MemDC.SelectObject( &m_Map );
pDC->SetStretchBltMode(COLORONCOLOR); // this gets the colours looking correct
pDC->StretchBlt( m_Location.TopLeft().x, m_Location.TopLeft().y,
m_Location.Width(), m_Location.Height(),
&MemDC, imSubRect.left, imSubRect.top,
imSubRect.Width(), imSubRect.Height(), SRCCOPY );
MemDC.SelectObject( pOldBitmap ); //Release the object
m_pWnd->ReleaseDC(pDC);
}
I run the program in 16-bit colour compatibility mode on Windows 10. This is so it will actually run, and it runs fine otherwise.
I have been able to get something to display on Windows 10 when I have used CreateCompatibleBitmap() instead of CreateBitmap(), but the colours are all garbled. On Windows 7, they are fine. Perhaps this is a clue, but I can't work out what it means.
Your code can miscalculate the required space for the bitmaps if the width is not a multiple of 4. It should be required_size = ( (width * bits_per_pixel / 8 + 3) & ~3 ) * height.
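For example (just a worked instance of that formula, with made-up numbers): a 24-bit image 101 pixels wide needs 101 * 3 = 303 bytes of pixel data per row, which gets padded up to 304 bytes:
int width = 101, height = 50, bits_per_pixel = 24;       // example values
int row_bytes = (width * bits_per_pixel / 8 + 3) & ~3;   // 303 rounded up to 304
int required_size = row_bytes * height;                  // 304 * 50 = 15200 bytes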
If CFBuffer24 handles the stride (bytes per row) correctly, you can do a stretching blit directly to the device context:
void CImDisplay::Display(CFBuffer24* Image, CRect subRect)
{
    CheckWindowPointer();
    CDC* pDC = m_pWnd->GetDC();
    m_nBitPlanes = pDC->GetDeviceCaps( PLANES );       //Usually 1
    m_nBitsPerPixel = pDC->GetDeviceCaps( BITSPIXEL );
    if( m_nBitsPerPixel != 16 && m_nBitsPerPixel != 24 && m_nBitsPerPixel != 32 )
    {
        ::MessageBox(::GetActiveWindow(),"Video mode is not compatible with\noutput resolution. Change Screen/Settings to 16 or 24 bits.","Unable to Display image", MB_ICONSTOP);
        return;
    }
    if ( m_nBitsPerPixel == 32 )
        m_nBitsPerPixel = 24;
    m_Imsize = Image->getBufferSize();
    // Bitmap rows are aligned to multiples of 4 bytes;
    // getBitmap() must pad its rows to this stride.
    int stride = (m_Imsize.cx * m_nBitsPerPixel / 8 + 3) & ~3;
    BITMAPINFO bi =
    {
        sizeof( BITMAPINFOHEADER ),
        m_Imsize.cx,    // biWidth is in pixels, not bytes; the row padding is implied
        -m_Imsize.cy,   // If bitmap looks upside down remove minus
        1,
        m_nBitsPerPixel,
        BI_RGB,
        0,
        0,
        0,
        0,
        0
    };
    pDC->SetStretchBltMode(COLORONCOLOR);
    ::StretchDIBits(
        pDC->GetSafeHdc(),
        m_Location.TopLeft().x,
        m_Location.TopLeft().y,
        m_Location.Width(),
        m_Location.Height(),
        // !!!! I don't know what imSubRect is. Is it related to subRect?
        imSubRect.left,
        imSubRect.top,
        imSubRect.Width(),
        imSubRect.Height(),
        Image->getBitmap(m_nBitsPerPixel),
        &bi,
        DIB_RGB_COLORS,
        SRCCOPY );
    m_pWnd->ReleaseDC(pDC);
}
The above code leaves the conversion from 24 to 32 bits to StretchDIBits, which should be OK performance-wise. Alternatively, you can implement the 32-bit case in CFBuffer24.

DirectX Partial Screen Capture

I am trying to create a program that will capture a full-screen DirectX application, look for a specific set of pixels on the screen and, if it finds them, draw an image on the screen.
I have been able to set up the application to capture the screen with the DirectX libraries using the code from the answer to this question: Capture screen using DirectX.
In that example the code saves the capture to the hard drive using the WIC libraries. I would rather manipulate the pixels instead of saving them.
After I have captured the screen and have an LPBYTE of the entire screen's pixels, I am unsure how to crop it to the region I want and then manipulate the pixel array. Is it just a multi-dimensional byte array?
The way I think I should do it is (a rough sketch follows this list):
1. Capture the screen to an IWIC bitmap (done).
2. Convert the IWIC bitmap to an ID2D1 bitmap using ID2D1RenderTarget::CreateBitmapFromWicBitmap.
3. Create a new ID2D1Bitmap to store the partial image.
4. Copy a region of the ID2D1 bitmap to the new bitmap using ID2D1Bitmap::CopyFromBitmap.
5. Render back onto the screen using ID2D1.
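For reference, a rough sketch of steps 2-5 might look like the code below. It assumes you already have an ID2D1RenderTarget* pRT and the captured IWICBitmap* pWicBitmap; the 100x100 crop at (10,10) and the variable names are placeholders, not part of the original question:
ID2D1Bitmap* pFullBitmap = nullptr;
HRESULT hr = pRT->CreateBitmapFromWicBitmap(pWicBitmap, nullptr, &pFullBitmap);   // step 2

D2D1_SIZE_U cropSize = D2D1::SizeU(100, 100);
ID2D1Bitmap* pCropBitmap = nullptr;
hr = pRT->CreateBitmap(cropSize,
                       D2D1::BitmapProperties(pFullBitmap->GetPixelFormat()),
                       &pCropBitmap);                                             // step 3

D2D1_POINT_2U destPoint = D2D1::Point2U(0, 0);
D2D1_RECT_U srcRect = D2D1::RectU(10, 10, 110, 110);   // region to crop
hr = pCropBitmap->CopyFromBitmap(&destPoint, pFullBitmap, &srcRect);              // step 4

pRT->BeginDraw();
pRT->DrawBitmap(pCropBitmap);                                                     // step 5
hr = pRT->EndDraw();

pCropBitmap->Release();
pFullBitmap->Release();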
Any help on any of this would be so much appreciated.
Here is a modified version of the original code that only captures a portion of the screen into a buffer, and also gives back the stride. Then it browses all the pixels and dumps their colors, as a sample usage of the returned buffer.
In this sample, the buffer is allocated by the function, so you must free it once you've used it:
#define GetBGRAPixelBlue(p) (LOBYTE(p))
#define GetBGRAPixelGreen(p) (HIBYTE(p))
#define GetBGRAPixelRed(p) (LOBYTE(HIWORD(p)))
#define GetBGRAPixelAlpha(p) (HIBYTE(HIWORD(p)))
#define GetBGRAPixel(b,s,x,y) (((LPDWORD)(((LPBYTE)b) + y * s))[x])
// sample usage
int main()
{
LONG left = 10;
LONG top = 10;
LONG width = 100;
LONG height = 100;
LPBYTE buffer;
UINT stride;
RECT rc = { left, top, left + width, top + height };
Direct3D9TakeScreenshot(D3DADAPTER_DEFAULT, &buffer, &stride, &rc);
// In 32bppPBGRA format, each pixel is represented by 4 bytes
// with one byte each for blue, green, red, and the alpha channel, in that order.
// But don't forget this is all modulo endianness ...
// So, on Intel architecture, if we read a pixel from memory
// as a DWORD, it's reversed (ARGB). The macros below handle that.
// browse every pixel by line
for (int h = 0; h < height; h++)
{
LPDWORD pixels = (LPDWORD)(buffer + h * stride);
for (int w = 0; w < width; w++)
{
DWORD pixel = pixels[w];
wprintf(L"#%02X#%02X#%02X#%02X\n", GetBGRAPixelAlpha(pixel), GetBGRAPixelRed(pixel), GetBGRAPixelGreen(pixel), GetBGRAPixelBlue(pixel));
}
}
// get pixel at 50, 50 in the buffer, as #ARGB
DWORD pixel = GetBGRAPixel(buffer, stride, 50, 50);
wprintf(L"#%02X#%02X#%02X#%02X\n", GetBGRAPixelAlpha(pixel), GetBGRAPixelRed(pixel), GetBGRAPixelGreen(pixel), GetBGRAPixelBlue(pixel));
SavePixelsToFile32bppPBGRA(width, height, stride, buffer, L"test.png", GUID_ContainerFormatPng);
LocalFree(buffer);
return 0;
}
HRESULT Direct3D9TakeScreenshot(UINT adapter, LPBYTE *pBuffer, UINT *pStride, const RECT *pInputRc = nullptr)
{
if (!pBuffer || !pStride) return E_INVALIDARG;
HRESULT hr = S_OK;
IDirect3D9 *d3d = nullptr;
IDirect3DDevice9 *device = nullptr;
IDirect3DSurface9 *surface = nullptr;
D3DPRESENT_PARAMETERS parameters = { 0 };
D3DDISPLAYMODE mode;
D3DLOCKED_RECT rc;
*pBuffer = NULL;
*pStride = 0;
// init D3D and get screen size
d3d = Direct3DCreate9(D3D_SDK_VERSION);
HRCHECK(d3d->GetAdapterDisplayMode(adapter, &mode));
LONG width = pInputRc ? (pInputRc->right - pInputRc->left) : mode.Width;
LONG height = pInputRc ? (pInputRc->bottom - pInputRc->top) : mode.Height;
parameters.Windowed = TRUE;
parameters.BackBufferCount = 1;
parameters.BackBufferHeight = height;
parameters.BackBufferWidth = width;
parameters.SwapEffect = D3DSWAPEFFECT_DISCARD;
parameters.hDeviceWindow = NULL;
// create device & capture surface (note it needs desktop size, not our capture size)
HRCHECK(d3d->CreateDevice(adapter, D3DDEVTYPE_HAL, NULL, D3DCREATE_SOFTWARE_VERTEXPROCESSING, &parameters, &device));
HRCHECK(device->CreateOffscreenPlainSurface(mode.Width, mode.Height, D3DFMT_A8R8G8B8, D3DPOOL_SYSTEMMEM, &surface, nullptr));
// get pitch/stride to compute the required buffer size
HRCHECK(surface->LockRect(&rc, pInputRc, 0));
*pStride = rc.Pitch;
HRCHECK(surface->UnlockRect());
// allocate buffer
*pBuffer = (LPBYTE)LocalAlloc(0, *pStride * height);
if (!*pBuffer)
{
hr = E_OUTOFMEMORY;
goto cleanup;
}
// get the data
HRCHECK(device->GetFrontBufferData(0, surface));
// copy it into our buffer
HRCHECK(surface->LockRect(&rc, pInputRc, 0));
CopyMemory(*pBuffer, rc.pBits, rc.Pitch * height);
HRCHECK(surface->UnlockRect());
cleanup:
if (FAILED(hr))
{
if (*pBuffer)
{
LocalFree(*pBuffer);
*pBuffer = NULL;
}
*pStride = 0;
}
RELEASE(surface);
RELEASE(device);
RELEASE(d3d);
return hr;
}

Why is locked bits returning -842150451 for all pixel values?

I am trying to get the bits in a bitmap, but I keep getting this output (PS. I tested the whole array as well):
-842150451 // Array before lockbits
-842150451 // Array after lockbits
This is my code to get the lockedBits.
BitmapData * getLockedBitmapData()
{
float squareSideLength = 50 * 4;
Bitmap * src = new Bitmap(squareSideLength , squareSideLength);
Graphics * graphics = Graphics::FromImage(solid);
SolidBrush blackBrush(Color(255, 0, 0, 0));
graphics->FillRectangle(&blackBrush, FLOAT_ZERO, FLOAT_ZERO, squareSideLength, squareSideLength);
int srcWidth = src->GetWidth();
int srcHeight = src->GetHeight();
UINT * pixels = new UINT[srcWidth * srcHeight];
// _RPT1(0, "%d\n", pixels[55]);
BitmapData * bitmapData = new BitmapData();
bitmapData->Width = srcWidth;
bitmapData->Height = srcHeight;
bitmapData->Stride = 4 * srcWidth;
bitmapData->PixelFormat = PixelFormat32bppARGB;
bitmapData->Scan0 = (VOID*) pixels;
bitmapData->Reserved = NULL;
src->LockBits(new Rect(0, 0, srcWidth, srcHeight),
ImageLockMode::ImageLockModeRead | ImageLockMode::ImageLockModeWrite,
src->GetPixelFormat(),
bitmapData);
// _RPT1(0, "%d\n", pixels[55]);
return bitmapData;
}
You are using it wrong: LockBits fills in the BitmapData for you. So it needs to be:
BitmapData bitmapData;
Status ret = src->LockBits(new Rect(0, 0, srcWidth, srcHeight),
                           ImageLockMode::ImageLockModeRead | ImageLockMode::ImageLockModeWrite,
                           src->GetPixelFormat(),
                           &bitmapData);
if (ret != Ok) {
    // Report error
    //...
}
Do not skip error checking.
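To make the flow concrete, a minimal sketch of reading the pixels this way could look like the following; the lock rectangle, the (x, y) coordinates and the stridePixels helper are illustrative, and it assumes a positive (top-down) stride:
Rect lockRect(0, 0, srcWidth, srcHeight);
BitmapData bitmapData;
Status ret = src->LockBits(&lockRect,
                           ImageLockMode::ImageLockModeRead,
                           PixelFormat32bppARGB,
                           &bitmapData);
if (ret == Ok)
{
    UINT* pixels = (UINT*)bitmapData.Scan0;        // filled in by LockBits
    UINT stridePixels = bitmapData.Stride / 4;     // Stride is in bytes
    UINT pixel = pixels[10 * stridePixels + 10];   // pixel at x = 10, y = 10
    // ... use the pixel data ...
    src->UnlockBits(&bitmapData);                  // always pair with LockBits
}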

Read pixels from a HDC

I'm trying to read all the pixels in a given area of an HDC to find out whether a color is present. Currently I have come up with:
IDirect3DSurface9* pSurface = 0;
p1->CreateOffscreenPlainSurface(1280, 1024,D3DFMT_A8R8G8B8, D3DPOOL_SYSTEMMEM, &pSurface, NULL);
p1->GetFrontBufferData(0, pSurface);
//assert( pSurface );
if( pSurface && GetTickCount() > dwGAKS_Fix )
{
HDC dc;
pSurface->GetDC( &dc );
COLORREF dpurp = D3DCOLOR_ARGB (255,102,0 ,153);
for( DWORD h = 610; h <= 670; h++ )
{
for( DWORD w = 480; w<=530; w++ )
{
COLORREF dwPixel = GetPixel( dc, h, w );
// CString strPixel; strPixel.Format( "Pixel col: %u at: %u X %u", dwPixel, d, i );
//if( dx_Font )
if( dwPixel == dpurp )
{
dx_Font->DrawTextA(NULL, "Shoot", strlen("Shoot"), &pos, DT_NOCLIP, D3DCOLOR_XRGB(0, 255, 0));
}
else
dx_Font->DrawTextA(NULL, "NoShoot", strlen("NoShoot"), &pos, DT_NOCLIP, D3DCOLOR_XRGB(0, 255, 0));
}
}
dwGAKS_Fix = GetTickCount() + 15;
pSurface->ReleaseDC( dc );
pSurface->Release();
But this solution is slow, very slow. I need something somewhat more... uh, professional.
edit
D3DLOCKED_RECT d3dlocked;
if( D3D_OK == pSurface->LockRect( &d3dlocked, 0, 0 ) )
{
UINT *pixels=(UINT *)d3dlocked.pBits;
if(pixels[52+15*1024]&0xFFFFFF00==dpurp)
{
}
pSurface->UnlockRect();
}
GetPixel is always slow. You can get direct access to the bits in the off-screen surface using IDirect3DSurface9::LockRect and then scan through the bitmap yourself, which should be much quicker.
(Edit) Any given pixel (x,y) is the 32 bit value found at:
*(DWORD*)(((BYTE*)d3dlocked.pBits) + y * d3dlocked.Pitch + x * sizeof(DWORD));
You should AND the value with 0x00ffffff to ignore the alpha channel.
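Put together, a sketch of that scan could look like the code below; it reuses pSurface and dpurp from the question (with dpurp being the D3DCOLOR_ARGB value, which matches the surface's ARGB byte layout) and scans the same rectangle as the GetPixel loops:
D3DLOCKED_RECT d3dlocked;
if (SUCCEEDED(pSurface->LockRect(&d3dlocked, NULL, D3DLOCK_READONLY)))
{
    bool found = false;
    for (DWORD y = 480; y <= 530 && !found; y++)
    {
        for (DWORD x = 610; x <= 670; x++)
        {
            DWORD pixel = *(DWORD*)(((BYTE*)d3dlocked.pBits) + y * d3dlocked.Pitch + x * sizeof(DWORD));
            if ((pixel & 0x00FFFFFF) == (dpurp & 0x00FFFFFF))   // ignore the alpha channel
            {
                found = true;
                break;
            }
        }
    }
    pSurface->UnlockRect();
}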

Display a image in a MFC/C++ application using OpenCV

I would like to display, in an MFC application, frames that I capture from an AVI file with OpenCV (the cvCaptureFromAVI function).
I'm new to MFC but feel like I'm close to making it work. But instead of the frames being displayed in the picture box, they are displayed in a new window.
cvGetWindowName always returns a null value.
Here is my code:
CWnd* hPic = 0;
hPic = GetDlgItem(IDC_STATICPIC1);
const char* szWindName = cvGetWindowName(hPic->GetSafeHwnd());
cvShowImage(szWindName, frame_copy);
So after a lot of research I finally found something that makes it work.
The solution is to create the window and then insert it inside the picture box. I'm not sure it's good practice but I haven't found anything better for now.
cvNamedWindow("IDC_STATIC_OUTPUT", 0);
cvResizeWindow("IDC_STATIC_OUTPUT", 420, 240);
HWND hWnd = (HWND) cvGetWindowHandle("IDC_STATIC_OUTPUT");
HWND hParent = ::GetParent(hWnd);
::SetParent(hWnd, GetDlgItem(IDC_PIC1)->m_hWnd);
::ShowWindow(hParent, SW_HIDE);
cvShowImage("IDC_STATIC_OUTPUT", frame_copy);
In this case the picture box is called IDC_PIC1 and frame_copy is an OpenCV IplImage.
Hope this helps somebody.
Using the following code you can convert a Mat to a CImage and then display the CImage anywhere you want:
int Mat2CImage(Mat *mat, CImage &img){
if(!mat || mat->empty())
return -1;
int nBPP = mat->channels()*8;
img.Create(mat->cols, mat->rows, nBPP);
if(nBPP == 8)
{
static RGBQUAD pRGB[256];
for (int i = 0; i < 256; i++)
pRGB[i].rgbBlue = pRGB[i].rgbGreen = pRGB[i].rgbRed = i;
img.SetColorTable(0, 256, pRGB);
}
uchar* psrc = mat->data;
uchar* pdst = (uchar*) img.GetBits();
int imgPitch = img.GetPitch();
for(int y = 0; y < mat->rows; y++)
{
memcpy(pdst, psrc, mat->cols*mat->channels());//mat->step is incorrect for those images created by roi (sub-images!)
psrc += mat->step;
pdst += imgPitch;
}
return 0;
}
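As a usage illustration (the function name, the control ID parameter and the use of CImage::Draw are my own example, not part of the answer), you could push a Mat into a dialog control like this:
void ShowMatInControl(CWnd* pDlg, UINT nCtrlID, Mat& frame)
{
    CImage img;
    if (Mat2CImage(&frame, img) != 0)
        return;                         // conversion failed (null or empty Mat)

    CWnd* pCtrl = pDlg->GetDlgItem(nCtrlID);
    CDC* pDC = pCtrl->GetDC();
    CRect rc;
    pCtrl->GetClientRect(&rc);

    img.Draw(pDC->GetSafeHdc(), rc);    // stretch the converted bitmap into the control
    pCtrl->ReleaseDC(pDC);
}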
NOTE: If you use the StretchDIBits() method with the BITMAPINFO approach, you MUST be aware that StretchDIBits() expects the raw OpenCV Mat::data pointer to have row lengths that are even multiples of 4 bytes! If not, you'll get freaky shearing when you try to copy the data to the DC via StretchDIBits(): the image is not only sheared along an angle, but the colors are all trashed as well.
Here is my completely working version of the code, which also supports maintaining the image aspect ratio in the target control's rectangle. It can probably be made a bit faster, but it works for now:
void AdjustAspectImageSize( const Size& imageSize,
const Size& destSize,
Size& newSize )
{
double destAspectRatio = float( destSize.width ) / float( destSize.height );
double imageAspectRatio = float( imageSize.width ) / float( imageSize.height );
if ( imageAspectRatio > destAspectRatio )
{
// Margins on top/bottom
newSize.width = destSize.width;
newSize.height = int( imageSize.height *
( double( destSize.width ) / double( imageSize.width ) ) );
}
else
{
// Margins on left/right
newSize.height = destSize.height;
newSize.width = int( imageSize.width *
( double( destSize.height ) / double( imageSize.height ) ) );
}
}
void DrawPicToHDC( Mat cvImg,
UINT nDlgID,
bool bMaintainAspectRatio /* =true*/ )
{
// Get the HDC handle information from the ID passed
CDC* pDC = GetDlgItem(nDlgID)->GetDC();
HDC hDC = pDC->GetSafeHdc();
CRect rect;
GetDlgItem(nDlgID)->GetClientRect(rect);
Size winSize( rect.right, rect.bottom );
// Calculate the size of the image that
// will fit in the control rectangle.
Size origImageSize( cvImg.cols, cvImg.rows );
Size imageSize;
int offsetX;
int offsetY;
if ( ! bMaintainAspectRatio )
{
// Image should be the same size as the control's rectangle
imageSize = winSize;
}
else
{
AdjustAspectImageSize( origImageSize,
winSize,
imageSize );
}
offsetX = ( winSize.width - imageSize.width ) / 2;
offsetY = ( winSize.height - imageSize.height ) / 2;
// Resize the source to the size of the destination image if necessary
Mat cvImgTmp;
resize( cvImg,
cvImgTmp,
imageSize,
0,
0,
INTER_AREA );
// To handle our Mat object of this width, the source rows must
// be even multiples of a DWORD in length to be compatible with
// StretchDIBits(). Calculate what the correct byte width of the
// row should be to be compatible with StretchDIBits() below.
int stride = ( ( ( ( imageSize.width * 24 ) + 31 ) & ~31 ) >> 3 );
// Allocate a buffer for our DIB bits
uchar* pcDibBits = (uchar*) malloc( imageSize.height * stride );
if ( pcDibBits != NULL )
{
// Copy the raw pixel data over to our dibBits buffer.
// NOTE: Can setup cvImgTmp to add the padding to skip this.
for ( int row = 0; row < cvImgTmp.rows; ++row )
{
// Get pointers to the beginning of the row on both buffers
uchar* pcSrcPixel = cvImgTmp.ptr<uchar>(row);
uchar* pcDstPixel = pcDibBits + ( row * stride );
// We can just use memcpy
memcpy( pcDstPixel,
pcSrcPixel,
stride );
}
// Initialize the BITMAPINFO structure
BITMAPINFO bitInfo;
bitInfo.bmiHeader.biBitCount = 24;
bitInfo.bmiHeader.biWidth = cvImgTmp.cols;
bitInfo.bmiHeader.biHeight = -cvImgTmp.rows;
bitInfo.bmiHeader.biPlanes = 1;
bitInfo.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
bitInfo.bmiHeader.biCompression = BI_RGB;
bitInfo.bmiHeader.biClrImportant = 0;
bitInfo.bmiHeader.biClrUsed = 0;
bitInfo.bmiHeader.biSizeImage = 0; //winSize.height * winSize.width * 3;
bitInfo.bmiHeader.biXPelsPerMeter = 0;
bitInfo.bmiHeader.biYPelsPerMeter = 0;
// Add header and OPENCV image's data to the HDC
StretchDIBits( hDC,
offsetX,
offsetY,
cvImgTmp.cols,
cvImgTmp.rows,
0,
0,
cvImgTmp.cols,
cvImgTmp.rows,
pcDibBits,
& bitInfo,
DIB_RGB_COLORS,
SRCCOPY );
free(pcDibBits);
}
ReleaseDC(pDC);
}
int DrawImageToHDC(IplImage* img, HDC hdc, int xDest, int yDest, UINT iUsage, DWORD rop)
{
char m_chBmpBuf[2048];
BITMAPINFO *m_pBmpInfo = 0;
m_pBmpInfo = (BITMAPINFO*)m_chBmpBuf;
m_pBmpInfo->bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
m_pBmpInfo->bmiHeader.biWidth = img->width;
m_pBmpInfo->bmiHeader.biHeight = -img->height;
m_pBmpInfo->bmiHeader.biBitCount = 24;
m_pBmpInfo->bmiHeader.biPlanes = 1;
m_pBmpInfo->bmiHeader.biCompression = BI_RGB;
m_pBmpInfo->bmiHeader.biSizeImage = 0;
m_pBmpInfo->bmiHeader.biXPelsPerMeter = 0;
m_pBmpInfo->bmiHeader.biYPelsPerMeter = 0;
m_pBmpInfo->bmiHeader.biClrUsed = 0;
m_pBmpInfo->bmiHeader.biClrImportant = 0;
return StretchDIBits(hdc, xDest, yDest, img->width, img->height, 0, 0,
img->width, img->height, img->imageData, m_pBmpInfo, DIB_RGB_COLORS, SRCCOPY);
}
Usage: DrawImageToHDC(img, pDC->m_hDC, Area.left, Area.top, DIB_RGB_COLORS, SRCCOPY);