Convert unsigned char * to FlyCapture2 Image for cvShowImage OpenCV - c++

I'm using three Point Grey cameras to acquire images and save them to my hard disk.
I'm using the MultiCameraWriteToDiskEx example and it works great, but I would also like to display images during the acquisition, so I'm trying to convert FlyCapture images to a readable OpenCV format in order to show them with the cvShowImage() or imshow() functions.
I have a function IplImage* ConvertImageToOpenCV(Image* pImage) which can convert a FlyCapture2 Image* to an OpenCV IplImage*, but I do not know how to correctly convert an unsigned char * to a FlyCapture2::Image* in the doGrabLoop() function, which captures frames from the cameras.
Can you help me, please?
I'm not very confident with C/C++ :(
In particular, I do not know how to convert g_arImageplus[ uiCamera ].image.pData in order to pass it to ConvertImageToOpenCV().
(Is it correct to use g_arImageplus[ uiCamera ].image.pData?)
IplImage* ConvertImageToOpenCV(Image* pImage)
{
    IplImage* cvImage = NULL;
    bool bColor = true;
    CvSize mySize;
    mySize.height = pImage->GetRows();
    mySize.width = pImage->GetCols();
    printf("ciao %d\n", pImage->GetPixelFormat());

    switch (pImage->GetPixelFormat())
    {
    case PIXEL_FORMAT_MONO8:
        cvImage = cvCreateImageHeader(mySize, 8, 1);
        cvImage->depth = IPL_DEPTH_8U;
        cvImage->nChannels = 1;
        bColor = false;
        break;
    case PIXEL_FORMAT_411YUV8:
        cvImage = cvCreateImageHeader(mySize, 8, 3);
        cvImage->depth = IPL_DEPTH_8U;
        cvImage->nChannels = 3;
        break;
    case PIXEL_FORMAT_422YUV8:
        cvImage = cvCreateImageHeader(mySize, 8, 3);
        cvImage->depth = IPL_DEPTH_8U;
        cvImage->nChannels = 3;
        break;
    case PIXEL_FORMAT_444YUV8:
        cvImage = cvCreateImageHeader(mySize, 8, 3);
        cvImage->depth = IPL_DEPTH_8U;
        cvImage->nChannels = 3;
        break;
    case PIXEL_FORMAT_RGB8:
        cvImage = cvCreateImageHeader(mySize, 8, 3);
        cvImage->depth = IPL_DEPTH_8U;
        cvImage->nChannels = 3;
        break;
    case PIXEL_FORMAT_MONO16:
        cvImage = cvCreateImageHeader(mySize, 16, 1);
        cvImage->depth = IPL_DEPTH_16U;
        cvImage->nChannels = 1;
        bColor = false;
        break;
    case PIXEL_FORMAT_RGB16:
        cvImage = cvCreateImageHeader(mySize, 16, 3);
        cvImage->depth = IPL_DEPTH_16U;
        cvImage->nChannels = 3;
        break;
    case PIXEL_FORMAT_S_MONO16:
        cvImage = cvCreateImageHeader(mySize, 16, 1);
        cvImage->depth = IPL_DEPTH_16U;
        cvImage->nChannels = 1;
        bColor = false;
        break;
    case PIXEL_FORMAT_S_RGB16:
        cvImage = cvCreateImageHeader(mySize, 16, 3);
        cvImage->depth = IPL_DEPTH_16U;
        cvImage->nChannels = 3;
        break;
    case PIXEL_FORMAT_RAW8:
        cvImage = cvCreateImageHeader(mySize, 8, 3);
        cvImage->depth = IPL_DEPTH_8U;
        cvImage->nChannels = 3;
        break;
    case PIXEL_FORMAT_RAW16:
        cvImage = cvCreateImageHeader(mySize, 8, 3);
        cvImage->depth = IPL_DEPTH_8U;
        cvImage->nChannels = 3;
        break;
    case PIXEL_FORMAT_MONO12:
        printf("Not supported by OpenCV\n");
        return NULL; // cvImage was never created; falling through would dereference NULL
    case PIXEL_FORMAT_RAW12:
        printf("Not supported by OpenCV\n");
        return NULL; // same: bail out instead of using the NULL header below
    case PIXEL_FORMAT_BGR:
        cvImage = cvCreateImageHeader(mySize, 8, 3);
        cvImage->depth = IPL_DEPTH_8U;
        cvImage->nChannels = 3;
        break;
    case PIXEL_FORMAT_BGRU:
        cvImage = cvCreateImageHeader(mySize, 8, 4);
        cvImage->depth = IPL_DEPTH_8U;
        cvImage->nChannels = 4;
        break;
    case PIXEL_FORMAT_RGBU:
        cvImage = cvCreateImageHeader(mySize, 8, 4);
        cvImage->depth = IPL_DEPTH_8U;
        cvImage->nChannels = 4;
        break;
    default:
        printf("Some error occurred...\n");
        return NULL;
    }

    if (bColor)
    {
        if (!bInitialized)
        {
            colorImage.SetData(new unsigned char[pImage->GetCols() * pImage->GetRows() * 3],
                               pImage->GetCols() * pImage->GetRows() * 3);
            bInitialized = true;
        }
        pImage->Convert(PIXEL_FORMAT_BGR, &colorImage); // needs to be BGR to be saved
        cvImage->width = colorImage.GetCols();
        cvImage->height = colorImage.GetRows();
        cvImage->widthStep = colorImage.GetStride();
        cvImage->origin = 0; // interleaved color channels
        cvImage->imageDataOrigin = (char*)colorImage.GetData(); // DataOrigin and Data same pointer, no ROI
        cvImage->imageData = (char*)colorImage.GetData();
        cvImage->nSize = sizeof(IplImage);
        cvImage->imageSize = cvImage->height * cvImage->widthStep;
    }
    else
    {
        cvImage->imageDataOrigin = (char*)pImage->GetData();
        cvImage->imageData = (char*)pImage->GetData();
        cvImage->widthStep = pImage->GetStride();
        cvImage->nSize = sizeof(IplImage);
        cvImage->imageSize = cvImage->height * cvImage->widthStep;
        // at this point cvImage contains a valid IplImage
    }
    return cvImage;
}
//
// Grab and test loop
//
int doGrabLoop()
{
    FlyCaptureError error = FLYCAPTURE_FAILED;
    unsigned int aruiPrevSeqNum[ _MAX_CAMERAS ];
    unsigned int aruiDelta[ _MAX_CAMERAS ];
    unsigned int aruiCycles[ _MAX_CAMERAS ];
    HANDLE arhFile[ _MAX_CAMERAS ];
    DWORD ardwBytesWritten[ _MAX_CAMERAS ];
    DWORD dwTotalKiloBytesWritten = 0;
    bool bMissed = false;
    bool bOutOfSync = false;
    unsigned int uiMissedImages = 0;
    unsigned int uiOutOfSyncImages = 0;
    __int64 nStartTime = 0;
    __int64 nEndTime = 0;
    __int64 nDifference = 0;
    __int64 nTotalTime = 0;
    __int64 nGlobalStartTime = 0;
    __int64 nGlobalEndTime = 0;
    __int64 nGlobalTotalTime = 0;
    __int64 nFrequency = 0;

    QueryPerformanceFrequency( (LARGE_INTEGER*)&nFrequency );
    QueryPerformanceCounter( (LARGE_INTEGER*)&nGlobalStartTime );
    printf( "Starting grab...\n" );

    // Create files to write to
    if ( createFiles( arhFile ) != 0 )
    {
        printf( "There was an error creating the files\n" );
        return -1;
    }

    BOOL bSuccess;

    //
    // Start grabbing the images
    //
    for( int iImage = 0; iImage < g_iNumImagesToGrab; iImage++ )
    {
#ifdef _VERBOSE
        printf( "Grabbing image %d\n", iImage );
#else
        printf( "." );
#endif
        unsigned int uiCamera = 0;

        // Grab an image from each camera
        for( uiCamera = 0; uiCamera < g_uiNumCameras; uiCamera++ )
        {
            error = flycaptureLockNext( g_arContext[uiCamera], &g_arImageplus[uiCamera] );
            _HANDLE_ERROR( error, "flycaptureLockNext()" );

            // Save image dimensions & bayer info from first image for each camera
            if( iImage == 0 )
            {
                g_arImageTemplate[uiCamera] = g_arImageplus[uiCamera].image;
                error = flycaptureGetColorTileFormat( g_arContext[uiCamera], &g_arBayerTile[uiCamera] );
                _HANDLE_ERROR( error, "flycaptureGetColorTileFormat()" );
            }
        }

        for( uiCamera = 0; uiCamera < g_uiNumCameras; uiCamera++ )
        {
            // Start timer
            QueryPerformanceCounter( (LARGE_INTEGER*)&nStartTime );

            // Calculate the size of the image to be written
            int iImageSize = 0;
            int iRowInc = g_arImageplus[uiCamera].image.iRowInc;
            int iRows = g_arImageplus[uiCamera].image.iRows;
            iImageSize = iRowInc * iRows;

            // ERROR: HOW CAN I CONVERT g_arImageplus[ uiCamera ].image.pData
            // IN ORDER TO USE IT WITH THE ConvertImageToOpenCV() FUNCTION?
            IplImage* destImage = ConvertImageToOpenCV(g_arImageplus[ uiCamera ].image.pData);
            cvShowImage("prova", destImage);
            waitKey(1);

            // Write to the file
            bSuccess = WriteFile(
                arhFile[uiCamera],
                g_arImageplus[ uiCamera ].image.pData,
                iImageSize,
                &ardwBytesWritten[uiCamera],
                NULL );

            // End timer
            QueryPerformanceCounter( (LARGE_INTEGER*)&nEndTime );

            // Ensure that the write was successful
            if ( !bSuccess || ( ardwBytesWritten[uiCamera] != (unsigned)iImageSize ) )
            {
                printf( "Error writing to file for camera %u!\n", uiCamera );
                return -1;
            }

            // Update various counters
            dwTotalKiloBytesWritten += (ardwBytesWritten[uiCamera] / 1024);
            nDifference = nEndTime - nStartTime;
            nTotalTime += nDifference;

            // Keep track of the difference in image sequence numbers (uiSeqNum)
            // in order to determine if any images have been missed. A difference
            // greater than 1 indicates that an image has been missed.
            if( iImage == 0 )
            {
                // This is the first image, set up the variables
                aruiPrevSeqNum[uiCamera] = g_arImageplus[uiCamera].uiSeqNum;
                aruiDelta[uiCamera] = 1;
            }
            else
            {
                // Get the difference in sequence numbers between the current
                // image and the last image we received
                aruiDelta[uiCamera] =
                    g_arImageplus[uiCamera].uiSeqNum - aruiPrevSeqNum[uiCamera];
            }

            if( aruiDelta[uiCamera] != 1 )
            {
                // We have missed an image.
                bMissed = true;
                uiMissedImages += aruiDelta[uiCamera] - 1;
            }
            else
            {
                bMissed = false;
            }

            aruiPrevSeqNum[uiCamera] = g_arImageplus[uiCamera].uiSeqNum;

            // Calculate the cycle count for the camera
            aruiCycles[uiCamera] =
                g_arImageplus[uiCamera].image.timeStamp.ulCycleSeconds * 8000 +
                g_arImageplus[uiCamera].image.timeStamp.ulCycleCount;

            // Determine the difference of the timestamp for every image from the
            // first camera. If the difference is greater than 1 cycle count,
            // register the camera as being out of synchronization.
            int iDeltaFrom0 = abs( (int)(aruiCycles[uiCamera] - aruiCycles[0]) );
            if( ( iDeltaFrom0 % ( 128 * 8000 - 1 ) ) > 1 )
            {
                bOutOfSync = true;
                uiOutOfSyncImages++;
            }
            else
            {
                bOutOfSync = false;
            }

#ifdef _VERBOSE
            // Output is in the following order:
            // - The index of the image being captured
            // - The index of the camera that is currently being captured
            // - The time taken to write the image to disk (in seconds)
            // - Number of kilobytes written
            // - Write speed (in MB/s)
            // - Sequence number
            // - Cycle seconds in timestamp
            // - Cycle count in timestamp
            // - Delta from 0th value
            // - Missed an image?
            // - Out of sync?
            double dWriteTime = (double)nDifference / (double)nFrequency;
            printf(
                "%04d: \t%02u\t%0.5f\t%.0lf\t%.2lf\t%04u\t%03u.%04u\t%d\t%s %s\n",
                iImage,
                uiCamera,
                dWriteTime,
                (double)ardwBytesWritten[ uiCamera ] / 1024.0,
                (double)ardwBytesWritten[ uiCamera ] / ( 1024.0 * 1024.0 * dWriteTime ),
                g_arImageplus[ uiCamera ].uiSeqNum,
                g_arImageplus[ uiCamera ].image.timeStamp.ulCycleSeconds,
                g_arImageplus[ uiCamera ].image.timeStamp.ulCycleCount,
                iDeltaFrom0,
                bMissed ? "Y" : "N",
                bOutOfSync ? "Y" : "N" );
#endif
        }

        // Unlock the images, handing the buffers back to the buffer pool.
        for( uiCamera = 0; uiCamera < g_uiNumCameras; uiCamera++ )
        {
            error = flycaptureUnlock(
                g_arContext[uiCamera], g_arImageplus[uiCamera].uiBufferIndex );
            _HANDLE_ERROR( error, "flycaptureUnlock()" );
        }
    }

    //
    // Done grabbing images
    //
    QueryPerformanceCounter( (LARGE_INTEGER*)&nGlobalEndTime );
    nGlobalTotalTime = nGlobalEndTime - nGlobalStartTime;
    double dGlobalTotalTime = (double)nGlobalTotalTime / (double)nFrequency;
    double dTotalTime = (double)nTotalTime / (double)nFrequency;

    // Report on the results.
    // Burst time is the time that was spent writing to disk only.
    // Overall time is the total time taken, including image grabs, calculations etc.
    printf(
        "\nBurst: Wrote %.1lfMB in %0.2fs ( %.2lfMB/sec )\n",
        (double)( dwTotalKiloBytesWritten / 1024 ),
        dTotalTime,
        (double)( dwTotalKiloBytesWritten / ( 1024 * dTotalTime ) ) );
    printf(
        "Overall: Wrote %.1lfMB in %0.2fs ( %.2lfMB/sec )\n",
        (double)( dwTotalKiloBytesWritten / 1024 ),
        dGlobalTotalTime,
        (double)( dwTotalKiloBytesWritten / ( 1024 * dGlobalTotalTime ) ) );
    printf( g_bSyncSuccess ? "Sync success\n" : "Sync failed\n" );
    printf( "Missed images = %u.\n", uiMissedImages );
    printf( "Out of sync images = %u.\n", uiOutOfSyncImages );

    // Close file handles
    for ( unsigned int uiCamera = 0; uiCamera < g_uiNumCameras; uiCamera++ )
    {
        CloseHandle( arhFile[uiCamera] );
    }
    return 0;
}

Your conversion method:
IplImage* ConvertImageToOpenCV(Image* pImage)
takes an Image pointer, so construct a new Image to pass to it from the data you have. Here's the documentation for the Image class; if you use this constructor, it will not hold onto the buffer or copy it:
Image( unsigned int rows,
       unsigned int cols,
       unsigned int stride,
       unsigned char* pData,
       unsigned int dataSize,
       PixelFormat format,
       BayerTileFormat bayerFormat = NONE );
So your code would look something like:
// Pull these values from your other image structure
Image image(rows, cols, stride, pData, dataSize, format, bayerFormat);
// Pass the address of the temporary image to your conversion
IplImage* opencvImage = ConvertImageToOpenCV(&image);
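In doGrabLoop() those values are already at hand on the FlyCaptureImage struct (iRows, iCols, iRowInc and pData are used elsewhere in the loop). A minimal sketch, assuming the cameras deliver PIXEL_FORMAT_RAW8 (substitute whatever format your cameras actually produce); since the constructor does not copy the buffer, the conversion and display must happen before flycaptureUnlock() hands the buffer back:
// Sketch for the inner camera loop of doGrabLoop(). The pixel format
// below is an assumption -- replace it with your cameras' actual format.
FlyCaptureImage& fcImage = g_arImageplus[uiCamera].image;

Image image( fcImage.iRows,
             fcImage.iCols,
             fcImage.iRowInc,                 // stride in bytes
             fcImage.pData,
             fcImage.iRowInc * fcImage.iRows, // dataSize
             PIXEL_FORMAT_RAW8 );

IplImage* destImage = ConvertImageToOpenCV(&image);
if (destImage != NULL)
{
    cvShowImage("prova", destImage);
    cvWaitKey(1);
    cvReleaseImageHeader(&destImage); // releases the header only, not the pixel data
}
Releasing the header after each display avoids leaking one IplImage header per frame, since ConvertImageToOpenCV() creates a new header on every call.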

Related

vector subscript out of range C++ OpenCV camera calibration

I am trying Martin Peris's OpenCV stereo camera calibration code from this link (http://blog.martinperis.com/2011/01/opencv-stereo-camera-calibration.html). When I use his given camera calibration images it works fine, but when I try my own images it gives the following error:
Debug Assertion Failed!
Program: C:\Windows\system32\MSVCP110D.dll
File: C:\Program Files (x86)\Microsoft Visual Studio 11.0\include\vector
Line: 1140
Expression: vector subscript out of range
I am running the code on 64-bit Windows. I built the project using CMake. The only way I know to run this program is to open the Developer Command Prompt for VS2012, cd to the debug directory of my project, and call the program with this command: "stereo_camera_calibrate list.txt 9 6 2.2", as explained in the blog linked above. The problem with running this console application through the command prompt is that I DO NOT KNOW WHERE THE ERROR HAPPENED IN THE CODE!
Update: I just added a couple of lines of code to check whether the images are loaded or not. Apparently the images are not being loaded, since I get the message "image not loaded" on screen! My images are BMP. Are there any problems with loading .bmp images using cvLoadImage?
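cvLoadImage does support .bmp files, so a NULL return here usually means the path could not be resolved, typically because the entries in list.txt are relative to a directory other than the one the program is run from. A minimal standalone sketch to check this, reusing the same list-parsing logic as the calibration code below:
#include <cstdio>
#include <cstring>
#include <cctype>

// Reads the same list file and reports which entries cannot be opened.
// Entries that fail here will also fail in cvLoadImage.
int main(int argc, char* argv[])
{
    if (argc != 2) { fprintf(stderr, "usage: %s imageList\n", argv[0]); return 1; }
    FILE* f = fopen(argv[1], "rt");
    if (!f) { fprintf(stderr, "can not open file %s\n", argv[1]); return 1; }
    char buf[1024];
    while (fgets(buf, sizeof(buf), f))
    {
        size_t len = strlen(buf);
        while (len > 0 && isspace((unsigned char)buf[len - 1]))
            buf[--len] = '\0';
        if (len == 0 || buf[0] == '#') continue;
        FILE* img = fopen(buf, "rb");
        printf("%s: %s\n", buf, img ? "found" : "NOT FOUND");
        if (img) fclose(img);
    }
    fclose(f);
    return 0;
}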
Here is the code; any help is appreciated.
#pragma warning( disable: 4996 )
/* *************** License:**************************
Oct. 3, 2008
Right to use this code in any way you want without warrenty, support or any guarentee of it working.
BOOK: It would be nice if you cited it:
Learning OpenCV: Computer Vision with the OpenCV Library
by Gary Bradski and Adrian Kaehler
Published by O'Reilly Media, October 3, 2008
AVAILABLE AT:
http://www.amazon.com/Learning-OpenCV-Computer-Vision-Library/dp/0596516134
Or: http://oreilly.com/catalog/9780596516130/
ISBN-10: 0596516134 or: ISBN-13: 978-0596516130
OTHER OPENCV SITES:
* The source code is on sourceforge at:
http://sourceforge.net/projects/opencvlibrary/
* The OpenCV wiki page (As of Oct 1, 2008 this is down for changing over servers, but should come back):
http://opencvlibrary.sourceforge.net/
* An active user group is at:
http://tech.groups.yahoo.com/group/OpenCV/
* The minutes of weekly OpenCV development meetings are at:
http://pr.willowgarage.com/wiki/OpenCV
************************************************** */
/*
Modified by Martin Peris Martorell (info#martinperis.com) in order to accept some configuration
parameters and store all the calibration data as xml files.
*/
#include "cv.h"
#include "cxmisc.h"
#include "highgui.h"
#include "cvaux.h"
#include <vector>
#include <string>
#include <algorithm>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
using namespace std;
//
// Given a list of chessboard images, the number of corners (nx, ny)
// on the chessboards, and a flag: useCalibrated for calibrated (0) or
// uncalibrated (1: use cvStereoCalibrate(), 2: compute fundamental
// matrix separately) stereo. Calibrate the cameras and display the
// rectified results along with the computed disparity images.
//
static void
StereoCalib(const char* imageList, int nx, int ny, int useUncalibrated, float _squareSize)
{
int displayCorners = 1;
int showUndistorted = 1;
bool isVerticalStereo = false;//OpenCV can handle left-right
//or up-down camera arrangements
const int maxScale = 1;
const float squareSize = _squareSize; //Chessboard square size in cm
FILE* f = fopen(imageList, "rt");
int i, j, lr, nframes, n = nx*ny, N = 0;
vector<string> imageNames[2];
vector<CvPoint3D32f> objectPoints;
vector<CvPoint2D32f> points[2];
vector<int> npoints;
vector<uchar> active[2];
vector<CvPoint2D32f> temp(n);
CvSize imageSize = {0,0};
// ARRAY AND VECTOR STORAGE:
double M1[3][3], M2[3][3], D1[5], D2[5];
double R[3][3], T[3], E[3][3], F[3][3];
double Q[4][4];
CvMat _M1 = cvMat(3, 3, CV_64F, M1 );
CvMat _M2 = cvMat(3, 3, CV_64F, M2 );
CvMat _D1 = cvMat(1, 5, CV_64F, D1 );
CvMat _D2 = cvMat(1, 5, CV_64F, D2 );
CvMat _R = cvMat(3, 3, CV_64F, R );
CvMat _T = cvMat(3, 1, CV_64F, T );
CvMat _E = cvMat(3, 3, CV_64F, E );
CvMat _F = cvMat(3, 3, CV_64F, F );
CvMat _Q = cvMat(4,4, CV_64F, Q);
if( displayCorners )
cvNamedWindow( "corners", 1 );
// READ IN THE LIST OF CHESSBOARDS:
if( !f )
{
fprintf(stderr, "can not open file %s\n", imageList );
return;
}
for(i=0;;i++)
{
char buf[1024];
int count = 0, result=0;
lr = i % 2;
vector<CvPoint2D32f>& pts = points[lr];
if( !fgets( buf, sizeof(buf)-3, f ))
break;
size_t len = strlen(buf);
while( len > 0 && isspace(buf[len-1]))
buf[--len] = '\0';
if( buf[0] == '#')
continue;
IplImage* img = cvLoadImage( buf, 0 );
if(img == NULL)
printf("image not loaded \n");
else
printf("Loaded image \n");
if( !img )
break;
imageSize = cvGetSize(img);
imageNames[lr].push_back(buf);
//FIND CHESSBOARDS AND CORNERS THEREIN:
for( int s = 1; s <= maxScale; s++ )
{
IplImage* timg = img;
if( s > 1 )
{
timg = cvCreateImage(cvSize(img->width*s,img->height*s),
img->depth, img->nChannels );
cvResize( img, timg, CV_INTER_CUBIC );
}
result = cvFindChessboardCorners( timg, cvSize(nx, ny),
&temp[0], &count,
CV_CALIB_CB_ADAPTIVE_THRESH |
CV_CALIB_CB_NORMALIZE_IMAGE);
if( timg != img )
cvReleaseImage( &timg );
if( result || s == maxScale )
for( j = 0; j < count; j++ )
{
temp[j].x /= s;
temp[j].y /= s;
}
if( result )
break;
}
if( displayCorners )
{
printf("%s\n", buf);
IplImage* cimg = cvCreateImage( imageSize, 8, 3 );
cvCvtColor( img, cimg, CV_GRAY2BGR );
cvDrawChessboardCorners( cimg, cvSize(nx, ny), &temp[0],
count, result );
cvShowImage( "corners", cimg );
cvReleaseImage( &cimg );
if( cvWaitKey(0) == 27 ) //Allow ESC to quit
exit(-1);
}
else
putchar('.');
N = pts.size();
pts.resize(N + n, cvPoint2D32f(0,0));
active[lr].push_back((uchar)result);
//assert( result != 0 );
if( result )
{
//Calibration will suffer without subpixel interpolation
cvFindCornerSubPix( img, &temp[0], count,
cvSize(11, 11), cvSize(-1,-1),
cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,
30, 0.01) );
copy( temp.begin(), temp.end(), pts.begin() + N );
}
cvReleaseImage( &img );
}
fclose(f);
printf("\n");
// HARVEST CHESSBOARD 3D OBJECT POINT LIST:
nframes = active[0].size();//Number of good chessboards found
objectPoints.resize(nframes*n);
for( i = 0; i < ny; i++ )
for( j = 0; j < nx; j++ )
objectPoints[i*nx + j] = cvPoint3D32f(i*squareSize, j*squareSize, 0);
for( i = 1; i < nframes; i++ )
copy( objectPoints.begin(), objectPoints.begin() + n,
objectPoints.begin() + i*n );
npoints.resize(nframes,n);
N = nframes*n;
CvMat _objectPoints = cvMat(1, N, CV_32FC3, &objectPoints[0] );
CvMat _imagePoints1 = cvMat(1, N, CV_32FC2, &points[0][0] );
CvMat _imagePoints2 = cvMat(1, N, CV_32FC2, &points[1][0] );
CvMat _npoints = cvMat(1, npoints.size(), CV_32S, &npoints[0] );
cvSetIdentity(&_M1);
cvSetIdentity(&_M2);
cvZero(&_D1);
cvZero(&_D2);
// CALIBRATE THE STEREO CAMERAS
printf("Running stereo calibration ...");
fflush(stdout);
cvStereoCalibrate( &_objectPoints, &_imagePoints1,
&_imagePoints2, &_npoints,
&_M1, &_D1, &_M2, &_D2,
imageSize, &_R, &_T, &_E, &_F,
cvTermCriteria(CV_TERMCRIT_ITER+
CV_TERMCRIT_EPS, 100, 1e-5),
CV_CALIB_FIX_ASPECT_RATIO +
CV_CALIB_ZERO_TANGENT_DIST +
CV_CALIB_SAME_FOCAL_LENGTH );
printf(" done\n");
// CALIBRATION QUALITY CHECK
// because the output fundamental matrix implicitly
// includes all the output information,
// we can check the quality of calibration using the
// epipolar geometry constraint: m2^t*F*m1=0
vector<CvPoint3D32f> lines[2];
points[0].resize(N);
points[1].resize(N);
_imagePoints1 = cvMat(1, N, CV_32FC2, &points[0][0] );
_imagePoints2 = cvMat(1, N, CV_32FC2, &points[1][0] );
lines[0].resize(N);
lines[1].resize(N);
CvMat _L1 = cvMat(1, N, CV_32FC3, &lines[0][0]);
CvMat _L2 = cvMat(1, N, CV_32FC3, &lines[1][0]);
//Always work in undistorted space
cvUndistortPoints( &_imagePoints1, &_imagePoints1,
&_M1, &_D1, 0, &_M1 );
cvUndistortPoints( &_imagePoints2, &_imagePoints2,
&_M2, &_D2, 0, &_M2 );
cvComputeCorrespondEpilines( &_imagePoints1, 1, &_F, &_L1 );
cvComputeCorrespondEpilines( &_imagePoints2, 2, &_F, &_L2 );
double avgErr = 0;
for( i = 0; i < N; i++ )
{
double err = fabs(points[0][i].x*lines[1][i].x +
points[0][i].y*lines[1][i].y + lines[1][i].z)
+ fabs(points[1][i].x*lines[0][i].x +
points[1][i].y*lines[0][i].y + lines[0][i].z);
avgErr += err;
}
printf( "avg err = %g\n", avgErr/(nframes*n) );
//COMPUTE AND DISPLAY RECTIFICATION
if( showUndistorted )
{
CvMat* mx1 = cvCreateMat( imageSize.height,
imageSize.width, CV_32F );
CvMat* my1 = cvCreateMat( imageSize.height,
imageSize.width, CV_32F );
CvMat* mx2 = cvCreateMat( imageSize.height,
imageSize.width, CV_32F );
CvMat* my2 = cvCreateMat( imageSize.height,
imageSize.width, CV_32F );
CvMat* img1r = cvCreateMat( imageSize.height,
imageSize.width, CV_8U );
CvMat* img2r = cvCreateMat( imageSize.height,
imageSize.width, CV_8U );
CvMat* disp = cvCreateMat( imageSize.height,
imageSize.width, CV_16S );
CvMat* vdisp = cvCreateMat( imageSize.height,
imageSize.width, CV_8U );
CvMat* pair;
double R1[3][3], R2[3][3], P1[3][4], P2[3][4];
CvMat _R1 = cvMat(3, 3, CV_64F, R1);
CvMat _R2 = cvMat(3, 3, CV_64F, R2);
// IF BY CALIBRATED (BOUGUET'S METHOD)
if( useUncalibrated == 0 )
{
CvMat _P1 = cvMat(3, 4, CV_64F, P1);
CvMat _P2 = cvMat(3, 4, CV_64F, P2);
cvStereoRectify( &_M1, &_M2, &_D1, &_D2, imageSize,
&_R, &_T,
&_R1, &_R2, &_P1, &_P2, &_Q,
0/*CV_CALIB_ZERO_DISPARITY*/ );
isVerticalStereo = fabs(P2[1][3]) > fabs(P2[0][3]);
//Precompute maps for cvRemap()
cvInitUndistortRectifyMap(&_M1,&_D1,&_R1,&_P1,mx1,my1);
cvInitUndistortRectifyMap(&_M2,&_D2,&_R2,&_P2,mx2,my2);
//Save parameters
cvSave("M1.xml",&_M1);
cvSave("D1.xml",&_D1);
cvSave("R1.xml",&_R1);
cvSave("P1.xml",&_P1);
cvSave("M2.xml",&_M2);
cvSave("D2.xml",&_D2);
cvSave("R2.xml",&_R2);
cvSave("P2.xml",&_P2);
cvSave("Q.xml",&_Q);
cvSave("mx1.xml",mx1);
cvSave("my1.xml",my1);
cvSave("mx2.xml",mx2);
cvSave("my2.xml",my2);
}
//OR ELSE HARTLEY'S METHOD
else if( useUncalibrated == 1 || useUncalibrated == 2 )
// use intrinsic parameters of each camera, but
// compute the rectification transformation directly
// from the fundamental matrix
{
double H1[3][3], H2[3][3], iM[3][3];
CvMat _H1 = cvMat(3, 3, CV_64F, H1);
CvMat _H2 = cvMat(3, 3, CV_64F, H2);
CvMat _iM = cvMat(3, 3, CV_64F, iM);
//Just to show you could have independently used F
if( useUncalibrated == 2 )
cvFindFundamentalMat( &_imagePoints1,
&_imagePoints2, &_F);
cvStereoRectifyUncalibrated( &_imagePoints1,
&_imagePoints2, &_F,
imageSize,
&_H1, &_H2, 3);
cvInvert(&_M1, &_iM);
cvMatMul(&_H1, &_M1, &_R1);
cvMatMul(&_iM, &_R1, &_R1);
cvInvert(&_M2, &_iM);
cvMatMul(&_H2, &_M2, &_R2);
cvMatMul(&_iM, &_R2, &_R2);
//Precompute map for cvRemap()
cvInitUndistortRectifyMap(&_M1,&_D1,&_R1,&_M1,mx1,my1);
cvInitUndistortRectifyMap(&_M2,&_D1,&_R2,&_M2,mx2,my2);
}
else
assert(0);
cvNamedWindow( "rectified", 1 );
// RECTIFY THE IMAGES AND FIND DISPARITY MAPS
if( !isVerticalStereo )
pair = cvCreateMat( imageSize.height, imageSize.width*2,
CV_8UC3 );
else
pair = cvCreateMat( imageSize.height*2, imageSize.width,
CV_8UC3 );
//Setup for finding stereo correspondences
CvStereoBMState *BMState = cvCreateStereoBMState();
assert(BMState != 0);
BMState->preFilterSize=41;
BMState->preFilterCap=31;
BMState->SADWindowSize=41;
BMState->minDisparity=-64;
BMState->numberOfDisparities=128;
BMState->textureThreshold=10;
BMState->uniquenessRatio=15;
for( i = 0; i < nframes; i++ )
{
IplImage* img1=cvLoadImage(imageNames[0][i].c_str(),0);
IplImage* img2=cvLoadImage(imageNames[1][i].c_str(),0);
if( img1 && img2 )
{
CvMat part;
cvRemap( img1, img1r, mx1, my1 );
cvRemap( img2, img2r, mx2, my2 );
if( !isVerticalStereo || useUncalibrated != 0 )
{
// When the stereo camera is oriented vertically,
// useUncalibrated==0 does not transpose the
// image, so the epipolar lines in the rectified
// images are vertical. Stereo correspondence
// function does not support such a case.
cvFindStereoCorrespondenceBM( img1r, img2r, disp,
BMState);
cvNormalize( disp, vdisp, 0, 256, CV_MINMAX );
cvNamedWindow( "disparity" );
cvShowImage( "disparity", vdisp );
}
if( !isVerticalStereo )
{
cvGetCols( pair, &part, 0, imageSize.width );
cvCvtColor( img1r, &part, CV_GRAY2BGR );
cvGetCols( pair, &part, imageSize.width,
imageSize.width*2 );
cvCvtColor( img2r, &part, CV_GRAY2BGR );
for( j = 0; j < imageSize.height; j += 16 )
cvLine( pair, cvPoint(0,j),
cvPoint(imageSize.width*2,j),
CV_RGB(0,255,0));
}
else
{
cvGetRows( pair, &part, 0, imageSize.height );
cvCvtColor( img1r, &part, CV_GRAY2BGR );
cvGetRows( pair, &part, imageSize.height,
imageSize.height*2 );
cvCvtColor( img2r, &part, CV_GRAY2BGR );
for( j = 0; j < imageSize.width; j += 16 )
cvLine( pair, cvPoint(j,0),
cvPoint(j,imageSize.height*2),
CV_RGB(0,255,0));
}
cvShowImage( "rectified", pair );
if( cvWaitKey() == 27 )
break;
}
cvReleaseImage( &img1 );
cvReleaseImage( &img2 );
}
cvReleaseStereoBMState(&BMState);
cvReleaseMat( &mx1 );
cvReleaseMat( &my1 );
cvReleaseMat( &mx2 );
cvReleaseMat( &my2 );
cvReleaseMat( &img1r );
cvReleaseMat( &img2r );
cvReleaseMat( &disp );
}
}
int main(int argc, char *argv[])
{
int nx, ny;
float squareSize;
int fail = 0;
//Check command line
if (argc != 5)
{
fprintf(stderr,"USAGE: %s imageList nx ny squareSize\n",argv[0]);
fprintf(stderr,"\t imageList : Filename of the image list (string). Example : list.txt\n");
fprintf(stderr,"\t nx : Number of horizontal squares (int > 0). Example : 9\n");
fprintf(stderr,"\t ny : Number of vertical squares (int > 0). Example : 6\n");
fprintf(stderr,"\t squareSize : Size of a square (float > 0). Example : 2.5\n");
return 1;
}
nx = atoi(argv[2]);
ny = atoi(argv[3]);
squareSize = (float)atof(argv[4]);
if (nx <= 0)
{
fail = 1;
fprintf(stderr, "ERROR: nx value can not be <= 0\n");
}
if (ny <= 0)
{
fail = 1;
fprintf(stderr, "ERROR: ny value can not be <= 0\n");
}
if (squareSize <= 0.0)
{
fail = 1;
fprintf(stderr, "ERROR: squareSize value can not be <= 0\n");
}
if(fail != 0) return 1;
StereoCalib(argv[1], nx, ny, 0, squareSize);
return 0;
}

C++ How to create a bitmap file

I am trying to figure out how to create a bitmap file in C++ in Visual Studio. Currently I take in the file name and add the ".bmp" extension to create the file. I want to know how I can change the pixels of the file to produce different colors or patterns (i.e. like a checkerboard). This is the function that I have, and I believe that I have to send 3 separate bytes at a time in order to establish the color of each pixel.
void makeCheckerboardBMP(string fileName, int squaresize, int n) {
    ofstream ofs;
    ofs.open(fileName + ".bmp");
    writeHeader(ofs, n, n);
    for(int row = 0; row < n; row++) {
        for(int col = 0; col < n; col++) {
            if(col % 2 == 0) {
                ofs << 0;
                ofs << 0;
                ofs << 0;
            } else {
                ofs << 255;
                ofs << 255;
                ofs << 255;
            }
        }
    }
}
void writeHeader(ostream& out, int width, int height) {
    if (width % 4 != 0) {
        cerr << "ERROR: There is a windows-imposed requirement on BMP that the width be a multiple of 4.\n";
        cerr << "Your width does not meet this requirement, hence this will fail. You can fix this\n";
        cerr << "by increasing the width to a multiple of 4." << endl;
        exit(1);
    }
    BITMAPFILEHEADER tWBFH;
    tWBFH.bfType = 0x4d42;
    tWBFH.bfSize = 14 + 40 + (width*height*3);
    tWBFH.bfReserved1 = 0;
    tWBFH.bfReserved2 = 0;
    tWBFH.bfOffBits = 14 + 40;
    BITMAPINFOHEADER tW2BH;
    memset(&tW2BH, 0, 40);
    tW2BH.biSize = 40;
    tW2BH.biWidth = width;
    tW2BH.biHeight = height;
    tW2BH.biPlanes = 1;
    tW2BH.biBitCount = 24;
    tW2BH.biCompression = 0;
    out.write((char*)(&tWBFH), 14);
    out.write((char*)(&tW2BH), 40);
}
These are the two functions I am using in my code (one grayscale, one RGB); they might give you a hint about what's going wrong.
Note: they are written to work, not to be efficient.
void SaveBitmapToFile( BYTE* pBitmapBits, LONG lWidth, LONG lHeight,WORD wBitsPerPixel, LPCTSTR lpszFileName )
{
RGBQUAD palette[256];
for(int i = 0; i < 256; ++i)
{
palette[i].rgbBlue = (byte)i;
palette[i].rgbGreen = (byte)i;
palette[i].rgbRed = (byte)i;
}
BITMAPINFOHEADER bmpInfoHeader = {0};
// Set the size
bmpInfoHeader.biSize = sizeof(BITMAPINFOHEADER);
// Bit count
bmpInfoHeader.biBitCount = wBitsPerPixel;
// Use all colors
bmpInfoHeader.biClrImportant = 0;
// Use as many colors according to bits per pixel
bmpInfoHeader.biClrUsed = 0;
// Store as uncompressed
bmpInfoHeader.biCompression = BI_RGB;
// Set the height in pixels
bmpInfoHeader.biHeight = lHeight;
// Width of the Image in pixels
bmpInfoHeader.biWidth = lWidth;
// Default number of planes
bmpInfoHeader.biPlanes = 1;
// Calculate the image size in bytes
bmpInfoHeader.biSizeImage = lWidth* lHeight * (wBitsPerPixel/8);
BITMAPFILEHEADER bfh = {0};
// This value should be the values of the BM letters, i.e. 0x4D42
// 0x4D = 'M', 0x42 = 'B'; storing in reverse order to match endianness
bfh.bfType = 'B' + ('M' << 8);
// << 8 used to shift 'M' to the end
// Offset to the RGBQUAD
bfh.bfOffBits = sizeof(BITMAPINFOHEADER) + sizeof(BITMAPFILEHEADER) + sizeof(RGBQUAD) * 256;
// Total size of image including size of headers
bfh.bfSize = bfh.bfOffBits + bmpInfoHeader.biSizeImage;
// Create the file in disk to write
HANDLE hFile = CreateFile( lpszFileName,GENERIC_WRITE, 0,NULL,
CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL,NULL);
if( hFile == INVALID_HANDLE_VALUE ) // return if error opening file
{
    return;
}
DWORD dwWritten = 0;
// Write the File header
WriteFile( hFile, &bfh, sizeof(bfh), &dwWritten , NULL );
// Write the bitmap info header
WriteFile( hFile, &bmpInfoHeader, sizeof(bmpInfoHeader), &dwWritten, NULL );
// Write the palette
WriteFile( hFile, &palette[0], sizeof(RGBQUAD) * 256, &dwWritten, NULL );
// Write the RGB Data
if(lWidth%4 == 0)
{
WriteFile( hFile, pBitmapBits, bmpInfoHeader.biSizeImage, &dwWritten, NULL );
}
else
{
char* empty = new char[ 4 - lWidth % 4];
for(int i = 0; i < lHeight; ++i)
{
WriteFile( hFile, &pBitmapBits[i * lWidth], lWidth, &dwWritten, NULL );
WriteFile( hFile, empty, 4 - lWidth % 4, &dwWritten, NULL );
}
}
// Close the file handle
CloseHandle( hFile );
}
void SaveBitmapToFileColor( BYTE* pBitmapBits, LONG lWidth, LONG lHeight,WORD wBitsPerPixel, LPCTSTR lpszFileName )
{
BITMAPINFOHEADER bmpInfoHeader = {0};
// Set the size
bmpInfoHeader.biSize = sizeof(BITMAPINFOHEADER);
// Bit count
bmpInfoHeader.biBitCount = wBitsPerPixel;
// Use all colors
bmpInfoHeader.biClrImportant = 0;
// Use as many colors according to bits per pixel
bmpInfoHeader.biClrUsed = 0;
// Store as uncompressed
bmpInfoHeader.biCompression = BI_RGB;
// Set the height in pixels
bmpInfoHeader.biHeight = lHeight;
// Width of the Image in pixels
bmpInfoHeader.biWidth = lWidth;
// Default number of planes
bmpInfoHeader.biPlanes = 1;
// Calculate the image size in bytes
bmpInfoHeader.biSizeImage = lWidth* lHeight * (wBitsPerPixel/8);
BITMAPFILEHEADER bfh = {0};
// This value should be the values of the BM letters, i.e. 0x4D42
// 0x4D = 'M', 0x42 = 'B'; storing in reverse order to match endianness
bfh.bfType = 'B' + ('M' << 8);
// << 8 used to shift 'M' to the end
// Offset to the RGBQUAD
bfh.bfOffBits = sizeof(BITMAPINFOHEADER) + sizeof(BITMAPFILEHEADER);
// Total size of image including size of headers
bfh.bfSize = bfh.bfOffBits + bmpInfoHeader.biSizeImage;
// Create the file in disk to write
HANDLE hFile = CreateFile( lpszFileName,GENERIC_WRITE, 0,NULL,
CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL,NULL);
if( hFile == INVALID_HANDLE_VALUE ) // return if error opening file
{
    return;
}
DWORD dwWritten = 0;
// Write the File header
WriteFile( hFile, &bfh, sizeof(bfh), &dwWritten , NULL );
// Write the bitmap info header
WriteFile( hFile, &bmpInfoHeader, sizeof(bmpInfoHeader), &dwWritten, NULL );
// Write the palette
//WriteFile( hFile, &palette[0], sizeof(RGBQUAD) * 256, &dwWritten, NULL );
// Write the RGB Data
if(lWidth%4 == 0)
{
WriteFile( hFile, pBitmapBits, bmpInfoHeader.biSizeImage, &dwWritten, NULL );
}
else
{
char* empty = new char[ 4 - lWidth % 4];
for(int i = 0; i < lHeight; ++i)
{
WriteFile( hFile, &pBitmapBits[i * lWidth], lWidth, &dwWritten, NULL );
WriteFile( hFile, empty, 4 - lWidth % 4, &dwWritten, NULL );
}
}
// Close the file handle
CloseHandle( hFile );
}
Given that your writeHeader is properly implemented, this is almost correct. You need to fix two issues though:
You are writing one int per color channel. This should be one byte instead; you need to cast the literals to unsigned char.
Scanlines in bitmaps need to be DWORD-aligned. After your inner loop over col you need to write additional padding bytes to account for this, unless the size in bytes of the row is already a multiple of four.
You need to force the output to be written in binary format, not text (this is chosen when you open your file / create your stream), and to output all the values as bytes, not integers; this can be done in a number of ways, possibly the easiest being to write char(0) or char(255). You also need to start your file with a header section. There are a number of formats, which makes this too long to go into in an answer here, and some of them are down to preference as much as anything; there is a good summary in Wikipedia.
Basically you have to inform the receiving applications which format you are using, the number of rows and columns, and how the colours are stored.
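Putting both answers together, a corrected writer might look like the following sketch. It assumes the writeHeader() shown in the question; the row/column test is also changed to use squaresize, since testing only col % 2 draws stripes rather than a checkerboard:
#include <fstream>
#include <string>
using namespace std;

void writeHeader(ostream& out, int width, int height); // as defined in the question

void makeCheckerboardBMP(string fileName, int squaresize, int n)
{
    ofstream ofs;
    ofs.open(fileName + ".bmp", ios::binary); // binary mode, not text
    writeHeader(ofs, n, n);
    int padding = (4 - (n * 3) % 4) % 4;      // each scanline must be DWORD-aligned
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            bool black = ((row / squaresize) + (col / squaresize)) % 2 == 0;
            unsigned char v = black ? 0 : 255;
            ofs.put(v);  // blue  -- one byte per channel, not one int
            ofs.put(v);  // green
            ofs.put(v);  // red
        }
        for (int p = 0; p < padding; p++)
            ofs.put(0);
    }
}
Note that the writeHeader() in the question rejects widths that are not multiples of 4 anyway, so the padding loop writes nothing there; with the padding in place, that restriction could be lifted.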

Why does it take a long time to record video to a file?

I developed a video capture application on a device whose CPU is 624 MHz (ARM920T-PAX310) and which has 72 MB of RAM. The captured video is 1 minute long, but it takes almost 10 minutes to save the video file. I set a breakpoint and found where it takes a long time.
//------------------- set filename
HRESULT hResult;
CComPtr<IFileSinkFilter> pFileSinkFilter;
m_pMediaControl->Stop();
m_pMediaControl->Run();
CHK( m_pASFMultiplexer->QueryInterface( &pFileSinkFilter ) );
CHK( pFileSinkFilter->SetFileName( L"\\windows\\sample.asf", NULL ) );
// pFileSinkFilter.Release();
//------------------start record
LONGLONG dwStart = 0, dwEnd = 0;
WORD wStartCookie = 1, wEndCookie = 2;
HRESULT hResult = 0;
if( m_pCaptureGraphBuilder == NULL )
{
return FALSE;
}
//
dwStart=0;
dwEnd=MAXLONGLONG;
hResult = m_pCaptureGraphBuilder->ControlStream( &PIN_CATEGORY_CAPTURE, &MEDIATYPE_Video, m_pVideoCaptureFilter, &dwStart, &dwEnd, wStartCookie, wEndCookie );
Sleep(60000);//1 minute
//------------------stop record
HRESULT hResult = S_OK;
LONGLONG dwStart = 0, dwEnd = 0;
WORD wStartCookie = 1, wEndCookie = 2;
LONG lEventCode = 0;
LONG lParam1 = 0;
LONG lParam2 = 0;
if( m_pCaptureGraphBuilder == NULL )
{
return FALSE;
}
//
dwStart = 0;
hResult = m_pMediaSeeking->GetCurrentPosition( &dwEnd );
hResult = m_pCaptureGraphBuilder->ControlStream( &PIN_CATEGORY_CAPTURE, &MEDIATYPE_Video, m_pVideoCaptureFilter, &dwStart, &dwEnd, wStartCookie, wEndCookie );
//
while (true)
{
m_pMediaEvent->GetEvent( &lEventCode, &lParam1, &lParam2, INFINITE );
m_pMediaEvent->FreeEventParams( lEventCode, lParam1, lParam2 );
if( lEventCode == EC_STREAM_CONTROL_STOPPED ) {
TRACE(L"Record stop\n");
break;
}
Sleep(100);
NKDbgPrintfW(L"lEventCode = %d dwEnd = %d\n",lEventCode,dwEnd);
}
I found that the file's size increases continuously from "start record". Why does it take such a long time to "stop record"? What should I do in "stop record"?

Am I incorrectly setting up AVI info for saving a file in C++ VFW?

I am using a specialized network streaming camera and I am trying to save the video stream to a file. At the moment the code saves the video, but in an awkward RGB format which screws up the color, and then saves it using VFW. Am I doing this correctly and is it supposed to create an AVI with mismatched colors, or did I set something up wrong in the BITMAPINFOHEADER areas?
void PvSbProUISampleDlg::OnBnClickedSave()
{
// TODO: Add your control notification handler code here
CString StringValue;
mMovieSave.GetWindowTextW(StringValue);
if (StringValue == L"Save")
{
CString codecValue;
mMovieCodecSelected.GetWindowTextW(codecValue);
if (codecValue.IsEmpty()){
MessageBox( L"Please select a codec before saving to file",
L"Select Codec!",
MB_OK | MB_ICONEXCLAMATION );
return;
}
CString fileNameValue;
mFileName.GetWindowTextW(fileNameValue);
if (fileNameValue.IsEmpty()){
MessageBox( L"Please select a file location",
L"Select File!",
MB_OK | MB_ICONEXCLAMATION );
return;
}
if (!StartMovie())
return;
mSavingMovie = true;
mMovieSave.SetWindowTextW(L"Saving");
}
else
{
mVideoMutex.Lock();
PvResult aResult = mVideoCompressor->Stop();
mSavingMovie = false;
mVideoMutex.Unlock();
if (!aResult.IsOK())
{
MessageBox( mLocation,
L"Can't Stop Video Compressor!",
MB_OK | MB_ICONEXCLAMATION );
return;
}
mMovieSave.SetWindowTextW(L"Save");
}
}
I set up the video stream and select uncompressed AVI as my codec. I click the "Save" button, which then calls the function below.
bool PvSbProUISampleDlg::StartMovie()
{
if ( !mDevice.IsConnected() )
{
MessageBox( L"Need to connect to device",
L"Cannot start Video Compressor!",
MB_OK | MB_ICONEXCLAMATION );
return false;
}
if (!mPipeline.IsStarted() )
{
return false;
}
if (mSavingMovie)
return false;
PvInt64 width;
PvInt64 height;
PvInt64 bitCount;
if (!GetImageWidth(width).IsOK())
return false;
if (!GetImageHeight(height).IsOK())
return false;
if (!GetPixelBitCount(bitCount).IsOK())
return false;
// Start the movie compressor
if ( !mVideoCompressor->Start( mLocation,
width,
height,
bitCount/8,
59).IsOK())
{
MessageBox( mLocation,
L"Cannot start Video Compressor!",
MB_OK | MB_ICONEXCLAMATION );
return false;
}
return true;
}
The function gets the video size info and then starts the actual compression.
PvResult VideoCompressor::Start(const CString& aFileName, unsigned short aSizeX, unsigned short aSizeY, unsigned short aBPP, double aFPS)
{
IAVIFile *lAVIFile = NULL;
IAVIStream *lAVIStream = NULL;
IAVIStream *lAVICompressedStream = NULL;
AVISTREAMINFO lAVISTREAMINFO;
AVICOMPRESSOPTIONS lAVICOMPRESSOPTIONS;
// Try to match the image format with the Video Compressor capabilities
BITMAPINFO lTempBI;
lTempBI.bmiHeader.biSize = sizeof( BITMAPINFO );
lTempBI.bmiHeader.biWidth = aSizeX;
lTempBI.bmiHeader.biHeight = aSizeY;
lTempBI.bmiHeader.biPlanes = 1;
lTempBI.bmiHeader.biBitCount = aBPP * 8;
lTempBI.bmiHeader.biCompression = BI_RGB;
lTempBI.bmiHeader.biSizeImage = aSizeX * aSizeY * aBPP;
lTempBI.bmiHeader.biXPelsPerMeter = 1280;
lTempBI.bmiHeader.biYPelsPerMeter = 720;
lTempBI.bmiHeader.biClrUsed = 0;
lTempBI.bmiHeader.biClrImportant = 0;
//lTempBI.bmiHeader.
if( ( mCOMPVARS.hic != NULL ) && // if not the "Full Frames (uncompressed)"
( ICCompressQuery( mCOMPVARS.hic, &lTempBI, NULL ) != ICERR_OK ) )
{
mLastVideoError = "Image format not accepted by compressor!";
CleanUp(lAVIFile, lAVIStream ,lAVICompressedStream);
return PvResult::Code::GENERIC_ERROR;
}
// Try to open the stream for writing
if( mTempBuffer )
delete [] mTempBuffer;
mTempBuffer = new unsigned char[ aSizeX * aSizeY * aBPP ];
if( mTempBuffer == NULL )
{
mLastVideoError = "Cannot allocate memory for a temporary buffer!";
CleanUp(lAVIFile, lAVIStream ,lAVICompressedStream);
return PvResult::Code::GENERIC_ERROR;
}
if( AVIFileOpen( &lAVIFile, aFileName, OF_CREATE | OF_WRITE, NULL ) != 0 )
{
mLastVideoError = "Cannot open movie file for writing!";
CleanUp(lAVIFile, lAVIStream ,lAVICompressedStream);
return PvResult::Code::GENERIC_ERROR;
}
// Fill out AVIStream information
memset( &lAVISTREAMINFO, 0, sizeof( AVISTREAMINFO ) );
lAVISTREAMINFO.fccType = streamtypeVIDEO;
lAVISTREAMINFO.fccHandler = mCOMPVARS.fccHandler;
lAVISTREAMINFO.dwFlags = 0;
lAVISTREAMINFO.dwCaps = 0;
lAVISTREAMINFO.wPriority = 0;
lAVISTREAMINFO.wLanguage = 0;
lAVISTREAMINFO.dwScale = 100;
lAVISTREAMINFO.dwRate = (unsigned long)( aFPS * 100.0 );
lAVISTREAMINFO.dwStart = 0;
lAVISTREAMINFO.dwLength = 0;
lAVISTREAMINFO.dwInitialFrames = 0;
lAVISTREAMINFO.dwQuality = mCOMPVARS.lQ;
lAVISTREAMINFO.dwSuggestedBufferSize = aSizeX * aSizeY * aBPP;
lAVISTREAMINFO.dwSampleSize = aSizeX * aSizeY * aBPP;
SetRect(&lAVISTREAMINFO.rcFrame, 0, aSizeY, aSizeX, 0);
// Convert to a wchar_t*
char *orig = "Video Stream";
size_t origsize = strlen(orig) + 1;
const size_t newsize = 64;
size_t convertedChars = 0;
mbstowcs_s(&convertedChars, lAVISTREAMINFO.szName, origsize, orig, _TRUNCATE);
if( AVIFileCreateStream( lAVIFile, &lAVIStream, &lAVISTREAMINFO ) != 0 )
{
mLastVideoError = "Cannot create video stream!";
CleanUp(lAVIFile, lAVIStream ,lAVICompressedStream);
return PvResult::Code::GENERIC_ERROR;
}
BITMAPINFOHEADER lBIH;
lBIH.biSize = sizeof( BITMAPINFOHEADER );
lBIH.biWidth = aSizeX;
lBIH.biHeight = aSizeY;
lBIH.biPlanes = 1;
lBIH.biBitCount = aBPP * 8;
lBIH.biCompression = BI_RGB;
lBIH.biSizeImage = aSizeX * aSizeY * aBPP;
lBIH.biXPelsPerMeter = 1280;
lBIH.biYPelsPerMeter = 720;
lBIH.biClrUsed = 0;
lBIH.biClrImportant = 0;
memset( &lAVICOMPRESSOPTIONS, 0, sizeof( AVICOMPRESSOPTIONS ) );
lAVICOMPRESSOPTIONS.fccType = streamtypeVIDEO;
lAVICOMPRESSOPTIONS.fccHandler = mCOMPVARS.fccHandler;
lAVICOMPRESSOPTIONS.dwKeyFrameEvery = 15;
lAVICOMPRESSOPTIONS.dwQuality = mCOMPVARS.lQ;
lAVICOMPRESSOPTIONS.dwBytesPerSecond = 0;
lAVICOMPRESSOPTIONS.dwFlags = AVICOMPRESSF_KEYFRAMES; //| AVICOMPRESSF_VALID;//|AVICOMPRESSF_DATARATE;
lAVICOMPRESSOPTIONS.lpFormat = &lBIH;
lAVICOMPRESSOPTIONS.cbFormat = sizeof( lBIH );
lAVICOMPRESSOPTIONS.lpParms = 0;
lAVICOMPRESSOPTIONS.cbParms = 0;
lAVICOMPRESSOPTIONS.dwInterleaveEvery = 0;
HRESULT lR = AVIMakeCompressedStream( &lAVICompressedStream, lAVIStream, &lAVICOMPRESSOPTIONS, NULL);
if( lR == AVIERR_NOCOMPRESSOR )
{
mLastVideoError = "Cannot find a suitable compressor!";
CleanUp(lAVIFile, lAVIStream ,lAVICompressedStream);
return PvResult::Code::GENERIC_ERROR;
}
else if( lR == AVIERR_MEMORY )
{
mLastVideoError = "Not enough memory to start the compressor!";
CleanUp(lAVIFile, lAVIStream ,lAVICompressedStream);
return PvResult::Code::GENERIC_ERROR;
}
else if( lR == AVIERR_UNSUPPORTED )
{
mLastVideoError = "Compression is not supported for this image buffer!";
CleanUp(lAVIFile, lAVIStream ,lAVICompressedStream);
return PvResult::Code::GENERIC_ERROR;
}
if( AVIStreamSetFormat( lAVICompressedStream, 0, &lBIH, sizeof( lBIH ) ) != 0 )
{
mLastVideoError = "Cannot set stream format. It probably isn't supported by the Codec!";
CleanUp(lAVIFile, lAVIStream ,lAVICompressedStream);
return PvResult::Code::GENERIC_ERROR;
}
///////////////////
HRESULT hr;
//IBaseFilter mux = Null;
//IFileSinkFilter sink = null;
// Guid x = new Guid( 0xe436eb88, 0x524f, 0x11ce, 0x9f, 0x53, 0x00, 0x20, 0xaf, 0x0b, 0xa7, 0x70 );
//ICaptureGraphBuilder2::SetOutputFileName(
//////////////////
// finishing up
mAVIFile = lAVIFile;
mAVIStream = lAVIStream;
mAVICompressedStream = lAVICompressedStream;
mSizeX = aSizeX;
mSizeY = aSizeY;
mBPP = aBPP;
mImageSize = aSizeX * aSizeY * aBPP;
mLastSample = 0;
mCompressing = true;
return PvResult::Code::OK;
}
this compresses the stream
PvResult VideoCompressor::Compress(PvBuffer *aPvBuffer)
{
if (!mCompressing)
return PvResult::Code::GENERIC_ERROR;
ASSERT( mTempBuffer != NULL );
long lSamplesWritten, lBytesWritten;
int numberOfLines = 0;
PvUInt8 * aBuffer = aPvBuffer->GetDataPointer();
for( unsigned short lLine = 0; lLine < mSizeY; lLine++ )
{
numberOfLines = lLine;
unsigned char *lCurLine = (unsigned char *)aBuffer + (lLine ) * mSizeX * mBPP;
unsigned char *lCurLineInv = mTempBuffer + (mSizeY - lLine - 1) * mSizeX * mBPP;
::memcpy( lCurLineInv, lCurLine, mSizeX * mBPP );
}
if( AVIStreamWrite( mAVICompressedStream, mLastSample, 1, mTempBuffer, mImageSize, 0,
&lSamplesWritten, &lBytesWritten ) != 0 ||
lSamplesWritten < 1 ||
lBytesWritten < 1 )
{
mLastVideoError = "Cannot compress image!";
return PvResult::Code::GENERIC_ERROR;
}
mLastSample ++;
return PvResult::Code::OK;
}
this is what it should look like:
http://i13.photobucket.com/albums/a269/Masterg_/Untitled-16.png
this is what it saves as ( minus the guy):
http://i13.photobucket.com/albums/a269/Masterg_/vlcsnap-2011-06-07-13h11m34s97.png
From MSDN we have:
Syntax
DWORD ICCompressQuery(
hic,
lpbiInput,
lpbiOutput );
Parameters
hic : Handle to a compressor.
lpbiInput : Pointer to a BITMAPINFO structure containing the input format.
lpbiOutput : Pointer to a BITMAPINFO structure containing the output format. You can
specify zero for this parameter to
indicate any output format is
acceptable.
I might be wrong, but it seems to me that you are trying to "force" this input format without taking into account the actual format you are passing as input. If your input format does not match the "forced" one, weird results must be expected.
If your actual input format is not compatible with your compressor, you could try using a ColorSpace converter filter before your compressor.
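Since the screenshots show exchanged red/blue channels rather than a broken stream, the other usual suspect is simple RGB vs. BGR ordering: uncompressed AVI frames are expected bottom-up in BGR order, so if the camera delivers RGB the colors come out exactly like this. This is an assumption about the camera's output, but it can be tested with a small change to the line-flipping loop in VideoCompressor::Compress() (a sketch, valid only for mBPP == 3):
// Replace the plain memcpy in the per-line loop with a copy that swaps
// the R and B channels while flipping the image vertically.
unsigned char* aBuffer = (unsigned char*)aPvBuffer->GetDataPointer();
for (unsigned short lLine = 0; lLine < mSizeY; lLine++)
{
    unsigned char* lCurLine    = aBuffer + lLine * mSizeX * mBPP;
    unsigned char* lCurLineInv = mTempBuffer + (mSizeY - lLine - 1) * mSizeX * mBPP;
    for (unsigned int x = 0; x < mSizeX; x++)
    {
        lCurLineInv[x * 3 + 0] = lCurLine[x * 3 + 2]; // B <- R
        lCurLineInv[x * 3 + 1] = lCurLine[x * 3 + 1]; // G
        lCurLineInv[x * 3 + 2] = lCurLine[x * 3 + 0]; // R <- B
    }
}
If that fixes the colors, the cleaner long-term fix is to request BGR output from the camera, if the SDK supports it, and keep the original memcpy.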

failure to create a DirectX device and swapchain

I am having issues retrieving a swap chain and device from DirectX; further info is in the code.
void GXDX::StartUp(HWND* mainWindow,int w, int h)
{
//width and height are members of GXDX
width = w; //contains the width
height = h; //contains the height
this->mainWindow = mainWindow; // Handle to the main window. In the debugger it
                               // usually shows something like: unused -735313406
ID3D10Texture2D *backBufferSurface;
DXGI_SWAP_CHAIN_DESC swapChainDesc;
swapChainDesc.BufferCount = 2;
swapChainDesc.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
swapChainDesc.BufferDesc.RefreshRate.Numerator = 60;
swapChainDesc.BufferDesc.RefreshRate.Denominator = 1;
swapChainDesc.BufferDesc.Width = width;
swapChainDesc.BufferDesc.Height = height;
swapChainDesc.SampleDesc.Count = 1;
swapChainDesc.SampleDesc.Quality = 0;
swapChainDesc.OutputWindow = *mainWindow;
swapChainDesc.Windowed = TRUE;
D3D10_DRIVER_TYPE driverType = D3D10_DRIVER_TYPE_HARDWARE;
HRESULT hr = D3D10CreateDeviceAndSwapChain(NULL,driverType,NULL,0,
D3D10_SDK_VERSION, &swapChainDesc,&swapChain,&dxDevice);
if(FAILED(hr))//Error is here
throw GXVideoException(L"Problems retrieving directX device");
}
Below are all the values shown in the debugger after the method has finished:
- &dxDevice 0x00e74b04 ID3D10Device * *
- 0x00000000 ID3D10Device *
- IUnknown {...} IUnknown
__vfptr CXX0030: Error: expression cannot be evaluated
- &swapChain 0x00e74b08 IDXGISwapChain * *
- 0x00000000 IDXGISwapChain *
- IDXGIDeviceSubObject {...} IDXGIDeviceSubObject
- IDXGIObject {...} IDXGIObject
- IUnknown {...} IUnknown
__vfptr CXX0030: Error: expression cannot be evaluated
- &swapChainDesc 0x002df90c {BufferDesc={...} SampleDesc={...} BufferUsage=0xcccccccc ...} DXGI_SWAP_CHAIN_DESC *
- BufferDesc {Width=0x00000320 Height=0x00000258 RefreshRate={...} ...} DXGI_MODE_DESC
Width 800 unsigned int
Height 600 unsigned int
- RefreshRate {Numerator=60 Denominator=1 } DXGI_RATIONAL
Numerator 60 unsigned int
Denominator 1 unsigned int
Format DXGI_FORMAT_R8G8B8A8_UNORM DXGI_FORMAT
ScanlineOrdering -858993460 DXGI_MODE_SCANLINE_ORDER
Scaling -858993460 DXGI_MODE_SCALING
- SampleDesc {Count=0x00000001 Quality=0x00000000 } DXGI_SAMPLE_DESC
Count 1 unsigned int
Quality 0 unsigned int
BufferUsage 3435973836 unsigned int
BufferCount 2 unsigned int
- OutputWindow 0x008b08ca {unused=-665779669 } HWND__ *
unused -665779669 int
Windowed 1 int
SwapEffect -858993460 DXGI_SWAP_EFFECT
Flags 3435973836 unsigned int
driverType D3D10_DRIVER_TYPE_HARDWARE D3D10_DRIVER_TYPE
hr 0x887a0001 HRESULT
- this 0x00e74af0 {dxDevice=0x00000000 swapChain=0x00000000 } GXDX * const
- GXRenderer {running=true width=0x00000320 height=0x00000258 ...} GXRenderer
- __vfptr 0x013277dc const GXDX::`vftable' *
[0] 0x0132110e GXDX::Render(void) *
[0x1] 0x013211d6 GXDX::StartUp(struct HWND__ * *,int,int) *
[0x2] 0x01321041 GXDX::SetupScene(void) *
[0x3] 0x01321069 GXDX::DisplayScene(void) *
running true bool
width 0x00000320 int
height 0x00000258 int
- mainWindow 0x0132a214 struct HWND__ * GXRenderManager::mainWindow {unused=0x008b08ca } HWND__ *
unused 0x008b08ca int
- dxDevice 0x00000000 ID3D10Device *
+ IUnknown {...} IUnknown
- swapChain 0x00000000 IDXGISwapChain *
- IDXGIDeviceSubObject {...} IDXGIDeviceSubObject
- IDXGIObject {...} IDXGIObject
- IUnknown {...} IUnknown
__vfptr CXX0030: Error: expression cannot be evaluated
[EDIT]
Prior to Goz's response, I checked the debug output in more detail and this is what was received:
DXGI Error: IDXGIFactory::CreateSwapChain: SwapEffect is unknown.
which I am guessing means I did not set the SwapEffect attribute. I will do that and check it out.
Silly me. Earlier I mentioned in a comment that neither of the books I was reading included a swap effect property. I knew something was off regardless of whether I included it or not.
But I also noticed that both books zeroed out the swap chain description, making it safer to leave out properties. So I added the following
SecureZeroMemory(&swapChainDesc, sizeof(swapChainDesc));
and everything worked. That aside, I should still set a SwapEffect property explicitly; why the books decided not to, I have not figured out yet.
The following code worked for me (I'm using Microsoft Visual Studio Express 2012 for Windows Desktop)
DXGI_SWAP_CHAIN_DESC swapChainDesc;
swapChainDesc.BufferDesc.Width = width;
swapChainDesc.BufferDesc.Height = height;
swapChainDesc.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
swapChainDesc.BufferDesc.RefreshRate.Numerator = 60;
swapChainDesc.BufferDesc.RefreshRate.Denominator = 1;
swapChainDesc.SampleDesc.Count = 1;
swapChainDesc.SampleDesc.Quality = 0;
swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swapChainDesc.BufferCount = 1;
swapChainDesc.OutputWindow = hWnd;
swapChainDesc.Windowed = true;
swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
swapChainDesc.Flags = 0;
ID3D11Device *pDevice = 0;
ID3D11DeviceContext *pContext = 0;
IDXGISwapChain *pSwapChain = 0;
HRESULT result;
result = D3D11CreateDeviceAndSwapChain(NULL, driverType, NULL, D3D11_CREATE_DEVICE_SINGLETHREADED, featureLevel, totalFeatureLevel, D3D11_SDK_VERSION, &swapChainDesc, &pSwapChain, &pDevice, &currentFeatureLevel, &pContext);
So you can just try adding more attributes to the swapChainDesc.
Try not passing a pointer to your HWND through. You should pass an HWND as a non-pointer. Other than that I don't see anything massively wrong.
This is what DXErrorLookup says about your error:
HRESULT: 0x887a0001 (2289696769)
Name: DXGI_ERROR_INVALID_CALL
Description: The application has made an erroneous API call that it had enough
information to avoid. This error is intended to denote that the application should be
altered to avoid the error. Use of the debug version of the DXGI.DLL will provide run-
time debug output with further information.
Severity code: Failed
Facility Code: FACILITY_DXGI (2170)
Error Code: 0x0001 (1)
So have you considered using the debug version of DXGI to see what the error is?
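Regarding that debug output: with D3D10 the usual way to get the extra DXGI validation messages in the Visual Studio output window is to create the device with the debug flag. A sketch based on the same D3D10CreateDeviceAndSwapChain call as in the question:
UINT createFlags = 0;
#ifdef _DEBUG
createFlags |= D3D10_CREATE_DEVICE_DEBUG; // enables runtime validation messages,
                                          // e.g. "SwapEffect is unknown"
#endif
HRESULT hr = D3D10CreateDeviceAndSwapChain( NULL, D3D10_DRIVER_TYPE_HARDWARE, NULL,
                                            createFlags, D3D10_SDK_VERSION,
                                            &swapChainDesc, &swapChain, &dxDevice );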
Btw my working DX10 initialisation is as follows (Warning a LOT of code!):
HRESULT hr = S_OK;
// Wrong init params passed in.
if ( pParams->paramSize != sizeof( D3D10InitParams ) )
return false;
// Upgrade the initparams to the correct version
mInitParams = *(D3D10InitParams*)pParams;
// Create factory.
IDXGIFactory* pFactory = NULL;
if ( FAILED( CreateDXGIFactory( __uuidof( IDXGIFactory ), (void**)&pFactory ) ) )
{
return false;
}
if ( FAILED( pFactory->MakeWindowAssociation( mInitParams.hWnd, 0 ) ) )
{
return false;
}
HWND hTemp;
pFactory->GetWindowAssociation( &hTemp );
// Enumerate adapters.
unsigned int count = 0;
IDXGIAdapter * pAdapter;
std::vector<IDXGIAdapter*> vAdapters;
while( pFactory->EnumAdapters( count, &pAdapter ) != DXGI_ERROR_NOT_FOUND )
{
vAdapters.push_back( pAdapter );
count++;
}
unsigned int selectedAdapter = mInitParams.display;
if ( vAdapters.size() > 1 )
{
// Need to handle multiple available adapters.
}
// Release all other adapters.
count = 0;
unsigned int max = (unsigned int)vAdapters.size();
while( count < max )
{
if ( count != selectedAdapter )
{
vAdapters[count]->Release();
}
count++;
}
// Device should support all basic DX10 features.
// Caps does not support enough basic features.
//if ( !CheckCaps( &caps ) )
// return false;
// Create the D3D 10 device.
DXGI_MODE_DESC dxgiModeDesc;
dxgiModeDesc.Width = mInitParams.width;
dxgiModeDesc.Height = mInitParams.height;
dxgiModeDesc.RefreshRate.Numerator = (mInitParams.refreshRate == 0) ? 60 : mInitParams.refreshRate;
dxgiModeDesc.RefreshRate.Denominator = 1;
dxgiModeDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
dxgiModeDesc.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_PROGRESSIVE;
dxgiModeDesc.Scaling = DXGI_MODE_SCALING_CENTERED;
DXGI_SAMPLE_DESC dxgiSampleDesc;
dxgiSampleDesc.Count = 1;
dxgiSampleDesc.Quality = 0;
//DXGI_USAGE dxgiUsage;
//dxgiUsage.
DXGI_SWAP_CHAIN_DESC dxgiSwapChainDesc;
dxgiSwapChainDesc.BufferDesc = dxgiModeDesc;
dxgiSwapChainDesc.SampleDesc = dxgiSampleDesc;
dxgiSwapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
dxgiSwapChainDesc.BufferCount = 2;
dxgiSwapChainDesc.OutputWindow = mInitParams.hWnd;
dxgiSwapChainDesc.Windowed = mInitParams.windowed;
dxgiSwapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
dxgiSwapChainDesc.Flags = 0;//DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH;
// Set the device as a debug device when compiled for debug.
#ifdef _DEBUG
unsigned int flags = D3D10_CREATE_DEVICE_DEBUG;
#else
unsigned int flags = 0;
#endif
mpAdapter = vAdapters[selectedAdapter];
// Create the device and swap chain.
if ( FAILED( D3D10CreateDeviceAndSwapChain( mpAdapter, D3D10_DRIVER_TYPE_HARDWARE, NULL, flags, D3D10_SDK_VERSION, &dxgiSwapChainDesc, &mpSwapChain, &mpDevice ) ) )
{
return false;
}
// Get the back buffer.
ID3D10Texture2D* pBuffer = NULL;
if ( FAILED( mpSwapChain->GetBuffer( 0, __uuidof( ID3D10Texture2D ), (void**)&pBuffer ) ) )
{
return false;
}
// Create the default render target view.
hr = mpDevice->CreateRenderTargetView( pBuffer, NULL, &mDefaultRenderTarget );
pBuffer->Release();
if ( FAILED( hr ) )
{
return false;
}
// Create depth stencil texture
D3D10_TEXTURE2D_DESC descDepth;
descDepth.Width = mInitParams.width;
descDepth.Height = mInitParams.height;
descDepth.MipLevels = 1;
descDepth.ArraySize = 1;
descDepth.Format = DXGI_FORMAT_D24_UNORM_S8_UINT;
descDepth.SampleDesc.Count = 1;
descDepth.SampleDesc.Quality = 0;
descDepth.Usage = D3D10_USAGE_DEFAULT;
descDepth.BindFlags = D3D10_BIND_DEPTH_STENCIL;
descDepth.CPUAccessFlags = 0;
descDepth.MiscFlags = 0;
if ( FAILED( mpDevice->CreateTexture2D( &descDepth, NULL, &mpDepthStencilTex ) ) )
{
return false;
}
// Create the depth stencil view
D3D10_DEPTH_STENCIL_VIEW_DESC descDSV;
descDSV.Format = descDepth.Format;
descDSV.ViewDimension = D3D10_DSV_DIMENSION_TEXTURE2D;
descDSV.Texture2D.MipSlice = 0;
if ( FAILED( mpDevice->CreateDepthStencilView( mpDepthStencilTex, &descDSV, &mDefaultDepthStencilTarget ) ) )
{
return false;
}
// Set the default render targets.
mpDevice->OMSetRenderTargets( 1, &mDefaultRenderTarget, mDefaultDepthStencilTarget );
mpEffectDevice = new D3D10EffectStateDevice( GetDevice() );
// Set the default render states.
SetupRenderStates();
// Set the default viewport.
D3D10_VIEWPORT d3d10ViewPort;
d3d10ViewPort.Width = mInitParams.width;
d3d10ViewPort.Height = mInitParams.height;
d3d10ViewPort.TopLeftX = 0;
d3d10ViewPort.TopLeftY = 0;
d3d10ViewPort.MinDepth = 0.0f;
d3d10ViewPort.MaxDepth = 1.0f;
GetDevice()->RSSetViewports( 1, &d3d10ViewPort );
I hope that's some help!