Vector subscript out of range in C++ OpenCV stereo camera calibration

I am trying Martin Peris's OpenCV stereo camera calibration code from this link (http://blog.martinperis.com/2011/01/opencv-stereo-camera-calibration.html). When I use the calibration images he provides, it works fine. But when I try my own images, it gives the following error:
Debug Assertion Failed!
Program: C"\Windows\system32\MSVCP110D.dll
File: C:\Program Files(x86)\Microsoft Visual Studio 11.0\include\vector\
Line: 1140
Expression: vector subscript out of range
I am running the code on 64-bit Windows. I built the project using CMake. The only way I know to run this program is to open the Developer Command Prompt for VS2012, cd to my project's Debug directory, and call the program with "stereo_camera_calibrate list.txt 9 6 2.2", as explained in the blog linked above. The problem with running this console application through the command prompt is that I do not know where the error happened in the code!
Update: I just added a couple of lines of code to check whether each image is loaded. Apparently the images are not being loaded, since I get the message "image not loaded" on screen. My images are BMP; are there any problems with loading .bmp images using cvLoadImage?
Here is the code; any help is appreciated.
#pragma warning( disable: 4996 )
/* *************** License:**************************
Oct. 3, 2008
Right to use this code in any way you want without warranty, support or any guarantee of it working.
BOOK: It would be nice if you cited it:
Learning OpenCV: Computer Vision with the OpenCV Library
by Gary Bradski and Adrian Kaehler
Published by O'Reilly Media, October 3, 2008
AVAILABLE AT:
http://www.amazon.com/Learning-OpenCV-Computer-Vision-Library/dp/0596516134
Or: http://oreilly.com/catalog/9780596516130/
ISBN-10: 0596516134 or: ISBN-13: 978-0596516130
OTHER OPENCV SITES:
* The source code is on sourceforge at:
http://sourceforge.net/projects/opencvlibrary/
* The OpenCV wiki page (As of Oct 1, 2008 this is down for changing over servers, but should come back):
http://opencvlibrary.sourceforge.net/
* An active user group is at:
http://tech.groups.yahoo.com/group/OpenCV/
* The minutes of weekly OpenCV development meetings are at:
http://pr.willowgarage.com/wiki/OpenCV
************************************************** */
/*
Modified by Martin Peris Martorell (info#martinperis.com) in order to accept some configuration
parameters and store all the calibration data as xml files.
*/
#include "cv.h"
#include "cxmisc.h"
#include "highgui.h"
#include "cvaux.h"
#include <vector>
#include <string>
#include <algorithm>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
using namespace std;
//
// Given a list of chessboard images, the number of corners (nx, ny)
// on the chessboards, and a flag useUncalibrated: 0 for calibrated
// (Bouguet) rectification, 1 to rectify with cvStereoRectifyUncalibrated()
// using F from cvStereoCalibrate(), 2 to compute the fundamental matrix
// separately. Calibrate the cameras and display the rectified results
// along with the computed disparity images.
//
static void
StereoCalib(const char* imageList, int nx, int ny, int useUncalibrated, float _squareSize)
{
int displayCorners = 1;
int showUndistorted = 1;
bool isVerticalStereo = false;//OpenCV can handle left-right
//or up-down camera arrangements
const int maxScale = 1;
const float squareSize = _squareSize; //Chessboard square size in cm
FILE* f = fopen(imageList, "rt");
int i, j, lr, nframes, n = nx*ny, N = 0;
vector<string> imageNames[2];
vector<CvPoint3D32f> objectPoints;
vector<CvPoint2D32f> points[2];
vector<int> npoints;
vector<uchar> active[2];
vector<CvPoint2D32f> temp(n);
CvSize imageSize = {0,0};
// ARRAY AND VECTOR STORAGE:
double M1[3][3], M2[3][3], D1[5], D2[5];
double R[3][3], T[3], E[3][3], F[3][3];
double Q[4][4];
CvMat _M1 = cvMat(3, 3, CV_64F, M1 );
CvMat _M2 = cvMat(3, 3, CV_64F, M2 );
CvMat _D1 = cvMat(1, 5, CV_64F, D1 );
CvMat _D2 = cvMat(1, 5, CV_64F, D2 );
CvMat _R = cvMat(3, 3, CV_64F, R );
CvMat _T = cvMat(3, 1, CV_64F, T );
CvMat _E = cvMat(3, 3, CV_64F, E );
CvMat _F = cvMat(3, 3, CV_64F, F );
CvMat _Q = cvMat(4,4, CV_64F, Q);
if( displayCorners )
cvNamedWindow( "corners", 1 );
// READ IN THE LIST OF CHESSBOARDS:
if( !f )
{
fprintf(stderr, "can not open file %s\n", imageList );
return;
}
for(i=0;;i++)
{
char buf[1024];
int count = 0, result=0;
lr = i % 2;
vector<CvPoint2D32f>& pts = points[lr];
if( !fgets( buf, sizeof(buf)-3, f ))
break;
size_t len = strlen(buf);
while( len > 0 && isspace(buf[len-1]))
buf[--len] = '\0';
if( buf[0] == '#')
continue;
IplImage* img = cvLoadImage( buf, 0 );
if(img == NULL)
printf("image not loaded \n");
else
printf("Loaded image \n");
if( !img )
break;
imageSize = cvGetSize(img);
imageNames[lr].push_back(buf);
//FIND CHESSBOARDS AND CORNERS THEREIN:
for( int s = 1; s <= maxScale; s++ )
{
IplImage* timg = img;
if( s > 1 )
{
timg = cvCreateImage(cvSize(img->width*s,img->height*s),
img->depth, img->nChannels );
cvResize( img, timg, CV_INTER_CUBIC );
}
result = cvFindChessboardCorners( timg, cvSize(nx, ny),
&temp[0], &count,
CV_CALIB_CB_ADAPTIVE_THRESH |
CV_CALIB_CB_NORMALIZE_IMAGE);
if( timg != img )
cvReleaseImage( &timg );
if( result || s == maxScale )
for( j = 0; j < count; j++ )
{
temp[j].x /= s;
temp[j].y /= s;
}
if( result )
break;
}
if( displayCorners )
{
printf("%s\n", buf);
IplImage* cimg = cvCreateImage( imageSize, 8, 3 );
cvCvtColor( img, cimg, CV_GRAY2BGR );
cvDrawChessboardCorners( cimg, cvSize(nx, ny), &temp[0],
count, result );
cvShowImage( "corners", cimg );
cvReleaseImage( &cimg );
if( cvWaitKey(0) == 27 ) //Allow ESC to quit
exit(-1);
}
else
putchar('.');
N = pts.size();
pts.resize(N + n, cvPoint2D32f(0,0));
active[lr].push_back((uchar)result);
//assert( result != 0 );
if( result )
{
//Calibration will suffer without subpixel interpolation
cvFindCornerSubPix( img, &temp[0], count,
cvSize(11, 11), cvSize(-1,-1),
cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,
30, 0.01) );
copy( temp.begin(), temp.end(), pts.begin() + N );
}
cvReleaseImage( &img );
}
fclose(f);
printf("\n");
// HARVEST CHESSBOARD 3D OBJECT POINT LIST:
nframes = active[0].size();//Number of good chessboards found
objectPoints.resize(nframes*n);
for( i = 0; i < ny; i++ )
for( j = 0; j < nx; j++ )
objectPoints[i*nx + j] = cvPoint3D32f(i*squareSize, j*squareSize, 0);
for( i = 1; i < nframes; i++ )
copy( objectPoints.begin(), objectPoints.begin() + n,
objectPoints.begin() + i*n );
npoints.resize(nframes,n);
N = nframes*n;
CvMat _objectPoints = cvMat(1, N, CV_32FC3, &objectPoints[0] );
CvMat _imagePoints1 = cvMat(1, N, CV_32FC2, &points[0][0] );
CvMat _imagePoints2 = cvMat(1, N, CV_32FC2, &points[1][0] );
CvMat _npoints = cvMat(1, npoints.size(), CV_32S, &npoints[0] );
cvSetIdentity(&_M1);
cvSetIdentity(&_M2);
cvZero(&_D1);
cvZero(&_D2);
// CALIBRATE THE STEREO CAMERAS
printf("Running stereo calibration ...");
fflush(stdout);
cvStereoCalibrate( &_objectPoints, &_imagePoints1,
&_imagePoints2, &_npoints,
&_M1, &_D1, &_M2, &_D2,
imageSize, &_R, &_T, &_E, &_F,
cvTermCriteria(CV_TERMCRIT_ITER+
CV_TERMCRIT_EPS, 100, 1e-5),
CV_CALIB_FIX_ASPECT_RATIO +
CV_CALIB_ZERO_TANGENT_DIST +
CV_CALIB_SAME_FOCAL_LENGTH );
printf(" done\n");
// CALIBRATION QUALITY CHECK
// because the output fundamental matrix implicitly
// includes all the output information,
// we can check the quality of calibration using the
// epipolar geometry constraint: m2^t*F*m1=0
vector<CvPoint3D32f> lines[2];
points[0].resize(N);
points[1].resize(N);
_imagePoints1 = cvMat(1, N, CV_32FC2, &points[0][0] );
_imagePoints2 = cvMat(1, N, CV_32FC2, &points[1][0] );
lines[0].resize(N);
lines[1].resize(N);
CvMat _L1 = cvMat(1, N, CV_32FC3, &lines[0][0]);
CvMat _L2 = cvMat(1, N, CV_32FC3, &lines[1][0]);
//Always work in undistorted space
cvUndistortPoints( &_imagePoints1, &_imagePoints1,
&_M1, &_D1, 0, &_M1 );
cvUndistortPoints( &_imagePoints2, &_imagePoints2,
&_M2, &_D2, 0, &_M2 );
cvComputeCorrespondEpilines( &_imagePoints1, 1, &_F, &_L1 );
cvComputeCorrespondEpilines( &_imagePoints2, 2, &_F, &_L2 );
double avgErr = 0;
for( i = 0; i < N; i++ )
{
double err = fabs(points[0][i].x*lines[1][i].x +
points[0][i].y*lines[1][i].y + lines[1][i].z)
+ fabs(points[1][i].x*lines[0][i].x +
points[1][i].y*lines[0][i].y + lines[0][i].z);
avgErr += err;
}
printf( "avg err = %g\n", avgErr/(nframes*n) );
//COMPUTE AND DISPLAY RECTIFICATION
if( showUndistorted )
{
CvMat* mx1 = cvCreateMat( imageSize.height,
imageSize.width, CV_32F );
CvMat* my1 = cvCreateMat( imageSize.height,
imageSize.width, CV_32F );
CvMat* mx2 = cvCreateMat( imageSize.height,
imageSize.width, CV_32F );
CvMat* my2 = cvCreateMat( imageSize.height,
imageSize.width, CV_32F );
CvMat* img1r = cvCreateMat( imageSize.height,
imageSize.width, CV_8U );
CvMat* img2r = cvCreateMat( imageSize.height,
imageSize.width, CV_8U );
CvMat* disp = cvCreateMat( imageSize.height,
imageSize.width, CV_16S );
CvMat* vdisp = cvCreateMat( imageSize.height,
imageSize.width, CV_8U );
CvMat* pair;
double R1[3][3], R2[3][3], P1[3][4], P2[3][4];
CvMat _R1 = cvMat(3, 3, CV_64F, R1);
CvMat _R2 = cvMat(3, 3, CV_64F, R2);
// IF BY CALIBRATED (BOUGUET'S METHOD)
if( useUncalibrated == 0 )
{
CvMat _P1 = cvMat(3, 4, CV_64F, P1);
CvMat _P2 = cvMat(3, 4, CV_64F, P2);
cvStereoRectify( &_M1, &_M2, &_D1, &_D2, imageSize,
&_R, &_T,
&_R1, &_R2, &_P1, &_P2, &_Q,
0/*CV_CALIB_ZERO_DISPARITY*/ );
isVerticalStereo = fabs(P2[1][3]) > fabs(P2[0][3]);
//Precompute maps for cvRemap()
cvInitUndistortRectifyMap(&_M1,&_D1,&_R1,&_P1,mx1,my1);
cvInitUndistortRectifyMap(&_M2,&_D2,&_R2,&_P2,mx2,my2);
//Save parameters
cvSave("M1.xml",&_M1);
cvSave("D1.xml",&_D1);
cvSave("R1.xml",&_R1);
cvSave("P1.xml",&_P1);
cvSave("M2.xml",&_M2);
cvSave("D2.xml",&_D2);
cvSave("R2.xml",&_R2);
cvSave("P2.xml",&_P2);
cvSave("Q.xml",&_Q);
cvSave("mx1.xml",mx1);
cvSave("my1.xml",my1);
cvSave("mx2.xml",mx2);
cvSave("my2.xml",my2);
}
//OR ELSE HARTLEY'S METHOD
else if( useUncalibrated == 1 || useUncalibrated == 2 )
// use intrinsic parameters of each camera, but
// compute the rectification transformation directly
// from the fundamental matrix
{
double H1[3][3], H2[3][3], iM[3][3];
CvMat _H1 = cvMat(3, 3, CV_64F, H1);
CvMat _H2 = cvMat(3, 3, CV_64F, H2);
CvMat _iM = cvMat(3, 3, CV_64F, iM);
//Just to show you could have independently used F
if( useUncalibrated == 2 )
cvFindFundamentalMat( &_imagePoints1,
&_imagePoints2, &_F);
cvStereoRectifyUncalibrated( &_imagePoints1,
&_imagePoints2, &_F,
imageSize,
&_H1, &_H2, 3);
cvInvert(&_M1, &_iM);
cvMatMul(&_H1, &_M1, &_R1);
cvMatMul(&_iM, &_R1, &_R1);
cvInvert(&_M2, &_iM);
cvMatMul(&_H2, &_M2, &_R2);
cvMatMul(&_iM, &_R2, &_R2);
//Precompute map for cvRemap()
cvInitUndistortRectifyMap(&_M1,&_D1,&_R1,&_M1,mx1,my1);
cvInitUndistortRectifyMap(&_M2,&_D2,&_R2,&_M2,mx2,my2);
}
else
assert(0);
cvNamedWindow( "rectified", 1 );
// RECTIFY THE IMAGES AND FIND DISPARITY MAPS
if( !isVerticalStereo )
pair = cvCreateMat( imageSize.height, imageSize.width*2,
CV_8UC3 );
else
pair = cvCreateMat( imageSize.height*2, imageSize.width,
CV_8UC3 );
//Setup for finding stereo correspondences
CvStereoBMState *BMState = cvCreateStereoBMState();
assert(BMState != 0);
BMState->preFilterSize=41;
BMState->preFilterCap=31;
BMState->SADWindowSize=41;
BMState->minDisparity=-64;
BMState->numberOfDisparities=128;
BMState->textureThreshold=10;
BMState->uniquenessRatio=15;
for( i = 0; i < nframes; i++ )
{
IplImage* img1=cvLoadImage(imageNames[0][i].c_str(),0);
IplImage* img2=cvLoadImage(imageNames[1][i].c_str(),0);
if( img1 && img2 )
{
CvMat part;
cvRemap( img1, img1r, mx1, my1 );
cvRemap( img2, img2r, mx2, my2 );
if( !isVerticalStereo || useUncalibrated != 0 )
{
// When the stereo camera is oriented vertically,
// useUncalibrated==0 does not transpose the
// image, so the epipolar lines in the rectified
// images are vertical. Stereo correspondence
// function does not support such a case.
cvFindStereoCorrespondenceBM( img1r, img2r, disp,
BMState);
cvNormalize( disp, vdisp, 0, 256, CV_MINMAX );
cvNamedWindow( "disparity" );
cvShowImage( "disparity", vdisp );
}
if( !isVerticalStereo )
{
cvGetCols( pair, &part, 0, imageSize.width );
cvCvtColor( img1r, &part, CV_GRAY2BGR );
cvGetCols( pair, &part, imageSize.width,
imageSize.width*2 );
cvCvtColor( img2r, &part, CV_GRAY2BGR );
for( j = 0; j < imageSize.height; j += 16 )
cvLine( pair, cvPoint(0,j),
cvPoint(imageSize.width*2,j),
CV_RGB(0,255,0));
}
else
{
cvGetRows( pair, &part, 0, imageSize.height );
cvCvtColor( img1r, &part, CV_GRAY2BGR );
cvGetRows( pair, &part, imageSize.height,
imageSize.height*2 );
cvCvtColor( img2r, &part, CV_GRAY2BGR );
for( j = 0; j < imageSize.width; j += 16 )
cvLine( pair, cvPoint(j,0),
cvPoint(j,imageSize.height*2),
CV_RGB(0,255,0));
}
cvShowImage( "rectified", pair );
if( cvWaitKey() == 27 )
break;
}
cvReleaseImage( &img1 );
cvReleaseImage( &img2 );
}
cvReleaseStereoBMState(&BMState);
cvReleaseMat( &mx1 );
cvReleaseMat( &my1 );
cvReleaseMat( &mx2 );
cvReleaseMat( &my2 );
cvReleaseMat( &img1r );
cvReleaseMat( &img2r );
cvReleaseMat( &disp );
}
}
int main(int argc, char *argv[])
{
int nx, ny;
float squareSize;
int fail = 0;
//Check command line
if (argc != 5)
{
fprintf(stderr,"USAGE: %s imageList nx ny squareSize\n",argv[0]);
fprintf(stderr,"\t imageList : Filename of the image list (string). Example : list.txt\n");
fprintf(stderr,"\t nx : Number of horizontal squares (int > 0). Example : 9\n");
fprintf(stderr,"\t ny : Number of vertical squares (int > 0). Example : 6\n");
fprintf(stderr,"\t squareSize : Size of a square (float > 0). Example : 2.5\n");
return 1;
}
nx = atoi(argv[2]);
ny = atoi(argv[3]);
squareSize = (float)atof(argv[4]);
if (nx <= 0)
{
fail = 1;
fprintf(stderr, "ERROR: nx value can not be <= 0\n");
}
if (ny <= 0)
{
fail = 1;
fprintf(stderr, "ERROR: ny value can not be <= 0\n");
}
if (squareSize <= 0.0)
{
fail = 1;
fprintf(stderr, "ERROR: squareSize value can not be <= 0\n");
}
if(fail != 0) return 1;
StereoCalib(argv[1], nx, ny, 0, squareSize);
return 0;
}

Related

OpenCV video writer only writes 128-byte files (OS X El Capitan)

I have tried several pieces of code now, all more or less similar, and they all produce only a 128-byte file. I want to record the webcam stream to a file.
I don't believe this is a codec issue; I tried all of them and I still get only 128 bytes. Does anyone know what the problem is? So far I have only tried it on Mac OS X.
For example, this code:
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <cassert>
int main( int argc, char** argv ) {
CvCapture* capture;
capture = cvCreateCameraCapture(0);
assert( capture != NULL );
IplImage* bgr_frame = cvQueryFrame( capture );
CvSize size = cvSize(
(int)cvGetCaptureProperty( capture,
CV_CAP_PROP_FRAME_WIDTH),
(int)cvGetCaptureProperty( capture,
CV_CAP_PROP_FRAME_HEIGHT)
);
cvNamedWindow( "Webcam", CV_WINDOW_AUTOSIZE );
CvVideoWriter *writer = cvCreateVideoWriter( "vidtry.AVI",
CV_FOURCC('A','V','C','1'),
30,
size
);
while( (bgr_frame = cvQueryFrame( capture )) != NULL )
{
cvWriteFrame(writer, bgr_frame );
cvShowImage( "Webcam", bgr_frame );
char c = cvWaitKey( 33 );
if( c == 27 ) break;
}
cvReleaseVideoWriter( &writer );
cvReleaseCapture( &capture );
cvDestroyWindow( "Webcam" );
return( 0 );
}

clock_gettime() function outputting incorrect time

I am trying to get the runtime of the following code using the clock_gettime function. However, when I run the code I get a time of 0.0000 every run. I have also printed the start and stop times individually, and I get exactly the same answer for both.
struct timespec start, stop;
double accum;
if( clock_gettime( CLOCK_REALTIME, &start) == -1 ) {
perror( "clock gettime" );
exit( EXIT_FAILURE );
}
int src = 1, final_ret = 0;
for (int t = 0; t < rows - 1; t += pyramid_height)
{
int temp = src;
src = final_ret;
final_ret = temp;
// Calculate this for the kernel argument...
int arg0 = MIN(pyramid_height, rows-t-1);
int theHalo = HALO;
// Set the kernel arguments.
clSetKernelArg(cl.kernel(kn), 0, sizeof(cl_int), (void*) &arg0);
clSetKernelArg(cl.kernel(kn), 1, sizeof(cl_mem), (void*) &d_gpuWall);
clSetKernelArg(cl.kernel(kn), 2, sizeof(cl_mem), (void*) &d_gpuResult[src]);
clSetKernelArg(cl.kernel(kn), 3, sizeof(cl_mem), (void*) &d_gpuResult[final_ret]);
clSetKernelArg(cl.kernel(kn), 4, sizeof(cl_int), (void*) &cols);
clSetKernelArg(cl.kernel(kn), 5, sizeof(cl_int), (void*) &rows);
clSetKernelArg(cl.kernel(kn), 6, sizeof(cl_int), (void*) &t);
clSetKernelArg(cl.kernel(kn), 7, sizeof(cl_int), (void*) &borderCols);
clSetKernelArg(cl.kernel(kn), 8, sizeof(cl_int), (void*) &theHalo);
clSetKernelArg(cl.kernel(kn), 9, sizeof(cl_int) * (cl.localSize()), 0);
clSetKernelArg(cl.kernel(kn), 10, sizeof(cl_int) * (cl.localSize()), 0);
clSetKernelArg(cl.kernel(kn), 11, sizeof(cl_mem), (void*) &d_outputBuffer);
cl.launch(kn);
}
if( clock_gettime( CLOCK_REALTIME, &stop) == -1 ) {
perror( "clock gettime" );
exit( EXIT_FAILURE );
}
printf( "%lf\n", stop.tv_sec );
printf( "%lf\n", start.tv_sec );
accum = ( stop.tv_sec - start.tv_sec )
+ ( stop.tv_nsec - start.tv_nsec )
/ BILLION;
printf( "%lf\n", accum );
Any advice on what I'm doing wrong is much appreciated.
timespec::tv_nsec is an integer type, so if BILLION is also an integer type then:
( stop.tv_nsec - start.tv_nsec )
/ BILLION;
will truncate to zero. If the tv_sec values are the same you get a zero difference.
Try:
double( stop.tv_nsec - start.tv_nsec )
/ BILLION;
That will perform the division with a double type.

Creating Separate Context for Each GPU while having one display monitor

I want to create one GL context for each GPU on Linux using GLX. As the NVIDIA slides show, it should be pretty simple: just pass ":0.0" to XOpenDisplay for the first GPU and ":0.1" for the second. I have tried it, but it only works with ":0.0", not with ":0.1". I have two GPUs: a GTX 980 and a GTX 970. Also, as my xorg.conf shows, Xinerama is disabled. Furthermore, I have only one display monitor, and it is connected to the GTX 980.
Do you have any idea how to fix that, or what is missing?
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <GL/gl.h>
#include <GL/glx.h>
#define GLX_CONTEXT_MAJOR_VERSION_ARB 0x2091
#define GLX_CONTEXT_MINOR_VERSION_ARB 0x2092
typedef GLXContext (*glXCreateContextAttribsARBProc)(Display*, GLXFBConfig, GLXContext, Bool, const int*);
// Helper to check for extension string presence. Adapted from:
// http://www.opengl.org/resources/features/OGLextensions/
static bool isExtensionSupported(const char *extList, const char *extension)
{
const char *start;
const char *where, *terminator;
/* Extension names should not have spaces. */
where = strchr(extension, ' ');
if (where || *extension == '\0')
return false;
/* It takes a bit of care to be fool-proof about parsing the
OpenGL extensions string. Don't be fooled by sub-strings,
etc. */
for (start=extList;;) {
where = strstr(start, extension);
if (!where)
break;
terminator = where + strlen(extension);
if ( where == start || *(where - 1) == ' ' )
if ( *terminator == ' ' || *terminator == '\0' )
return true;
start = terminator;
}
return false;
}
static bool ctxErrorOccurred = false;
static int ctxErrorHandler( Display *dpy, XErrorEvent *ev )
{
ctxErrorOccurred = true;
return 0;
}
int main(int argc, char* argv[])
{
Display *display = XOpenDisplay(":0.1");
if (!display)
{
printf("Failed to open X display\n");
exit(1);
}
// Get a matching FB config
static int visual_attribs[] =
{
GLX_X_RENDERABLE , True,
GLX_DRAWABLE_TYPE , GLX_WINDOW_BIT,
GLX_RENDER_TYPE , GLX_RGBA_BIT,
GLX_X_VISUAL_TYPE , GLX_TRUE_COLOR,
GLX_RED_SIZE , 8,
GLX_GREEN_SIZE , 8,
GLX_BLUE_SIZE , 8,
GLX_ALPHA_SIZE , 8,
GLX_DEPTH_SIZE , 24,
GLX_STENCIL_SIZE , 8,
GLX_DOUBLEBUFFER , True,
//GLX_SAMPLE_BUFFERS , 1,
//GLX_SAMPLES , 4,
None
};
int glx_major, glx_minor;
// FBConfigs were added in GLX version 1.3.
if ( !glXQueryVersion( display, &glx_major, &glx_minor ) ||
( ( glx_major == 1 ) && ( glx_minor < 3 ) ) || ( glx_major < 1 ) )
{
printf("Invalid GLX version");
exit(1);
}
printf( "Getting matching framebuffer configs\n" );
int fbcount;
GLXFBConfig* fbc = glXChooseFBConfig(display, DefaultScreen(display), visual_attribs, &fbcount);
if (!fbc)
{
printf( "Failed to retrieve a framebuffer config\n" );
exit(1);
}
printf( "Found %d matching FB configs.\n", fbcount );
// Pick the FB config/visual with the most samples per pixel
printf( "Getting XVisualInfos\n" );
int best_fbc = -1, worst_fbc = -1, best_num_samp = -1, worst_num_samp = 999;
int i;
for (i=0; i<fbcount; ++i)
{
XVisualInfo *vi = glXGetVisualFromFBConfig( display, fbc[i] );
if ( vi )
{
int samp_buf, samples;
glXGetFBConfigAttrib( display, fbc[i], GLX_SAMPLE_BUFFERS, &samp_buf );
glXGetFBConfigAttrib( display, fbc[i], GLX_SAMPLES , &samples );
printf( " Matching fbconfig %d, visual ID 0x%2x: SAMPLE_BUFFERS = %d,"
" SAMPLES = %d\n",
i, vi -> visualid, samp_buf, samples );
if ( best_fbc < 0 || samp_buf && samples > best_num_samp )
best_fbc = i, best_num_samp = samples;
if ( worst_fbc < 0 || !samp_buf || samples < worst_num_samp )
worst_fbc = i, worst_num_samp = samples;
}
XFree( vi );
}
GLXFBConfig bestFbc = fbc[ best_fbc ];
// Be sure to free the FBConfig list allocated by glXChooseFBConfig()
XFree( fbc );
// Get a visual
XVisualInfo *vi = glXGetVisualFromFBConfig( display, bestFbc );
printf( "Chosen visual ID = 0x%x\n", vi->visualid );
printf( "Creating colormap\n" );
XSetWindowAttributes swa;
Colormap cmap;
swa.colormap = cmap = XCreateColormap( display,
RootWindow( display, vi->screen ),
vi->visual, AllocNone );
swa.background_pixmap = None ;
swa.border_pixel = 0;
swa.event_mask = StructureNotifyMask;
printf( "Creating window\n" );
Window win = XCreateWindow( display, RootWindow( display, vi->screen ),
0, 0, 100, 100, 0, vi->depth, InputOutput,
vi->visual,
CWBorderPixel|CWColormap|CWEventMask, &swa );
if ( !win )
{
printf( "Failed to create window.\n" );
exit(1);
}
// Done with the visual info data
XFree( vi );
XStoreName( display, win, "GL 3.0 Window" );
printf( "Mapping window\n" );
XMapWindow( display, win );
// Get the default screen's GLX extension list
const char *glxExts = glXQueryExtensionsString( display,
DefaultScreen( display ) );
// NOTE: It is not necessary to create or make current to a context before
// calling glXGetProcAddressARB
glXCreateContextAttribsARBProc glXCreateContextAttribsARB = 0;
glXCreateContextAttribsARB = (glXCreateContextAttribsARBProc)
glXGetProcAddressARB( (const GLubyte *) "glXCreateContextAttribsARB" );
GLXContext ctx = 0;
// Install an X error handler so the application won't exit if GL 3.0
// context allocation fails.
//
// Note this error handler is global. All display connections in all threads
// of a process use the same error handler, so be sure to guard against other
// threads issuing X commands while this code is running.
ctxErrorOccurred = false;
int (*oldHandler)(Display*, XErrorEvent*) =
XSetErrorHandler(&ctxErrorHandler);
// Check for the GLX_ARB_create_context extension string and the function.
// If either is not present, use GLX 1.3 context creation method.
if ( !isExtensionSupported( glxExts, "GLX_ARB_create_context" ) ||
!glXCreateContextAttribsARB )
{
printf( "glXCreateContextAttribsARB() not found"
" ... using old-style GLX context\n" );
ctx = glXCreateNewContext( display, bestFbc, GLX_RGBA_TYPE, 0, True );
}
// If it does, try to get a GL 3.0 context!
else
{
int context_attribs[] =
{
GLX_CONTEXT_MAJOR_VERSION_ARB, 3,
GLX_CONTEXT_MINOR_VERSION_ARB, 0,
//GLX_CONTEXT_FLAGS_ARB , GLX_CONTEXT_FORWARD_COMPATIBLE_BIT_ARB,
None
};
printf( "Creating context\n" );
ctx = glXCreateContextAttribsARB( display, bestFbc, 0,
True, context_attribs );
// Sync to ensure any errors generated are processed.
XSync( display, False );
if ( !ctxErrorOccurred && ctx )
printf( "Created GL 3.0 context\n" );
else
{
// Couldn't create GL 3.0 context. Fall back to old-style 2.x context.
// When a context version below 3.0 is requested, implementations will
// return the newest context version compatible with OpenGL versions less
// than version 3.0.
// GLX_CONTEXT_MAJOR_VERSION_ARB = 1
context_attribs[1] = 1;
// GLX_CONTEXT_MINOR_VERSION_ARB = 0
context_attribs[3] = 0;
ctxErrorOccurred = false;
printf( "Failed to create GL 3.0 context"
" ... using old-style GLX context\n" );
ctx = glXCreateContextAttribsARB( display, bestFbc, 0,
True, context_attribs );
}
}
// Sync to ensure any errors generated are processed.
XSync( display, False );
// Restore the original error handler
XSetErrorHandler( oldHandler );
if ( ctxErrorOccurred || !ctx )
{
printf( "Failed to create an OpenGL context\n" );
exit(1);
}
// Verifying that context is a direct context
if ( ! glXIsDirect ( display, ctx ) )
{
printf( "Indirect GLX rendering context obtained\n" );
}
else
{
printf( "Direct GLX rendering context obtained\n" );
}
printf( "Making context current\n" );
glXMakeCurrent( display, win, ctx );
glClearColor( 0, 0.5, 1, 1 );
glClear( GL_COLOR_BUFFER_BIT );
glXSwapBuffers ( display, win );
sleep( 1 );
glClearColor ( 1, 0.5, 0, 1 );
glClear ( GL_COLOR_BUFFER_BIT );
glXSwapBuffers ( display, win );
sleep( 1 );
glXMakeCurrent( display, 0, 0 );
glXDestroyContext( display, ctx );
XDestroyWindow( display, win );
XFreeColormap( display, cmap );
XCloseDisplay( display );
return 0;
}
The reason it works with ":0.0" but not with ":0.1" is that these are the X display and screen numbers: ":0.0" means the first screen on the first display, and ":0.1" means the second screen on the first display.
These numbers select which screen the window is displayed on, not which GPU is used for rendering. As you have only one monitor attached, you have only one screen, so ":0.1" fails.
I believe the slides expect you to have two or more monitors attached, each driven by a different GPU (i.e. a second X screen configured in xorg.conf).

Converting DTED to another format

I'm trying to convert a DTED file to another format so I can display it in an application; for now I'm targeting the JPEG format. What I need is basically the gdal_translate command, but through the C++ API.
I'm using the GDAL C++ API and I can open and read the input DTED file without problems:
GDALAllRegister();
GDALDataset *poDataset;
poDataset = (GDALDataset *) GDALOpen( "n43.dt2", GA_ReadOnly );
if( poDataset == NULL )
{
return 0;
}
I can also access the corresponding band without an issue.
GDALRasterBand *poBand;
int nBlockXSize, nBlockYSize;
int bGotMin, bGotMax;
double adfMinMax[2];
poBand = poDataset->GetRasterBand( 1 );
poBand->GetBlockSize( &nBlockXSize, &nBlockYSize );
printf( "Block=%dx%d Type=%s, ColorInterp=%s\n",
nBlockXSize, nBlockYSize,
GDALGetDataTypeName(poBand->GetRasterDataType()),
GDALGetColorInterpretationName(
poBand->GetColorInterpretation()) );
adfMinMax[0] = poBand->GetMinimum( &bGotMin );
adfMinMax[1] = poBand->GetMaximum( &bGotMax );
if( ! (bGotMin && bGotMax) )
GDALComputeRasterMinMax((GDALRasterBandH)poBand, TRUE, adfMinMax);
printf( "Min=%.3fd, Max=%.3f\n", adfMinMax[0], adfMinMax[1] );
if( poBand->GetOverviewCount() > 0 )
printf( "Band has %d overviews.\n", poBand->GetOverviewCount() );
if( poBand->GetColorTable() != NULL )
printf( "Band has a color table with %d entries.\n",
poBand->GetColorTable()->GetColorEntryCount() );
But I can't figure out how to add this band to another dataset using the desired driver. My application crashes when I try to use the AddBand function.
float *pafScanline;
int nXSize = poBand->GetXSize();
pafScanline = (float *) CPLMalloc(sizeof(float)*nXSize);
poBand->RasterIO( GF_Read, 0, 0, nXSize, 1,
pafScanline, nXSize, 1, GDT_Float32,
0, 0 );
GDALClose(poDataset);
GDALDataset *resDataset;
GDALRasterBand *resBand;
resDataset->AddBand (GDT_Float32, NULL);//<-application crashes here
/*resBand = resDataset->GetRasterBand(1);
resBand->RasterIO( GF_Write, 0, 0, nXSize, 1,
pafScanline, nXSize, 1, GDT_Float32,
0, 0 );*/
So I guess what I'm trying to do is not the proper way to do what I need. Could you explain what I am doing wrong?
Okay, I think I figured it out: unless I go through a virtual raster, I shouldn't manipulate raster bands directly but should just use CreateCopy.
Here's code that works for me:
#include <iostream>
#include "gdal_priv.h"
#include "cpl_conv.h" // for CPLMalloc()
using namespace std;
int main(int argc, char *argv[])
{
GDALAllRegister();
// GDALOpenShared() already returns a GDALDatasetH handle; no cast needed.
GDALDatasetH poDataset = GDALOpenShared( "n43.dt2", GA_ReadOnly );
if( poDataset == NULL )
{
return 1;
}
const char *pszFormat = "PNG";
GDALDatasetH resDataset;
GDALProgressFunc pfnProgress = GDALTermProgress;
GDALDriverH hDriver = GDALGetDriverByName( pszFormat );
const char *pszDest = "n43.png";
char **papszCreateOptions = NULL;
resDataset = GDALCreateCopy( hDriver, pszDest, poDataset,
FALSE, papszCreateOptions,
pfnProgress, NULL );
if( resDataset != NULL )
{
GDALClose( resDataset );
}
else
{
printf("Error creating output dataset.");
}
GDALClose(poDataset);
CSLDestroy( papszCreateOptions );
return 0;
}
This gives a slightly brighter PNG image than the one I get from gdal_translate; I still need to figure out what's causing that. It also works with JPEG, but the result cannot be read (I think it's an application-specific format, as gdalinfo --formats lists it as "JPEG JTIF").

Convert unsigned char * to FlyCapture2 Image for cvShowImage OpenCV

I'm using three Point Grey cameras to acquire images and save them to my hard disk.
I'm using the MultiCameraWriteToDiskEx example and it works great, but I would also like to display the images during acquisition, so I'm trying to convert the FlyCapture images to a readable OpenCV format in order to show them with the cvShowImage() or imshow() functions.
I have a function IplImage* ConvertImageToOpenCV(Image* pImage) which can convert a FlyCapture2 Image* to an OpenCV IplImage*, but I do not know how to correctly turn the unsigned char* buffer in the doGrabLoop() function, which captures frames from the cameras, into a FlyCapture2::Image*.
Can you help me, please?
I'm not very confident with C/C++ :(
In particular, I do not know how to convert g_arImageplus[ uiCamera ].image.pData in order to pass it to ConvertImageToOpenCV(). (Is it correct to use g_arImageplus[ uiCamera ].image.pData?)
IplImage* ConvertImageToOpenCV(Image* pImage)
{
IplImage* cvImage = NULL;
bool bColor = true;
CvSize mySize;
mySize.height = pImage->GetRows();
mySize.width = pImage->GetCols();
printf("ciao %d\n", pImage->GetPixelFormat() );
switch ( pImage->GetPixelFormat() )
{
case PIXEL_FORMAT_MONO8: cvImage = cvCreateImageHeader(mySize, 8, 1 );
cvImage->depth = IPL_DEPTH_8U;
cvImage->nChannels = 1;
bColor = false;
break;
case PIXEL_FORMAT_411YUV8: cvImage = cvCreateImageHeader(mySize, 8, 3 );
cvImage->depth = IPL_DEPTH_8U;
cvImage->nChannels = 3;
break;
case PIXEL_FORMAT_422YUV8: cvImage = cvCreateImageHeader(mySize, 8, 3 );
cvImage->depth = IPL_DEPTH_8U;
cvImage->nChannels = 3;
break;
case PIXEL_FORMAT_444YUV8: cvImage = cvCreateImageHeader(mySize, 8, 3 );
cvImage->depth = IPL_DEPTH_8U;
cvImage->nChannels = 3;
break;
case PIXEL_FORMAT_RGB8: cvImage = cvCreateImageHeader(mySize, 8, 3 );
cvImage->depth = IPL_DEPTH_8U;
cvImage->nChannels = 3;
break;
case PIXEL_FORMAT_MONO16: cvImage = cvCreateImageHeader(mySize, 16, 1 );
cvImage->depth = IPL_DEPTH_16U;
cvImage->nChannels = 1;
bColor = false;
break;
case PIXEL_FORMAT_RGB16: cvImage = cvCreateImageHeader(mySize, 16, 3 );
cvImage->depth = IPL_DEPTH_16U;
cvImage->nChannels = 3;
break;
case PIXEL_FORMAT_S_MONO16: cvImage = cvCreateImageHeader(mySize, 16, 1 );
cvImage->depth = IPL_DEPTH_16U;
cvImage->nChannels = 1;
bColor = false;
break;
case PIXEL_FORMAT_S_RGB16: cvImage = cvCreateImageHeader(mySize, 16, 3 );
cvImage->depth = IPL_DEPTH_16U;
cvImage->nChannels = 3;
break;
case PIXEL_FORMAT_RAW8: cvImage = cvCreateImageHeader(mySize, 8, 3 );
cvImage->depth = IPL_DEPTH_8U;
cvImage->nChannels = 3;
break;
case PIXEL_FORMAT_RAW16: cvImage = cvCreateImageHeader(mySize, 8, 3 );
cvImage->depth = IPL_DEPTH_8U;
cvImage->nChannels = 3;
break;
case PIXEL_FORMAT_MONO12: printf("Not supported by OpenCV");
bColor = false;
break;
case PIXEL_FORMAT_RAW12: printf("Not supported by OpenCV");
break;
case PIXEL_FORMAT_BGR: cvImage = cvCreateImageHeader(mySize, 8, 3 );
cvImage->depth = IPL_DEPTH_8U;
cvImage->nChannels = 3;
break;
case PIXEL_FORMAT_BGRU: cvImage = cvCreateImageHeader(mySize, 8, 4 );
cvImage->depth = IPL_DEPTH_8U;
cvImage->nChannels = 4;
break;
case PIXEL_FORMAT_RGBU: cvImage = cvCreateImageHeader(mySize, 8, 4 );
cvImage->depth = IPL_DEPTH_8U;
cvImage->nChannels = 4;
break;
default: printf("Some error occured...\n");
return NULL;
}
if(bColor) {
if(!bInitialized)
{
colorImage.SetData(new unsigned char[pImage->GetCols() * pImage->GetRows()*3], pImage->GetCols() * pImage->GetRows()*3);
bInitialized = true;
}
pImage->Convert(PIXEL_FORMAT_BGR, &colorImage); //needs to be as BGR to be saved
cvImage->width = colorImage.GetCols();
cvImage->height = colorImage.GetRows();
cvImage->widthStep = colorImage.GetStride();
cvImage->origin = 0; //interleaved color channels
cvImage->imageDataOrigin = (char*)colorImage.GetData(); //DataOrigin and Data same pointer, no ROI
cvImage->imageData = (char*)(colorImage.GetData());
cvImage->widthStep = colorImage.GetStride();
cvImage->nSize = sizeof (IplImage);
cvImage->imageSize = cvImage->height * cvImage->widthStep;
}
else
{
cvImage->imageDataOrigin = (char*)(pImage->GetData());
cvImage->imageData = (char*)(pImage->GetData());
cvImage->widthStep = pImage->GetStride();
cvImage->nSize = sizeof (IplImage);
cvImage->imageSize = cvImage->height * cvImage->widthStep;
//at this point cvImage contains a valid IplImage
}
return cvImage;
}
//
// Grab and test loop
//
int doGrabLoop()
{
FlyCaptureError error = FLYCAPTURE_FAILED;
unsigned int aruiPrevSeqNum[ _MAX_CAMERAS ];
unsigned int aruiDelta[ _MAX_CAMERAS ];
unsigned int aruiCycles[ _MAX_CAMERAS ];
HANDLE arhFile[ _MAX_CAMERAS ];
DWORD ardwBytesWritten[ _MAX_CAMERAS ];
DWORD dwTotalKiloBytesWritten = 0;
bool bMissed = false;
bool bOutOfSync = false;
unsigned int uiMissedImages = 0;
unsigned int uiOutOfSyncImages = 0;
__int64 nStartTime = 0;
__int64 nEndTime = 0;
__int64 nDifference = 0;
__int64 nTotalTime = 0;
__int64 nGlobalStartTime = 0;
__int64 nGlobalEndTime = 0;
__int64 nGlobalTotalTime = 0;
__int64 nFrequency = 0;
QueryPerformanceFrequency( (LARGE_INTEGER*)&nFrequency );
QueryPerformanceCounter( (LARGE_INTEGER*)&nGlobalStartTime );
printf( "Starting grab...\n" );
// Create files to write to
if ( createFiles( arhFile ) != 0 )
{
printf( "There was error creating the files\n" );
return -1;
}
BOOL bSuccess;
//
// Start grabbing the images
//
for( int iImage = 0; iImage < g_iNumImagesToGrab; iImage++ )
{
#ifdef _VERBOSE
printf( "Grabbing image %u\n", iImage );
#else
printf( "." );
#endif
unsigned int uiCamera = 0;
// Grab an image from each camera
for( uiCamera = 0; uiCamera < g_uiNumCameras; uiCamera++ )
{
error = flycaptureLockNext( g_arContext[uiCamera], &g_arImageplus[uiCamera] );
_HANDLE_ERROR( error, "flycaptureLockNext()" );
// Save image dimensions & bayer info from first image for each camera
if(iImage == 0)
{
g_arImageTemplate[uiCamera] = g_arImageplus[uiCamera].image;
error = flycaptureGetColorTileFormat(g_arContext[uiCamera], &g_arBayerTile[uiCamera]);
_HANDLE_ERROR( error, "flycaptureGetColorTileFormat()" );
}
}
for( uiCamera = 0; uiCamera < g_uiNumCameras; uiCamera++ )
{
// Start timer
QueryPerformanceCounter( (LARGE_INTEGER*)&nStartTime );
// Calculate the size of the image to be written
int iImageSize = 0;
int iRowInc = g_arImageplus[uiCamera].image.iRowInc;
int iRows = g_arImageplus[uiCamera].image.iRows;
iImageSize = iRowInc * iRows;
// ERROR: HOW CAN I CONVERT g_arImageplus[ uiCamera ].image.pData IN ORDER TO USE IT WITH ConvertImageToOpenCV() function?
IplImage* destImage = ConvertImageToOpenCV(g_arImageplus[ uiCamera ].image.pData);
cvShowImage("prova", destImage);
waitKey(1);
// Write to the file
bSuccess = WriteFile(
arhFile[uiCamera],
g_arImageplus[ uiCamera ].image.pData,
iImageSize,
&ardwBytesWritten[uiCamera],
NULL );
// End timer
QueryPerformanceCounter( (LARGE_INTEGER*)&nEndTime );
// Ensure that the write was successful
if ( !bSuccess || ( ardwBytesWritten[uiCamera] != (unsigned)iImageSize ) )
{
printf( "Error writing to file for camera %u!\n", uiCamera );
return -1;
}
// Update various counters
dwTotalKiloBytesWritten += (ardwBytesWritten[uiCamera] / 1024);
nDifference = nEndTime - nStartTime;
nTotalTime += nDifference;
// Keep track of the difference in image sequence numbers (uiSeqNum)
// in order to determine if any images have been missed. A difference
// greater than 1 indicates that an image has been missed.
if( iImage == 0 )
{
// This is the first image, set up the variables
aruiPrevSeqNum[uiCamera] = g_arImageplus[uiCamera].uiSeqNum;
aruiDelta[uiCamera] = 1;
}
else
{
// Get the difference in sequence numbers between the current
// image and the last image we received
aruiDelta[uiCamera] =
g_arImageplus[uiCamera].uiSeqNum - aruiPrevSeqNum[uiCamera];
}
if( aruiDelta[uiCamera] != 1 )
{
// We have missed an image.
bMissed = true;
uiMissedImages += aruiDelta[uiCamera] - 1;
}
else
{
bMissed = false;
}
aruiPrevSeqNum[uiCamera] = g_arImageplus[uiCamera].uiSeqNum;
// Calculate the cycle count for the camera
aruiCycles[uiCamera] =
g_arImageplus[uiCamera].image.timeStamp.ulCycleSeconds * 8000 +
g_arImageplus[uiCamera].image.timeStamp.ulCycleCount;
// Determine the difference of the timestamp for every image from the
// first camera. If the difference is greater than 1 cycle count,
// register the camera as being out of synchronization.
int iDeltaFrom0 = abs( (int)(aruiCycles[uiCamera] - aruiCycles[0]) );
if( ( iDeltaFrom0 % ( 128 * 8000 - 1 ) ) > 1 )
{
bOutOfSync = true;
uiOutOfSyncImages++;
}
else
{
bOutOfSync = false;
}
#ifdef _VERBOSE
// Output is in the following order:
// - The index of the image being captured
// - The index of the camera that is currently being captured
// - The time taken to write the image to disk
// - Number of kilobytes written
// - Write speed (in MB/s)
// - Sequence number
// - Cycle seconds in timestamp
// - Cycle count in timestamp
// - Delta from 0th value
// - Missed an image?
// - Out of sync?
printf(
"%04d: \t%02u\t%0.5f\t%.0lf\t%.2lf\t%04u\t%03u.%04u\t%d\t%s %s\n",
iImage,
uiCamera,
nDifference,
(double)ardwBytesWritten[ uiCamera ] / 1024.0,
(double)ardwBytesWritten[ uiCamera ] / ( 1024 * 1024 * nDifference ),
g_arImageplus[ uiCamera ].uiSeqNum,
g_arImageplus[ uiCamera ].image.timeStamp.ulCycleSeconds,
g_arImageplus[ uiCamera ].image.timeStamp.ulCycleCount,
iDeltaFrom0,
bMissed ? "Y" : "N",
bOutOfSync ? "Y" : "N");
#endif
}
// Unlock image, handing the buffer back to the buffer pool.
for( uiCamera = 0; uiCamera < g_uiNumCameras; uiCamera++ )
{
error = flycaptureUnlock(
g_arContext[uiCamera], g_arImageplus[uiCamera].uiBufferIndex );
_HANDLE_ERROR( error, "flycaptureUnlock()" );
}
}
//
// Done grabbing images
//
QueryPerformanceCounter( (LARGE_INTEGER*)&nGlobalEndTime );
nGlobalTotalTime = nGlobalEndTime - nGlobalStartTime;
double dGlobalTotalTime = (double)nGlobalTotalTime / (double)nFrequency;
double dTotalTime = (double)nTotalTime / (double)nFrequency;
// Report on the results
// Burst time is the time that was spent writing to disk only
// Overall time is total time taken, including image grabs, calculations etc
printf(
"\nBurst: Wrote %.1lfMB in %0.2fs ( %.2lfMB/sec )\n",
(double)( dwTotalKiloBytesWritten / 1024 ),
dTotalTime,
(double)( dwTotalKiloBytesWritten / ( 1024 * dTotalTime ) ) );
printf(
"Overall: Wrote %.1lfMB in %0.2fs ( %.2lfMB/sec )\n",
(double)( dwTotalKiloBytesWritten / 1024 ),
dGlobalTotalTime,
(double)( dwTotalKiloBytesWritten / ( 1024 * dGlobalTotalTime ) ) );
printf( g_bSyncSuccess ? "Sync success\n" : "Sync failed\n" );
printf( "Missed images = %u.\n", uiMissedImages );
printf( "Out of sync images = %u.\n", uiOutOfSyncImages );
for ( unsigned int uiCamera = 0; uiCamera < g_uiNumCameras; uiCamera++ )
{
// Close file handles
CloseHandle(arhFile[uiCamera]);
}
return 0;
}
Your conversion method:
IplImage* ConvertImageToOpenCV(Image* pImage)
takes an Image pointer, so construct a temporary Image from the data you have and pass its address. The FlyCapture2 Image class has a constructor that wraps an existing buffer without copying it or taking ownership:
Image ( unsigned int rows,
unsigned int cols,
unsigned int stride,
unsigned char* pData,
unsigned int dataSize,
PixelFormat format,
BayerTileFormat bayerFormat = NONE
)
So your code would look something like:
// Pull these values from your other image structure
Image image(rows, cols, stride, pData, dataSize, format, bayerFormat);
// Pass the address of the temporary image to your conversion
IplImage* opencvImage = ConvertImageToOpenCV(&image);