I can't move the TrackBar - C++

I am writing OpenCV code in C++ to play a video. I want to add a trackbar to adjust the playback speed, but the trackbar doesn't move, and I can't focus the video window. This is my OpenCV C++ code. What should I do?
void onTrackbarSlide(int pos, void*) {
    sec = 1;
    sec /= pos;
    printf("pos = %d\n", pos);
}

int main(void)
{
    strcat(InputFile, FileName); // input video file
    //VideoCapture cap(0); // Create an object and open the default(0) camera. C++ Syntax: VideoCapture::VideoCapture(int device)
    VideoCapture cap; // Class for video capturing from video files or cameras.
    cap.open(InputFile); // Open a video file or a capturing device for video capturing.
    if (!cap.isOpened()) { // Check if the file was opened successfully.
        printf("\nFail to open a video file");
        return -1;
    }
    // When querying a property that is not supported by the backend used by the VideoCapture class, value 0 is returned.
    double fps = cap.get(CV_CAP_PROP_FPS); printf("\nCV_CAP_PROP_FPS = %f", fps);
    double ToTalFrames = cap.get(CV_CAP_PROP_FRAME_COUNT); printf("\nCV_CAP_PROP_FRAME_COUNT = %f", ToTalFrames);
    CvSize FrameSize;
    FrameSize.width  = (int)cap.get(CV_CAP_PROP_FRAME_WIDTH);
    FrameSize.height = (int)cap.get(CV_CAP_PROP_FRAME_HEIGHT);
    printf("\nWidth * Height = %d * %d\n", FrameSize.width, FrameSize.height);
    VideoWriter wrt;
#define CODEC -1
    namedWindow("original", 1);
    int slider_position = 0;
    int slider_max = 255;
    createTrackbar("video speed", "original", &slider_position, slider_max, onTrackbarSlide);
    wrt.open(OutputFile, CODEC, fps, FrameSize);
    Mat frame, dst1;
    Mat dst2;
    for (int i = 1; i < ToTalFrames; i++)
    {
        cap.read(frame);
        if (frame.data == NULL) { printf("\nNo image found!\n"); return 0; }
        imshow("original", frame);
        if (waitKey(sec / fps) == 0x1b) break; // Break if the key input is the escape code.
    }
    return 0;
}
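Two details in the code above are likely culprits: `onTrackbarSlide` divides by `pos`, which is 0 at the slider's initial position, and `sec / fps` truncates toward 0 when passed to `waitKey`, and `waitKey(0)` blocks until a key is pressed, which freezes playback. Below is a minimal sketch of a guarded callback and delay computation. The input file name and the `g_delay_ms` global (standing in for the original `sec`) are illustrative assumptions, not code from the question:

#include <opencv2/opencv.hpp>
#include <algorithm>
#include <cstdio>

using namespace cv;

static int g_delay_ms = 33; // illustrative global: delay between frames in ms

// Trackbar callback: map the slider position to a frame delay, guarding against pos == 0.
void onTrackbarSlide(int pos, void*)
{
    if (pos < 1) pos = 1;        // avoid division by zero at the initial position
    g_delay_ms = 1000 / pos;     // higher slider value -> shorter delay -> faster playback
    printf("pos = %d, delay = %d ms\n", pos, g_delay_ms);
}

int main()
{
    VideoCapture cap("input.avi"); // assumed file name, for illustration
    if (!cap.isOpened()) return -1;

    namedWindow("original", 1);
    int slider_position = 30;      // start away from 0 so the initial delay is sane
    createTrackbar("video speed", "original", &slider_position, 255, onTrackbarSlide);

    Mat frame;
    while (cap.read(frame)) {
        imshow("original", frame);
        // waitKey needs a positive int; 0 would block until a key is pressed.
        if (waitKey(std::max(g_delay_ms, 1)) == 27) break; // 27 == ESC
    }
    return 0;
}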

Related

OpenCV cannot load the camera parameters (DynamicLib::libraryload ....\opencv_videoio_gstreamer452_64d.dll)

When I run the code, the calibration works, but it cannot load the ArUco marker camera parameters into the exe window.
When I debug the code, the console reports the error DynamicLib::libraryload ....\opencv_videoio_gstreamer452_64d.dll every time it reaches
vid.isOpened()
vid.read(frame)
Here is the full code of the function:
void cameraCalibrationProcess(Mat& cameraMatrix, Mat& distortionCoefficients)
{
    Mat frame;
    Mat drawtoframe;
    vector<Mat> savedImages;
    vector<vector<Point2f>> markerCorners, rejectedCandidates; // for ArUco markers
    VideoCapture vid(0);
    if (!vid.isOpened())
    {
        return;
    }
    int framepersecond = 20;
    namedWindow("Webcam", WINDOW_AUTOSIZE);
    while (true)
    {
        if (!vid.read(frame))
            break;
        vector<Vec2f> foundpoints;
        bool found = false;
        found = findChessboardCorners(frame, checkerboard, foundpoints, CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE | CALIB_CB_FAST_CHECK);
        frame.copyTo(drawtoframe);
        drawChessboardCorners(drawtoframe, checkerboard, foundpoints, found);
        if (found)
        {
            imshow("Webcam", drawtoframe);
        }
        else
            imshow("Webcam", frame);
        char character = waitKey(1000 / framepersecond);
    }
}
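A note on that log line: the `DynamicLib::libraryload` message is OpenCV's videoio module probing an optional backend plugin (here the GStreamer DLL) when a capture is opened, and it is usually informational rather than fatal. One thing to try, as an assumption rather than a confirmed fix, is to request a specific backend explicitly so the plugin is never probed (the two-argument `VideoCapture` constructor exists in OpenCV 4.x, which the `452` in the DLL name suggests):

// Sketch: force a specific capture backend so videoio does not probe the
// optional GStreamer plugin. CAP_DSHOW is Windows-only; use CAP_V4L2 on Linux.
// Whether this resolves the calibration issue is an assumption -- the log
// line itself may be harmless.
cv::VideoCapture vid(0, cv::CAP_DSHOW);
if (!vid.isOpened()) {
    vid.open(0, cv::CAP_ANY); // fall back to the default backend resolution
}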

How to use "raspicam::RaspiCam_Cv" instead of "CvCapture *capture = cvCaptureFromAVI(a.avi)" in OpenCV C++

I am detecting shapes in real time with the help of OpenCV in C++. I found code that reads from a file and detects shapes, but in my case the camera should detect in real time. How can I use raspicam::RaspiCam_Cv capture; instead of CvCapture *capture = cvCaptureFromAVI("a.avi"); in C++?
#include <cv.h>
#include <highgui.h>
#include <stdio.h>
#include <math.h>

using namespace std;

IplImage* imgTracking = 0;
int lastX1 = -1;
int lastY1 = -1;
int lastX2 = -1;
int lastY2 = -1;

void trackObject(IplImage* imgThresh)
{
    CvSeq* contour; // holds the pointer to a contour
    CvSeq* result;  // holds the sequence of points of a contour
    CvMemStorage* storage = cvCreateMemStorage(0); // storage area for all contours

    // find all contours in the image
    cvFindContours(imgThresh, storage, &contour, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));

    // iterate through each contour
    while (contour)
    {
        // obtain a sequence of points of the contour pointed to by 'contour'
        result = cvApproxPoly(contour, sizeof(CvContour), storage, CV_POLY_APPROX_DP, cvContourPerimeter(contour)*0.02, 0);
        // if the contour has 3 vertices and the area of the triangle is more than 100 pixels
        if (result->total == 3 && fabs(cvContourArea(result, CV_WHOLE_SEQ)) > 100)
        {
            // iterate through each point
            CvPoint* pt[3];
            for (int i = 0; i < 3; i++) {
                pt[i] = (CvPoint*)cvGetSeqElem(result, i);
            }
            int posX = (pt[0]->x + pt[1]->x + pt[2]->x) / 3;
            int posY = (pt[0]->y + pt[1]->y + pt[2]->y) / 3;
            if (posX > 360) {
                if (lastX1 >= 0 && lastY1 >= 0 && posX >= 0 && posY >= 0) {
                    // draw a red line from the previous point to the current point
                    cvLine(imgTracking, cvPoint(posX, posY), cvPoint(lastX1, lastY1), cvScalar(0,0,255), 4);
                }
                lastX1 = posX;
                lastY1 = posY;
            }
            else {
                if (lastX2 >= 0 && lastY2 >= 0 && posX >= 0 && posY >= 0) {
                    // draw a blue line from the previous point to the current point
                    cvLine(imgTracking, cvPoint(posX, posY), cvPoint(lastX2, lastY2), cvScalar(255,0,0), 4);
                }
                lastX2 = posX;
                lastY2 = posY;
            }
        }
        // obtain the next contour
        contour = contour->h_next;
    }
    cvReleaseMemStorage(&storage);
}

int main()
{
    // load the video file into memory
    CvCapture* capture = cvCaptureFromAVI("a.avi");
    if (!capture) {
        printf("Capture failure\n");
        return -1;
    }

    IplImage* frame = 0;
    frame = cvQueryFrame(capture);
    if (!frame) return -1;

    // create a blank image, assigned to 'imgTracking', with the same size as the original video
    imgTracking = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);
    cvZero(imgTracking); // set the image 'imgTracking' to black

    cvNamedWindow("Video");

    // iterate through each frame of the video
    while (true) {
        frame = cvQueryFrame(capture);
        if (!frame) break;
        frame = cvCloneImage(frame);

        // smooth the original image using a Gaussian kernel
        cvSmooth(frame, frame, CV_GAUSSIAN, 3, 3);

        // convert the original image to grayscale
        IplImage* imgGrayScale = cvCreateImage(cvGetSize(frame), 8, 1);
        cvCvtColor(frame, imgGrayScale, CV_BGR2GRAY);

        // threshold the grayscale image to get better results
        cvThreshold(imgGrayScale, imgGrayScale, 100, 255, CV_THRESH_BINARY_INV);

        // track the position of the ball
        trackObject(imgGrayScale);

        // add the tracking image and the frame
        cvAdd(frame, imgTracking, frame);
        cvShowImage("Video", frame);

        // clean up used images
        cvReleaseImage(&imgGrayScale);
        cvReleaseImage(&frame);

        // wait 10 ms; if 'ESC' is pressed, break the loop
        int c = cvWaitKey(10);
        if ((char)c == 27) break;
    }

    cvDestroyAllWindows();
    cvReleaseImage(&imgTracking);
    cvReleaseCapture(&capture);
    return 0;
}
I cannot use raspicam::RaspiCam_Cv capture; in place of CvCapture *capture = cvCaptureFromAVI();. I need to detect shapes in real time, for example, to call some function whenever a triangle appears. Please help me.
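For reference, a minimal sketch of how the raspicam library's RaspiCam_Cv class is typically used in place of a CvCapture: it delivers cv::Mat frames via a grab/retrieve pair, so the IplImage-based processing above needs a thin wrapper (or a port to the C++ API). The property constant and the Mat-to-IplImage conversion follow the raspicam documentation and the OpenCV 2.x API; treat the details as assumptions:

#include <raspicam/raspicam_cv.h>
#include <opencv2/opencv.hpp>
#include <cstdio>

int main()
{
    raspicam::RaspiCam_Cv camera;
    camera.set(CV_CAP_PROP_FORMAT, CV_8UC3); // request 3-channel BGR frames

    if (!camera.open()) {          // open the Pi camera instead of a video file
        printf("Error opening the camera\n");
        return -1;
    }

    cv::Mat frame;
    while (true) {
        camera.grab();             // advance to the next frame
        camera.retrieve(frame);    // decode it into a cv::Mat

        // To reuse the IplImage-based trackObject() above, wrap the Mat
        // (OpenCV 2.x conversion; this is a shallow header over the same pixels):
        IplImage ipl = frame;
        // ... threshold and call trackObject(&ipl) as in the original code ...

        cv::imshow("Video", frame);
        if ((char)cv::waitKey(10) == 27) break; // ESC to quit
    }
    camera.release();
    return 0;
}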

Issues with CvSeq push and size in OpenCV using C++

I'm trying to write a simple C++ program using OpenCV that detects faces and saves one facial bounding box for each new face that shows up in the camera. The frame handling should be right, but I'm struggling with the pointers, especially the "total" field around CvSeq. Could someone help me? Whenever it reaches the line "cvSeqPush(faces_new, r);", faces_new->total jumps to 60 or 70 without ever having been assigned anything. Very frustrated, and I would really appreciate some help.
// memory allocation
static CvMemStorage* storage = 0;
storage = cvCreateMemStorage(0);
static CvMemStorage* storage2 = 0;
storage2 = cvCreateMemStorage(0);
static CvMemStorage* storage3 = 0;
storage3 = cvCreateMemStorage(0);

// create a new named window with title: result
cvNamedWindow("Window"); // create a window to display in

CvCapture* capture = cvCaptureFromCAM(-1); // capture from video device (Macbook iSight)
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 1000);  // set capture width
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 600);  // set capture height

// declare images
IplImage* imgCamera;      // captured from camera
IplImage* imgCamera_last; // last camera image
IplImage* imgDrawn;       // image with drawing (rect containing faces)
IplImage* imgFace;        // face picture extracted from the camera

CvRect* r; // rects containing faces
CvSeq* faces      = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvRect), storage);  // sequence of faces in the camera image - CURRENT
CvSeq* faces_last = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvRect), storage2); // sequence of faces in the camera image - LAST FRAME
CvSeq* faces_new  = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvRect), storage3); // sequence of faces in the camera image - NEW FACES

float scale = 1.0/5; // how far we want to scale down the images for Haar detection, for speed

// create a new Haar classifier
static CvHaarClassifierCascade* cascade = 0;
cascade = (CvHaarClassifierCascade*)cvLoad(cascade_name, 0, 0, 0);

// file name under which to save the faces
std::stringstream filename;
int counter = 1;
int filecounter = 1;

while (1) {
    //*************************************************************************************/
    // Step 1: stream video. Video to images
    //*************************************************************************************/
    // capture a frame from video and turn it into one single image - imgCamera
    capture_frame(capture, imgCamera);
    // allocate images to be used later
    imgDrawn = cvCreateImage(cvGetSize(imgCamera), imgCamera->depth, imgCamera->nChannels);
    imgFace = cvCreateImage(cvSize(600, 600), imgCamera->depth, imgCamera->nChannels);
    cvCopy(imgCamera, imgDrawn);

    if (counter == 10) { // take action every 10 frames
        counter = 1;
        //*************************************************************************************/
        // Step 2: Detection
        //*************************************************************************************/
        find_faces(imgCamera, storage, cascade, faces, scale);
        //printf("Last faces seq had %d faces detected. \n", faces_last->total);

        //*************************************************************************************/
        // Step 4: Draw every face in the picture
        //*************************************************************************************/
        // for each face found in the image
        for (int i = 0; i < (faces ? faces->total : 0); i++) {
            // get the rect from the sequence
            r = (CvRect*)cvGetSeqElem(faces, i);
            // draw the rectangle around the face on imgDrawn
            draw_rect(imgDrawn, r, scale);
        }
        cvShowImage("Window", imgDrawn);
        // press escape to quit
        if (cvWaitKey(33) == 27) break;

        //*************************************************************************************/
        // Step 3: Recognize the new faces
        //*************************************************************************************/
        // TO DO: combine the following into a function: match_faces(faces_new, faces, faces_last, lastspotted, currentspotted, imgCamera);
        for (int i = 0; i < (faces ? faces->total : 0); i++) {
            cout << "faces_new" << faces_new->total << "\n";
            // get the rect from the sequence
            r = (CvRect*)cvGetSeqElem(faces, i);
            if (faces_last->total == 0) {
                //cout << "New PERSON!!";
                cvSeqPush(faces_new, r);
            }
            else {
                for (int k = 0; k < (faces_last ? faces_last->total : 0); k++) {
                    CvRect* r_last = (CvRect*)cvGetSeqElem(faces_last, k);
                    if (!same_face(r, r_last, imgCamera, imgCamera_last, i, k)) {
                        cvSeqPush(faces_new, r);
                        //cout << "faces_new" << faces_new->total << "\n";
                    }
                }
            }
        }

        //*************************************************************************************/
        // Step 3: Process faces - save new faces, report new faces
        //*************************************************************************************/
        if ((faces_new->total) > 0) {
            // to change: save only faces_new
            save_faces(faces_new, imgCamera, imgFace, scale, filecounter);
            // report_faces(filecounter, faces_new->total, model); // report new faces stored starting from filecounter
            filecounter = filecounter + (faces_new->total);
        }
        cvClearMemStorage(storage2);
        cvSeqPush(faces_last, faces);
        //cout << "face_last:" << faces_last->total << "\n";
        cvClearMemStorage(storage);
        cvClearMemStorage(storage3);
    }
    counter++;
    imgCamera_last = imgCamera;
}

cvReleaseCapture(&capture);
cvReleaseImage(&imgCamera);
cvReleaseImage(&imgDrawn);
cvReleaseImage(&imgFace);
cvDestroyWindow("Window");
Finally got it to work. The problem seems to have been that cvSeqPush(faces_last, faces) pushed the CvSeq pointer itself as if it were a CvRect, and the storages were cleared while the sequences still referenced them. The working version below drops faces_new entirely, saves new faces directly, and rebuilds faces_last by copying each rect element by element.
// memory allocation
static CvMemStorage* storage = 0;
storage = cvCreateMemStorage(0);
static CvMemStorage* storage2 = 0;
storage2 = cvCreateMemStorage(0);

// create a new named window with title: result
cvNamedWindow("Window"); // create a window to display in

CvCapture* capture = cvCaptureFromCAM(-1); // capture from video device (Macbook iSight)
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 1000);  // set capture width
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 600);  // set capture height

// declare images
IplImage* imgCamera;      // captured from camera
IplImage* imgCamera_last; // last camera image
IplImage* imgDrawn;       // image with drawing (rect containing faces)
IplImage* imgFace;        // face picture extracted from the camera

CvRect* r; // rects containing faces
CvSeq* faces      = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvRect), storage);  // sequence of faces in the camera image - CURRENT
CvSeq* faces_last = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvRect), storage2); // sequence of faces in the camera image - LAST FRAME

float scale = 1.0/5; // how far we want to scale down the images for Haar detection, for speed

// create a new Haar classifier
static CvHaarClassifierCascade* cascade = 0;
cascade = (CvHaarClassifierCascade*)cvLoad(cascade_name, 0, 0, 0);

// file name under which to save the faces
std::stringstream filename;
int counter = 1;
int filecounter = 1;

while (1) {
    //*************************************************************************************/
    // Step 1: stream video. Video to images
    //*************************************************************************************/
    // capture a frame from video and turn it into one single image - imgCamera
    capture_frame(capture, imgCamera);
    // allocate images to be used later
    imgDrawn = cvCreateImage(cvGetSize(imgCamera), imgCamera->depth, imgCamera->nChannels);
    imgFace = cvCreateImage(cvSize(600, 600), imgCamera->depth, imgCamera->nChannels);
    cvCopy(imgCamera, imgDrawn);

    if (counter == 10) { // take action every 10 frames
        counter = 1;
        //*************************************************************************************/
        // Step 2: Detection
        //*************************************************************************************/
        find_faces(imgCamera, storage, cascade, faces, scale);
        //printf("Last faces seq had %d faces detected. \n", faces_last->total);

        //*************************************************************************************/
        // Step 4: Draw every face in the picture
        //*************************************************************************************/
        // for each face found in the image
        for (int i = 0; i < (faces ? faces->total : 0); i++) {
            // get the rect from the sequence
            r = (CvRect*)cvGetSeqElem(faces, i);
            // draw the rectangle around the face on imgDrawn
            draw_rect(imgDrawn, r, scale);
        }
        cvShowImage("Window", imgDrawn);
        // press escape to quit
        if (cvWaitKey(33) == 27) break;

        //*************************************************************************************/
        // Step 3: Recognize the new faces
        //*************************************************************************************/
        // TO DO: combine the following into a function: match_faces(faces_new, faces, faces_last, lastspotted, currentspotted, imgCamera);
        for (int i = 0; i < (faces ? faces->total : 0); i++) {
            // get the rect from the sequence
            r = (CvRect*)cvGetSeqElem(faces, i);
            if (faces_last->total == 0) {
                cout << "a face appeared: " << "there are total faces of " << faces_last->total << "\n";
                save_face(r, imgCamera, imgFace, scale, filecounter);
                filecounter++;
                //report_faces(filecounter, faces_new->total, model); // report new faces stored starting from filecounter
            }
            else {
                for (int k = 0; k < (faces_last ? faces_last->total : 0); k++) {
                    CvRect* r_last = (CvRect*)cvGetSeqElem(faces_last, k);
                    if (!same_face(r, r_last, imgCamera, imgCamera_last, i, k)) {
                        save_face(r, imgCamera, imgFace, scale, filecounter);
                        filecounter++;
                        //report_faces(filecounter, faces_new->total, model); // report new faces stored starting from filecounter
                    }
                }
            }
        }

        //cvClearMemStorage(storage2);
        // empty faces_last, then refill it with the rects detected in this frame
        while (faces_last->total > 0) {
            cvSeqPop(faces_last);
        }
        for (int i = 0; i < (faces ? faces->total : 0); i++) {
            // get the rect from the sequence
            r = (CvRect*)cvGetSeqElem(faces, i);
            cvSeqPush(faces_last, r);
        }
        //cout << "face_last:" << faces_last->total << "\n";
        cvClearMemStorage(storage);
    }
    counter++;
    imgCamera_last = imgCamera;
}

cvReleaseCapture(&capture);
cvReleaseImage(&imgCamera);
cvReleaseImage(&imgDrawn);
cvReleaseImage(&imgFace);
cvDestroyWindow("Window");

// return 0 to indicate successful execution of the program
return 0;

OpenCV: Camera choosing - strange behavior (2 cameras)

I have a few cameras in the system. I initialise them this way:
cap1 = cvCreateCameraCapture(0);
cap2 = cvCreateCameraCapture(1); // or -1
But their behaviour differs from run to run: sometimes they work together, sometimes neither of them works, and sometimes one captures well while the other shows a green screen. And sometimes the system shows me a dialogue box for choosing a device.
Here is that part of the source code:
CvCapture* cap2;
CvCapture* cap1;
printf("- Searching first cam : \n");
for (; i < LASTCAM; i++) // 'i' is declared earlier in the full source
{
    cap1 = cvCreateCameraCapture(i);
    if (!cap1)
    {
        printf("-- Camera %d is empty \n", i);
    }
    else
    {
        printf("-- Camera %d is OK \n", i);
        i++;
        break;
    }
}
printf("- Searching second cam : \n");
for (; i < LASTCAM; i++)
{
    cap2 = cvCreateCameraCapture(i);
    if (!cap2)
    {
        printf("-- Camera %d is empty \n", i);
    }
    else
    {
        printf("-- Camera %d is OK \n", i);
        break;
    }
}

printf("Frame properties:\n");
double width = cvGetCaptureProperty(cap1, CV_CAP_PROP_FRAME_WIDTH);
double height = cvGetCaptureProperty(cap1, CV_CAP_PROP_FRAME_HEIGHT);
printf("First cam : %.0f x %.0f\n", width, height);
double width2 = cvGetCaptureProperty(cap2, CV_CAP_PROP_FRAME_WIDTH);
double height2 = cvGetCaptureProperty(cap2, CV_CAP_PROP_FRAME_HEIGHT);
printf("Second cam : %.0f x %.0f\n\n", width2, height2);

IplImage* frame1 = 0;
IplImage* frame2 = 0;
cvNamedWindow("cam1", CV_WINDOW_AUTOSIZE);
cvNamedWindow("cam2", CV_WINDOW_AUTOSIZE);
int counter = 0;
char filename[512];
while (true) {
    frame1 = cvQueryFrame(cap1);
    frame2 = cvQueryFrame(cap2);
    cvShowImage("cam1", frame1);
    cvShowImage("cam2", frame2);
    ...
What's wrong with it?
Cameras 1-9 are empty; 10 is the first cam; from 11 onwards it returns cams that are "green screens".
Thanks in advance.
Have you looked at the stereo mode? It looks like it's required if you want to run multiple cameras.
USB cameras (at least through DirectShow on Windows) can be a little difficult.
Some things to try:
// A small delay between the captures
cap1 = cvCreateCameraCapture(0);
Sleep(100);
cap2 = cvCreateCameraCapture(1);
or
// call all the setup functions for camera0 before capturing camera1
cap1 = cvCreateCameraCapture(0);
cvGetCaptureProperty(cap1,......)
cap2 = cvCreateCameraCapture(1);
cvGetCaptureProperty(cap2,......)
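Putting both suggestions together, a sketch of what the initialisation might look like; the resolutions and the 100 ms delay are illustrative values, not a confirmed fix:

// Sketch combining both suggestions (Windows-specific: Sleep comes from <windows.h>).
#include <windows.h>
#include "cv.h"
#include "highgui.h"

int main()
{
    // open and fully configure the first camera before touching the second
    CvCapture* cap1 = cvCreateCameraCapture(0);
    cvSetCaptureProperty(cap1, CV_CAP_PROP_FRAME_WIDTH, 640);
    cvSetCaptureProperty(cap1, CV_CAP_PROP_FRAME_HEIGHT, 480);
    cvQueryFrame(cap1); // force the first grab so the driver settles

    Sleep(100); // small delay before opening the second device

    CvCapture* cap2 = cvCreateCameraCapture(1);
    cvSetCaptureProperty(cap2, CV_CAP_PROP_FRAME_WIDTH, 640);
    cvSetCaptureProperty(cap2, CV_CAP_PROP_FRAME_HEIGHT, 480);

    // ... query frames from both cameras as in the question ...

    cvReleaseCapture(&cap1);
    cvReleaseCapture(&cap2);
    return 0;
}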

OpenCV: process every frame

I want to write a cross-platform application using OpenCV for video capture. In all the examples I've found, frames from the camera are processed by grabbing and then waiting for a while, but I want to process every frame in the sequence. I want to define my own callback function that is executed every time a new frame is ready to be processed (like in DirectShow for Windows, where you define your own filter and put it into the graph for such purposes).
So the question is: how can I do this?
According to the code below, all callbacks would have to follow this definition:
IplImage* custom_callback(IplImage* frame);
This signature means the callback is going to be executed for each frame retrieved by the system. In my example, make_it_gray() allocates a new image to save the result of the grayscale conversion and returns it. This means you must free that frame later in your code; I added comments about it in the code.
Note that if your callback demands a lot of processing, the system might skip a few frames from the camera. Consider the suggestions Paul R and diverscuba23 made.
#include <stdio.h>
#include "cv.h"
#include "highgui.h"

typedef IplImage* (*callback_prototype)(IplImage*);

/*
 * make_it_gray: our custom callback to convert a colored frame to its grayscale version.
 * Remember that you must deallocate the returned IplImage* yourself after calling this function.
 */
IplImage* make_it_gray(IplImage* frame)
{
    // allocate space for a new image
    IplImage* gray_frame = 0;
    gray_frame = cvCreateImage(cvSize(frame->width, frame->height), frame->depth, 1);
    if (!gray_frame)
    {
        fprintf(stderr, "!!! cvCreateImage failed!\n");
        return NULL;
    }
    cvCvtColor(frame, gray_frame, CV_RGB2GRAY);
    return gray_frame;
}

/*
 * process_video: retrieves frames from the camera and executes a callback to do individual frame processing.
 * Keep in mind that if your callback takes too much time to execute, you might lose a few frames from
 * the camera.
 */
void process_video(callback_prototype custom_cb)
{
    // initialize the camera
    CvCapture* capture = 0;
    capture = cvCaptureFromCAM(-1);
    if (!capture)
    {
        fprintf(stderr, "!!! Cannot initialize webcam!\n");
        return;
    }

    // create a window for the video
    cvNamedWindow("result", CV_WINDOW_AUTOSIZE);

    IplImage* frame = 0;
    char key = 0;
    while (key != 27) // ESC
    {
        frame = cvQueryFrame(capture);
        if (!frame)
        {
            fprintf(stderr, "!!! cvQueryFrame failed!\n");
            break;
        }
        // execute the callback on each frame
        IplImage* processed_frame = (*custom_cb)(frame);
        // display the processed frame
        cvShowImage("result", processed_frame);
        // release resources
        cvReleaseImage(&processed_frame);
        // exit when the user presses ESC
        key = cvWaitKey(10);
    }

    // free memory
    cvDestroyWindow("result");
    cvReleaseCapture(&capture);
}

int main(int argc, char** argv)
{
    process_video(make_it_gray);
    return 0;
}
EDIT:
I changed the code above so it prints the current framerate and performs a manual grayscale conversion. These are small tweaks on the code, done for educational purposes so one knows how to perform operations at the pixel level.
#include <stdio.h>
#include <time.h>
#include <math.h>
#include "cv.h"
#include "highgui.h"

typedef IplImage* (*callback_prototype)(IplImage*);

/*
 * make_it_gray: our custom callback to convert a colored frame to its grayscale version.
 * Remember that you must deallocate the returned IplImage* yourself after calling this function.
 */
IplImage* make_it_gray(IplImage* frame)
{
    // new IplImage* to store the processed image
    IplImage* gray_frame = 0;

    // manual grayscale conversion: ugly, but shows how to access each channel of the pixels individually
    gray_frame = cvCreateImage(cvSize(frame->width, frame->height), frame->depth, frame->nChannels);
    if (!gray_frame)
    {
        fprintf(stderr, "!!! cvCreateImage failed!\n");
        return NULL;
    }
    for (int i = 0; i < frame->width * frame->height * frame->nChannels; i += frame->nChannels)
    {
        // cast to unsigned char: imageData is a (signed) char*, so averaging the raw values would overflow
        char gray = (char)(((uchar)frame->imageData[i] + (uchar)frame->imageData[i+1] + (uchar)frame->imageData[i+2]) / 3);
        gray_frame->imageData[i]   = gray; // B
        gray_frame->imageData[i+1] = gray; // G
        gray_frame->imageData[i+2] = gray; // R
    }
    return gray_frame;
}

/*
 * process_video: retrieves frames from the camera and executes a callback to do individual frame processing.
 * Keep in mind that if your callback takes too much time to execute, you might lose a few frames from
 * the camera.
 */
void process_video(callback_prototype custom_cb)
{
    // initialize the camera
    CvCapture* capture = 0;
    capture = cvCaptureFromCAM(-1);
    if (!capture)
    {
        fprintf(stderr, "!!! Cannot initialize webcam!\n");
        return;
    }

    // create a window for the video
    cvNamedWindow("result", CV_WINDOW_AUTOSIZE);

    double elapsed = 0;
    int last_time = 0;
    int num_frames = 0;

    IplImage* frame = 0;
    char key = 0;
    while (key != 27) // ESC
    {
        frame = cvQueryFrame(capture);
        if (!frame)
        {
            fprintf(stderr, "!!! cvQueryFrame failed!\n");
            break;
        }

        // calculate the framerate
        num_frames++;
        elapsed = clock() - last_time;
        int fps = 0;
        if (elapsed > 1)
        {
            fps = floor(num_frames / (float)(1 + (float)elapsed / (float)CLOCKS_PER_SEC));
            num_frames = 0;
            last_time = clock() + 1 * CLOCKS_PER_SEC;
            printf("FPS: %d\n", fps);
        }

        // execute the callback on each frame
        IplImage* processed_frame = (*custom_cb)(frame);
        // display the processed frame
        cvShowImage("result", processed_frame);
        // release resources
        cvReleaseImage(&processed_frame);
        // exit when the user presses ESC
        key = cvWaitKey(10);
    }

    // free memory
    cvDestroyWindow("result");
    cvReleaseCapture(&capture);
}

int main(int argc, char** argv)
{
    process_video(make_it_gray);
    return 0;
}
A quick thought would be to have two threads: the first thread is responsible for grabbing the frames and notifying the second thread when they are available (placing them in a processing queue), and the second thread does all your processing in an event-loop fashion.
See boost::thread and boost::signals2, as those two together should provide most of the framework (except for the queue) for what I described above. A sketch of the pattern follows.
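A minimal sketch of that two-thread pattern, using std::thread and a condition variable instead of Boost so it stays self-contained; the 100-frame limit and the grayscale stand-in processing are illustrative assumptions:

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <queue>
#include <thread>
#include <opencv2/opencv.hpp>

std::queue<cv::Mat> frame_queue;      // the processing queue shared by both threads
std::mutex queue_mutex;
std::condition_variable frame_ready;  // signals the worker that a frame is available
std::atomic<bool> done{false};

// Thread 1: grab frames and push them into the queue.
void grabber()
{
    cv::VideoCapture cap(0);
    cv::Mat frame;
    for (int i = 0; i < 100 && cap.read(frame); ++i) { // 100 frames, for the demo
        {
            std::lock_guard<std::mutex> lock(queue_mutex);
            frame_queue.push(frame.clone()); // clone: read() reuses its internal buffer
        }
        frame_ready.notify_one();
    }
    done = true;
    frame_ready.notify_one();
}

// Thread 2: event-loop-style processing of queued frames.
void processor()
{
    while (true) {
        std::unique_lock<std::mutex> lock(queue_mutex);
        frame_ready.wait(lock, [] { return !frame_queue.empty() || done; });
        if (frame_queue.empty() && done) break;
        cv::Mat frame = frame_queue.front();
        frame_queue.pop();
        lock.unlock(); // process outside the lock so the grabber is never blocked
        cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY); // stand-in for real work
    }
}

int main()
{
    std::thread t1(grabber), t2(processor);
    t1.join();
    t2.join();
    return 0;
}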