cvQueryFrame() returns grey frames - c++

cvQueryFrame() returns grey frames unless I put a breakpoint at cvQueryFrame(capture). The program only needs to hit the breakpoint once, and afterwards I get proper frames from the camera. I've tried delays, dummy frames, and combinations of the two, but nothing seems to work without that breakpoint.
cvNamedWindow("video", CV_WINDOW_AUTOSIZE);
CvCapture *capture = cvCaptureFromCAM(1);
if (capture == NULL)
{
return -1;
}
Mat frame;
for(int i = 0;i<10;i++)
{
frame = cvQueryFrame(capture);
}
while(1)
{
try
{
frame = cvQueryFrame(capture);
imshow("video", frame);
char c = cvWaitKey(1);
if(c == 33)
break;
}
catch(Exception e)
{
break;
}
}
cvReleaseCapture( &capture);
cvDestroyWindow( "video" );
return 0;

Try increasing your cvWaitKey value to 5 or 10. The camera needs time to deliver the next frame, and the window needs time to draw the previous one; waitKey is what gives OpenCV's GUI loop that time. It is possible that you are already grabbing frames correctly but are unable to show them properly.
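A minimal warm-up sketch along those lines, assuming a camera at index 1 as in the question; the frame count and the 10 ms delay are arbitrary guesses, not values from the original post:

CvCapture *capture = cvCaptureFromCAM(1);
if (capture != NULL)
{
    for (int i = 0; i < 10; i++)
    {
        cvQueryFrame(capture); // discard warm-up frame
        cvWaitKey(10);         // give the driver/GUI ~10 ms per grab to settle
    }
    // ...normal capture loop goes here...
    cvReleaseCapture(&capture);
}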

Related

How to use "raspicam::RaspiCam_Cv" instead of "CvCapture *capture = cvCaptureFromAVI(a.avi)" in OpenCv C++

I am detecting shapes in real time with OpenCV in C++. I found code that reads from a file and detects shapes, but in my case the camera should detect shapes in real time. How can I use raspicam::RaspiCam_Cv capture; instead of CvCapture *capture = cvCaptureFromAVI("a.avi"); in C++?
#include <cv.h>
#include <highgui.h>

using namespace std;

IplImage* imgTracking = 0;
int lastX1 = -1;
int lastY1 = -1;
int lastX2 = -1;
int lastY2 = -1;

void trackObject(IplImage* imgThresh)
{
    CvSeq* contour; // holds the pointer to a contour
    CvSeq* result;  // holds the sequence of points of a contour
    CvMemStorage *storage = cvCreateMemStorage(0); // storage area for all contours

    // find all contours in the image
    cvFindContours(imgThresh, storage, &contour, sizeof(CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));

    // iterate through each contour
    while (contour)
    {
        // obtain a sequence of points of the contour pointed to by the variable 'contour'
        result = cvApproxPoly(contour, sizeof(CvContour), storage, CV_POLY_APPROX_DP, cvContourPerimeter(contour)*0.02, 0);

        // if the contour has 3 vertices and the area of the triangle is more than 100 pixels
        if (result->total == 3 && fabs(cvContourArea(result, CV_WHOLE_SEQ)) > 100)
        {
            // iterate through each point
            CvPoint *pt[3];
            for (int i = 0; i < 3; i++) {
                pt[i] = (CvPoint*)cvGetSeqElem(result, i);
            }

            int posX = (pt[0]->x + pt[1]->x + pt[2]->x) / 3;
            int posY = (pt[0]->y + pt[1]->y + pt[2]->y) / 3;

            if (posX > 360) {
                if (lastX1 >= 0 && lastY1 >= 0 && posX >= 0 && posY >= 0) {
                    // Draw a red line from the previous point to the current point
                    cvLine(imgTracking, cvPoint(posX, posY), cvPoint(lastX1, lastY1), cvScalar(0,0,255), 4);
                }
                lastX1 = posX;
                lastY1 = posY;
            }
            else {
                if (lastX2 >= 0 && lastY2 >= 0 && posX >= 0 && posY >= 0) {
                    // Draw a blue line from the previous point to the current point
                    cvLine(imgTracking, cvPoint(posX, posY), cvPoint(lastX2, lastY2), cvScalar(255,0,0), 4);
                }
                lastX2 = posX;
                lastY2 = posY;
            }
        }
        // obtain the next contour
        contour = contour->h_next;
    }
    cvReleaseMemStorage(&storage);
}

int main()
{
    // load the video file into memory
    CvCapture *capture = cvCaptureFromAVI("a.avi");
    if (!capture) {
        printf("Capture failure\n");
        return -1;
    }

    IplImage* frame = 0;
    frame = cvQueryFrame(capture);
    if (!frame) return -1;

    // create a blank image, assigned to 'imgTracking', with the same size as the original video
    imgTracking = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 3);
    cvZero(imgTracking); // convert the image 'imgTracking' to black

    cvNamedWindow("Video");

    // iterate through each frame of the video
    while (true) {
        frame = cvQueryFrame(capture);
        if (!frame) break;
        frame = cvCloneImage(frame);

        // smooth the original image using a Gaussian kernel
        cvSmooth(frame, frame, CV_GAUSSIAN, 3, 3);

        // convert the original image into grayscale
        IplImage* imgGrayScale = cvCreateImage(cvGetSize(frame), 8, 1);
        cvCvtColor(frame, imgGrayScale, CV_BGR2GRAY);

        // threshold the grayscale image to get better results
        cvThreshold(imgGrayScale, imgGrayScale, 100, 255, CV_THRESH_BINARY_INV);

        // track the position of the ball
        trackObject(imgGrayScale);

        // Add the tracking image and the frame
        cvAdd(frame, imgTracking, frame);

        cvShowImage("Video", frame);

        // Clean up used images
        cvReleaseImage(&imgGrayScale);
        cvReleaseImage(&frame);

        // Wait 10 ms; if 'ESC' is pressed, break the loop
        int c = cvWaitKey(10);
        if ((char)c == 27) break;
    }

    cvDestroyAllWindows();
    cvReleaseImage(&imgTracking);
    cvReleaseCapture(&capture);
    return 0;
}
I cannot simply use raspicam::RaspiCam_Cv capture; in place of CvCapture *capture = cvCaptureFromAVI();. I need to detect shapes in real time, for example calling some function whenever a triangle appears. Please help me.
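A minimal sketch of what the swap might look like, based on the raspicam library's documented RaspiCam_Cv interface (open(), grab(), retrieve(), release()); the property setting and the pipeline placement are assumptions to adapt, not code from the original post:

#include <raspicam/raspicam_cv.h>
#include <opencv2/opencv.hpp>
#include <cstdio>

int main()
{
    raspicam::RaspiCam_Cv capture;
    capture.set(CV_CAP_PROP_FORMAT, CV_8UC3); // assumed: ask for 3-channel BGR output

    if (!capture.open()) {
        printf("Capture failure\n");
        return -1;
    }

    cv::Mat frame;
    while (true) {
        capture.grab();          // advance to the next camera frame
        capture.retrieve(frame); // decode it into 'frame'
        if (frame.empty()) break;

        // ...run the same smooth/threshold/trackObject pipeline here;
        //    if the old C functions are kept, convert with: IplImage ipl = frame;

        cv::imshow("Video", frame);
        if ((char)cv::waitKey(10) == 27) break; // ESC exits
    }
    capture.release();
    return 0;
}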

OpenCV: Image window keeps hanging and not responding

The following code draws a line connecting two points the user clicks on an image:
#include <opencv2/opencv.hpp>
#include <iostream>
#include <conio.h> // for getch()

using namespace cv;
using namespace std;

void onMouse(int evt, int x, int y, int flags, void* param) {
    if (evt == CV_EVENT_LBUTTONDOWN) {
        std::vector<cv::Point>* ptPtr = (std::vector<cv::Point>*)param;
        ptPtr->push_back(cv::Point(x, y));
    }
}

int main()
{
    std::vector<Point> points;
    cv::namedWindow("Output Window");
    Mat frame = cv::imread("chhha.png");
    cv::setMouseCallback("Output Window", onMouse, (void*)&points);

    int X1 = 0, Y1 = 0, X2 = 0, Y2 = 0;
    while (1)
    {
        cv::imshow("Output Window", frame);
        if (points.size() > 1) // we have 2 points
        {
            for (auto it = points.begin(); it != points.end(); ++it)
            {
                // (empty in the original post)
            }
            break;
        }
        waitKey(10);
    }

    // just for testing that we are getting pixel values
    X1 = points[0].x;
    X2 = points[1].x;
    Y1 = points[0].y;
    Y2 = points[1].y;
    cout << "First and second X coordinates are given below" << endl;
    cout << X1 << '\t' << X2 << endl;
    cout << "First and second Y coordinates are given below" << endl;
    cout << Y1 << '\t' << Y2 << endl;

    // Now let us draw a line on the image
    // ('r' is not a valid colour argument; red in BGR is Scalar(0, 0, 255))
    line(frame, points[0], points[1], Scalar(0, 0, 255), 2, 8);
    cv::imshow("Output Window", frame);
    waitKey(10);
    getch();
    return 0;
}
The problem is that the program does not exit if I close the "Output Window" (by clicking the cross button at its top right); instead it hangs and says "Not responding".
How do I get rid of this problem?
The reason that your application does not exit is that you have an infinite loop, and clicking on the cross to close a window does nothing to break that loop.
One way to exit is to test for a key being pressed, e.g.
while (true)
{
    ...
    char c = cv::waitKey(10);
    if (c == 'q')
        break;
}
BTW, assuming you are on Windows: if a window is destroyed, waitKey() intercepts a WM_DESTROY message. In this case waitKey() returns message.wParam, but the Windows documentation for WM_DESTROY says "This parameter is not used." It looks like a bug to me, but it might be worth investigating whether waitKey() returns a consistent value when the window is closed; normally -1 is returned if no key is pressed.
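If you want the loop to end when the user closes the window, a commonly suggested workaround is to poll the window's properties on each iteration. The exact return value differs between HighGUI backends and versions, so treat this as a sketch to verify on your setup:

while (true)
{
    cv::imshow("Output Window", frame);

    // getWindowProperty() typically returns a negative value once the
    // window has been destroyed (backend-dependent; verify locally).
    if (cv::getWindowProperty("Output Window", CV_WND_PROP_AUTOSIZE) < 0)
        break;

    char c = (char)cv::waitKey(10);
    if (c == 'q')
        break;
}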

Draw on webcam using OpenCV

I want to draw/paint on a webcam screen using OpenCV. Since I'm reading from a cam, the frames are constantly changing, so I'm trying to figure out a way to keep or save the drawing on the current frame and use it for the next frame. The code below allows you to draw on the screen but when it gets the next frame, the drawing is gone and it starts over.
Could someone please help me ... Thanks.
CvCapture *input;
input = cvCaptureFromCAM(0);
cvSetMouseCallback("Demo", &on_mouse, 0);

for (;;)
{
    frame = cvQueryFrame(input);
    if (!image)
    {
        image = cvCreateImage(cvSize(frame->width, frame->height), IPL_DEPTH_8U, 3);
        screenBuffer = cvCreateImage(cvSize(frame->width, frame->height), IPL_DEPTH_8U, 3);
    }
    cvCopy(frame, image, 0);
    if (drawing) // drawing is a global variable
    {
        cvCircle(image, cvPoint(last_x, last_y), 10, CV_RGB(red, green, blue), -1, CV_AA, 0);
        cvCopy(image, screenBuffer, 0);
    }
    cvShowImage("Demo", screenBuffer);
}

void on_mouse(int event, int x, int y, int flags, void* param)
{
    last_x = x;
    last_y = y;
    if (event == CV_EVENT_LBUTTONDOWN)
    {
        drawing = 1;
    }
}
Draw into a separate image and then cvAdd() that to the video image immediately before displaying it.
I will not go into all the details of why your approach is bad, but keep in mind that creating two extra frames just for drawing is a bit too much.
It's important to realize that all of this work is being done on the same thread used to capture new frames. What does that mean? It means that the extra code you add inside the loop will slow down the capture and display of new frames; in other words, you are sabotaging yourself by lowering the framerate of your application. If you don't care, that's OK. If you do, my tip is to stack the captured frames in a buffer and have another thread read, process and display them.
OK, so you REALLY want to draw over the window that's displaying the captured frames. Well, the obvious thing you can't do (and you discovered this yourself) is draw on the captured frame itself, because the frame is replaced with new data on every iteration of the loop. So what do you do? You create a second frame to draw on. Let's call it the drawing_frame.
The only things on the drawing_frame are the circles that appear when the mouse moves over the window while drawing is enabled; a second click of the left mouse button switches drawing ON/OFF.
After a circle is drawn, the drawing_frame is overlaid on top of the frame captured by the camera. This process is a little expensive on the CPU, and since we are doing it in the main thread of the application, it will also decrease the framerate.
I strongly suggest that everyone interested in adding/merging/overlaying transparent frames with OpenCV take a look at Transparent image overlays in OpenCV.
By the way, I'm using cvCaptureFromCAM(-1) because I'm on Linux. You should probably change that to whatever works for you; according to your post it's cvCaptureFromCAM(0).
#include <stdio.h>
#include <cv.h>
#include <highgui.h>

int drawing = 0;
int last_x = 0;
int last_y = 0;

void on_mouse(int event, int x, int y, int flags, void* param)
{
    last_x = x;
    last_y = y;
    if (event == CV_EVENT_LBUTTONDOWN)
    {
        // switches between On and Off
        drawing = !drawing;
    }
}

int main()
{
    CvCapture* capture = NULL;
    if ((capture = cvCaptureFromCAM(-1)) == NULL)
    {
        fprintf(stderr, "ERROR: capture is NULL\n");
        return -1;
    }

    cvNamedWindow("mywindow", CV_WINDOW_AUTOSIZE);
    cvQueryFrame(capture); // Sometimes needed to get correct data
    cvSetMouseCallback("mywindow", &on_mouse, 0);

    IplImage* frame = NULL;
    IplImage* drawing_frame = NULL;

    while (1)
    {
        if ((frame = cvQueryFrame(capture)) == NULL)
        {
            fprintf(stderr, "ERROR: cvQueryFrame failed\n");
            break;
        }

        if (!drawing_frame) // This frame is created only once
        {
            drawing_frame = cvCreateImage(cvSize(frame->width, frame->height), frame->depth, frame->nChannels);
            cvZero(drawing_frame);
        }

        if (drawing)
        {
            cvCircle(drawing_frame, cvPoint(last_x, last_y), 10, CV_RGB(0, 255, 0), -1, CV_AA, 0);

            // For overlaying (copying transparent images) in OpenCV:
            // http://www.aishack.in/2010/07/transparent-image-overlays-in-opencv/
            for (int x = 0; x < frame->width; x++)
            {
                for (int y = 0; y < frame->height; y++)
                {
                    CvScalar source = cvGet2D(frame, y, x);
                    CvScalar over = cvGet2D(drawing_frame, y, x);
                    CvScalar merged;
                    CvScalar S = { 1, 1, 1, 1 };
                    CvScalar D = { 1, 1, 1, 1 };
                    for (int i = 0; i < 4; i++)
                        merged.val[i] = (S.val[i] * source.val[i] + D.val[i] * over.val[i]);
                    cvSet2D(frame, y, x, merged);
                }
            }
        }

        cvShowImage("mywindow", frame);

        int key = cvWaitKey(10);
        if (key == 113) // 'q' was pressed on the keyboard
            break;
    }

    // Note: frames returned by cvQueryFrame() are owned by the capture
    // structure and must not be released manually.
    cvReleaseImage(&drawing_frame);
    cvReleaseCapture(&capture);
    cvDestroyWindow("mywindow");
    return 0;
}
You will usually run into problems when adding images (they eventually saturate), so I guess that's why you start over. I see you have colour images... if you use more powerful tools like OpenGL for your drawing, you could use an overlay for the drawings. Otherwise check this out:
http://aishack.in/tutorials/transparent-image-overlays-in-opencv/
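For reference, the blending described in the linked tutorial can also be done in a single call instead of per-pixel loops; a minimal sketch, where the 0.5 weight is an arbitrary choice for the overlay opacity:

// Blend the drawing over the captured frame: blended = 1.0*frame + 0.5*drawing_frame.
// Writing into a separate output image avoids accumulating (and saturating)
// the overlay into the source frame across iterations.
IplImage* blended = cvCreateImage(cvGetSize(frame), frame->depth, frame->nChannels);
cvAddWeighted(frame, 1.0, drawing_frame, 0.5, 0.0, blended);
cvShowImage("mywindow", blended);
cvReleaseImage(&blended);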

OpenCV unable to capture image from isight webcam

I cannot capture an image from my webcam using the following OpenCV code.
The code can show images from a local AVI file or a video device; it works fine on a "test.avi" file.
When I use my default webcam (CvCapture* capture = cvCreateCameraCapture(0)), the program detects the size of the image from the webcam but is unable to display it.
(I forgot to mention that I can see the iSight is working, because the LED indicator turns on.)
Has anyone encountered the same problem?
cvNamedWindow( "Example2", CV_WINDOW_AUTOSIZE );
CvCapture* capture =cvCreateFileCapture( "C:\\test.avi" ) ;// display images from avi file, works well
// CvCapture* capture =cvCreateCameraCapture(0); //display the frame(images) from default webcam not work
assert( capture );
IplImage* image;
while(1) {
image = cvQueryFrame( capture );
if( !image ) break;
cvShowImage( "Example2", image );
char c = cvWaitKey(33);
if( c == 27 ) break;
}
cvReleaseCapture( &capture );
cvDestroyWindow( "Example2" );
OpenCV 2.2
Debug library *d.lib
Webcam: iSight
MacBook running Windows 7 32-bit
VS2008
I'm working with OpenCV 2.3 on a MacBook Pro (Mid 2012) and I had that problem with the iSight cam. I managed to make it work by simply adjusting the parameters of the CvCapture, setting the frame width and height:
CvCapture* capture = cvCaptureFromCAM(0);
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_WIDTH, 500 );
cvSetCaptureProperty( capture, CV_CAP_PROP_FRAME_HEIGHT, 600 );
You can also change these numbers to the frame width and height you want.
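To check whether the driver actually accepted the request, you can read the properties back; a small sketch (property support varies by capture backend):

// Read back what the capture driver actually settled on; some backends
// silently ignore or round the requested size.
double w = cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH);
double h = cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT);
printf("actual capture size: %.0f x %.0f\n", w, h);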
Did you try the example from the opencv page?
namely,
#include "cv.h"
#include "highgui.h"
using namespace cv;
int main(int, char**)
{
VideoCapture cap(0); // open the default camera
if(!cap.isOpened()) // check if we succeeded
return -1;
Mat edges;
namedWindow("edges",1);
for(;;)
{
Mat frame;
cap >> frame; // get a new frame from camera
cvtColor(frame, edges, CV_BGR2GRAY);
GaussianBlur(edges, edges, Size(7,7), 1.5, 1.5);
Canny(edges, edges, 0, 30, 3);
imshow("edges", edges);
if(waitKey(30) >= 0) break;
}
// the camera will be deinitialized automatically in VideoCapture destructor
return 0;
}
This works for me on a MacBook Pro (although under OS X). If it doesn't work, some kind of error message would be helpful.
Try this:
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main(int, char**) {
    VideoCapture cap(0); // open the default camera
    if (!cap.isOpened()) { // check if we succeeded
        cout << "===couldn't open camera" << endl;
        return -1;
    }

    Mat edges, frame;
    frame = cv::Mat(10, 10, CV_8U);
    namedWindow("edges", 1);
    for (;;) {
        cap >> frame; // get a new frame from camera
        cout << "frame size: " << frame.cols << endl;
        if (frame.cols > 0 && frame.rows > 0) {
            imshow("edges", frame);
        }
        if (waitKey(30) >= 0)
            break;
    }
    // the camera will be deinitialized automatically in the VideoCapture destructor
    return 0;
}
Latest update: problem solved!
This turned out to be a bug in OpenCV 2.2.
Here is how to fix it:
http://dusijun.wordpress.com/2011/01/11/opencv-unable-to-capture-image-from-isight-webcam/
Why don't you try
capture = cvCaptureFromCAM(0);
I think this may work. Let me know whether it works or not.

OpenCV: process every frame

I want to write a cross-platform application that uses OpenCV for video capture. In all the examples I've found, frames from the camera are processed by calling a grab function and then waiting for a while, but I want to process every frame in the sequence. I want to define my own callback function that is executed every time a new frame is ready to be processed (like in DirectShow on Windows, where you define your own filter and insert it into the graph for such purposes).
So the question is: how can I do this?
According to the code below, all callbacks would have to follow this definition:
IplImage* custom_callback(IplImage* frame);
This signature means the callback is going to be executed on each frame retrieved by the system. In my example, make_it_gray() allocates a new image to store the result of the grayscale conversion and returns it. This means you must free that frame later in your code; I added comments about it in the code.
Note that if your callback demands a lot of processing, the system might skip a few frames from the camera. Consider the suggestions Paul R and diverscuba23 made.
#include <stdio.h>
#include "cv.h"
#include "highgui.h"

typedef IplImage* (*callback_prototype)(IplImage*);

/*
 * make_it_gray: our custom callback to convert a colored frame to its grayscale version.
 * Remember that you must deallocate the returned IplImage* yourself after calling this function.
 */
IplImage* make_it_gray(IplImage* frame)
{
    // Allocate space for a new image
    IplImage* gray_frame = 0;
    gray_frame = cvCreateImage(cvSize(frame->width, frame->height), frame->depth, 1);
    if (!gray_frame)
    {
        fprintf(stderr, "!!! cvCreateImage failed!\n");
        return NULL;
    }
    // OpenCV captures frames in BGR channel order, so CV_BGR2GRAY is the accurate flag
    cvCvtColor(frame, gray_frame, CV_BGR2GRAY);
    return gray_frame;
}

/*
 * process_video: retrieves frames from the camera and executes a callback to do individual frame processing.
 * Keep in mind that if your callback takes too much time to execute, you might lose a few frames from
 * the camera.
 */
void process_video(callback_prototype custom_cb)
{
    // Initialize camera
    CvCapture *capture = 0;
    capture = cvCaptureFromCAM(-1);
    if (!capture)
    {
        fprintf(stderr, "!!! Cannot initialize webcam!\n");
        return;
    }

    // Create a window for the video
    cvNamedWindow("result", CV_WINDOW_AUTOSIZE);

    IplImage* frame = 0;
    char key = 0;
    while (key != 27) // ESC
    {
        frame = cvQueryFrame(capture);
        if (!frame)
        {
            fprintf(stderr, "!!! cvQueryFrame failed!\n");
            break;
        }

        // Execute callback on each frame
        IplImage* processed_frame = (*custom_cb)(frame);

        // Display processed frame
        cvShowImage("result", processed_frame);

        // Release resources
        cvReleaseImage(&processed_frame);

        // Exit when the user presses ESC
        key = cvWaitKey(10);
    }

    // Free memory
    cvDestroyWindow("result");
    cvReleaseCapture(&capture);
}

int main(int argc, char **argv)
{
    process_video(make_it_gray);
    return 0;
}
EDIT:
I changed the code above so that it prints the current framerate and performs a manual grayscale conversion. These are small tweaks, done for educational purposes, so one knows how to perform operations at the pixel level.
#include <stdio.h>
#include <time.h>
#include "cv.h"
#include "highgui.h"

typedef IplImage* (*callback_prototype)(IplImage*);

/*
 * make_it_gray: our custom callback to convert a colored frame to its grayscale version.
 * Remember that you must deallocate the returned IplImage* yourself after calling this function.
 */
IplImage* make_it_gray(IplImage* frame)
{
    // New IplImage* to store the processed image
    IplImage* gray_frame = 0;

    // Manual grayscale conversion: ugly, but shows how to access each channel of the pixels individually
    gray_frame = cvCreateImage(cvSize(frame->width, frame->height), frame->depth, frame->nChannels);
    if (!gray_frame)
    {
        fprintf(stderr, "!!! cvCreateImage failed!\n");
        return NULL;
    }

    // Cast imageData to unsigned char* so the averaging below cannot overflow
    // (imageData is a signed char*, and pixel values above 127 would go negative).
    // Note: this assumes rows have no padding (widthStep == width * nChannels).
    unsigned char* src = (unsigned char*)frame->imageData;
    unsigned char* dst = (unsigned char*)gray_frame->imageData;
    for (int i = 0; i < frame->width * frame->height * frame->nChannels; i += frame->nChannels)
    {
        unsigned char avg = (unsigned char)((src[i] + src[i+1] + src[i+2]) / 3);
        dst[i]   = avg; // B
        dst[i+1] = avg; // G
        dst[i+2] = avg; // R
    }
    return gray_frame;
}

/*
 * process_video: retrieves frames from the camera and executes a callback to do individual frame processing.
 * Keep in mind that if your callback takes too much time to execute, you might lose a few frames from
 * the camera.
 */
void process_video(callback_prototype custom_cb)
{
    // Initialize camera
    CvCapture *capture = 0;
    capture = cvCaptureFromCAM(-1);
    if (!capture)
    {
        fprintf(stderr, "!!! Cannot initialize webcam!\n");
        return;
    }

    // Create a window for the video
    cvNamedWindow("result", CV_WINDOW_AUTOSIZE);

    clock_t last_time = clock();
    int num_frames = 0;

    IplImage* frame = 0;
    char key = 0;
    while (key != 27) // ESC
    {
        frame = cvQueryFrame(capture);
        if (!frame)
        {
            fprintf(stderr, "!!! cvQueryFrame failed!\n");
            break;
        }

        // Report the framerate roughly once per second.
        // (clock() measures CPU time, not wall time, so this is only an estimate.)
        num_frames++;
        double elapsed = (double)(clock() - last_time) / CLOCKS_PER_SEC;
        if (elapsed >= 1.0)
        {
            printf("FPS: %d\n", (int)(num_frames / elapsed));
            num_frames = 0;
            last_time = clock();
        }

        // Execute callback on each frame
        IplImage* processed_frame = (*custom_cb)(frame);

        // Display processed frame
        cvShowImage("result", processed_frame);

        // Release resources
        cvReleaseImage(&processed_frame);

        // Exit when the user presses ESC
        key = cvWaitKey(10);
    }

    // Free memory
    cvDestroyWindow("result");
    cvReleaseCapture(&capture);
}

int main(int argc, char **argv)
{
    process_video(make_it_gray);
    return 0;
}
A quick thought would be to have two threads: the first thread is responsible for grabbing the frames and notifying the second thread when they are available (placing them in a processing queue), while the second thread does all your processing in an event-loop fashion.
See boost::thread and boost::signals2, as those two together should provide most of the framework (except for the queue) for what I described above.
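For illustration, here is a minimal capture/process split using the C++11 standard library instead of Boost; the unbounded queue, the grayscale stand-in processing, and the use of cv::VideoCapture are my own choices, not something from the original answer:

#include <opencv2/opencv.hpp>
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <queue>
#include <thread>

std::queue<cv::Mat> frames;          // shared processing queue
std::mutex mtx;
std::condition_variable frame_ready;
std::atomic<bool> done(false);

// Thread 1: grab frames as fast as the camera delivers them.
void grabber()
{
    cv::VideoCapture cap(0);
    cv::Mat frame;
    while (!done && cap.read(frame))
    {
        {
            std::lock_guard<std::mutex> lock(mtx);
            frames.push(frame.clone()); // clone: cap.read() reuses its internal buffer
        }
        frame_ready.notify_one();
    }
    done = true;
    frame_ready.notify_one(); // wake the consumer so it can exit
}

// Thread 2 (main): pop frames, process and display them.
int main()
{
    std::thread t(grabber);
    while (true)
    {
        cv::Mat frame;
        {
            std::unique_lock<std::mutex> lock(mtx);
            frame_ready.wait(lock, []{ return !frames.empty() || done; });
            if (frames.empty()) break; // grabber finished
            frame = frames.front();
            frames.pop();
        }
        cv::cvtColor(frame, frame, CV_BGR2GRAY); // stand-in for real per-frame processing
        cv::imshow("result", frame);
        if (cv::waitKey(1) == 27) { done = true; break; } // ESC
    }
    t.join();
    return 0;
}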