Playing a video in OpenCV - C++

I am a beginner with OpenCV and I want to play a video. I've written some code, but it displays only a single image.
I am using OpenCV 2.1 and Visual Studio 2008.
I would really appreciate it if someone could point out where I am going wrong.
Here is my code:
#include "stdafx.h"
#include "cv.h"
#include "highgui.h"
int main()
{
CvCapture* capture = cvCaptureFromAVI("C:/OpenCV2.1/samples/c/tree.avi");
IplImage* img = 0;
if(!cvGrabFrame(capture)){ // capture a frame
printf("Could not grab a frame\n\7");
exit(0);}
cvQueryFrame(capture); // this call is necessary to get correct
// capture properties
int frameH = (int) cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT);
int frameW = (int) cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH);
int fps = (int) cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
int numFrames = (int) cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_COUNT);
///numFrames=total number of frames
printf("Number of rows %d\n",frameH);
printf("Number of columns %d\n",frameW,"\n");
printf("frames per second %d\n",fps,"\n");
printf("Number of frames %d\n",numFrames,"\n");
for(int i=0;i<numFrames;i++)
{
IplImage* img = 0;
img=cvRetrieveFrame(capture);
cvNamedWindow( "img" );
cvShowImage("img", img);
}
cvWaitKey(0);
cvDestroyWindow( "img" );
cvReleaseImage( &img );
cvReleaseCapture(&capture);
return 0;
}

You have to use cvQueryFrame instead of cvRetrieveFrame (cvQueryFrame grabs and decodes the next frame in one call, whereas cvRetrieveFrame only decodes the frame that was last grabbed, so your loop keeps showing the same frame). Also, as pointed out by @Chipmunk, you have to add a delay after cvShowImage.
#include "stdafx.h"
#include "cv.h"
#include "highgui.h"
cvNamedWindow( "img" );
for(int i = 0; i < numFrames; i++)
{
    IplImage* img = cvQueryFrame(capture);
    cvShowImage("img", img);
    cvWaitKey(10);
}
Here is a complete example showing how to play a video using OpenCV:
int main()
{
    CvCapture* capture = cvCreateFileCapture("C:/OpenCV2.1/samples/c/tree.avi");
    IplImage* frame = NULL;
    if(!capture)
    {
        printf("Video Not Opened\n");
        return -1;
    }
    int width = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH);
    int height = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT);
    double fps = cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
    int frame_count = (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_COUNT);
    printf("Video Size = %d x %d\n", width, height);
    printf("FPS = %f\nTotal Frames = %d\n", fps, frame_count);
    while(1)
    {
        frame = cvQueryFrame(capture);
        if(!frame)
        {
            printf("Capture Finished\n");
            break;
        }
        cvShowImage("video", frame);
        cvWaitKey(10);
    }
    cvReleaseCapture(&capture);
    return 0;
}
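For comparison, here is a rough sketch of the same playback loop using the C++ API (cv::VideoCapture and cv::Mat); it assumes the same sample file path as above and derives the delay from the reported FPS:
#include <opencv2/opencv.hpp>
#include <cstdio>

int main()
{
    cv::VideoCapture cap("C:/OpenCV2.1/samples/c/tree.avi");
    if(!cap.isOpened())
    {
        std::printf("Video Not Opened\n");
        return -1;
    }
    double fps = cap.get(CV_CAP_PROP_FPS);
    int delay = (fps > 0) ? (int)(1000.0 / fps) : 33; // fall back to ~30 fps if FPS is unknown
    cv::Mat frame;
    while(cap.read(frame))                            // read() returns false when the video ends
    {
        cv::imshow("video", frame);
        if(cv::waitKey(delay) == 27) break;           // ESC stops playback
    }
    return 0;
}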

After showing an image in the window, there has to be a delay (a wait) before the next image can be shown; I think you can guess why that is. That delay is what cvWaitKey() provides,
and that's what I have added to the code inside the loop.
cvNamedWindow( "img" );
for(int i = 0; i < numFrames; i++)
{
    IplImage* img = 0;
    img = cvRetrieveFrame(capture);
    cvShowImage("img", img);
    cvWaitKey(10);
}
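One small refinement on the loop above (a sketch built on the same variables declared earlier): the delay can be derived from the fps value that was already queried instead of being hard-coded, and cvQueryFrame keeps the grab/decode step in one call:
int delay = (fps > 0) ? 1000 / fps : 33;   // fall back to ~30 fps if FPS is unknown
cvNamedWindow( "img" );
for(int i = 0; i < numFrames; i++)
{
    IplImage* img = cvQueryFrame(capture); // grab and decode the next frame
    if(!img) break;                        // stop if the stream ends early
    cvShowImage("img", img);
    if(cvWaitKey(delay) == 27) break;      // ESC quits early
}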

Related

Track bar is not working properly in opencv

I have written a program in OpenCV (C++) to manipulate camera properties. I am trying to blur the camera feed using a trackbar. The code works, but only under certain conditions: it works when I change the trackbar position with a mouse click, but if I try to slide the trackbar it gives me the error mentioned below.
Here is my code
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
Mat image, image_blurred;
int slider=5;
float sigma=0.3 *((slider - 1)*0.5 - 1) +0.8;
void on_Trackbar(int,void *)
{
int k_size = max(1,slider);
//k_size = k_size%2 == 0 ? k_size+1 : k_size;
setTrackbarPos("kernel","Blur window",3);
sigma=0.3 *((slider - 1)*0.5 - 1) +0.8;
GaussianBlur(image,image_blurred,Size(3,3),sigma);
}
int main()
{
Mat img;
VideoCapture cap(0);
if(!cap.isOpened())
{
cout<<"Camera is not successfully opened"<<endl;
return -1;
}
namedWindow("original image",CV_WINDOW_AUTOSIZE);
namedWindow("Blur Image",CV_WINDOW_AUTOSIZE);
while(!char(waitKey(30)=='q') && cap.isOpened())
{
cap>>img;
GaussianBlur(img,image_blurred,Size(slider,slider),sigma);
createTrackbar("kernel","Blur Image",&slider,21,on_Trackbar);
imshow("Blur Image",image_blurred);
imshow("original image",img);
}
destroyAllWindows();
return 0;
}
Please share your views. Thanks in advance!
In the while loop, you're passing an invalid value to GaussianBlur, since slider can also be an even number and GaussianBlur requires an odd kernel size.
You can correct this by introducing a new variable, int kernel_size = 2*slider+1. slider is now the radius of the kernel, and kernel_size is guaranteed to be odd.
Also, you don't need to call GaussianBlur in the callback function, since it's already called in the main loop. The only job of the callback is to update the values of kernel_size and sigma.
This code will work as expected:
#include <iostream>
#include <opencv2/opencv.hpp>

using namespace std;
using namespace cv;

Mat image, image_blurred;
int slider = 0;
int kernel_size = 3;
float sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8;

void on_Trackbar(int, void *)
{
    kernel_size = 2 * slider + 1;
    sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8;
}

int main()
{
    Mat img;
    VideoCapture cap(0);
    if (!cap.isOpened())
    {
        cout << "Camera is not successfully opened" << endl;
        return -1;
    }
    namedWindow("original image", CV_WINDOW_AUTOSIZE);
    namedWindow("Blur Image", CV_WINDOW_AUTOSIZE);
    createTrackbar("kernel", "Blur Image", &slider, 21, on_Trackbar);
    while (!char(waitKey(30) == 'q') && cap.isOpened())
    {
        cap >> img;
        GaussianBlur(img, image_blurred, Size(kernel_size, kernel_size), sigma);
        imshow("Blur Image", image_blurred);
        imshow("original image", img);
    }
    destroyAllWindows();
    return 0;
}
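As an alternative sketch (untested against the code above, and assuming the same slider, kernel_size and sigma globals), the callback could instead force the raw slider value to the nearest odd size with a bitwise OR, which avoids the doubling:
void on_Trackbar(int, void *)
{
    kernel_size = max(1, slider) | 1; // OR with 1 guarantees an odd kernel size
    sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8;
}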

OpenCV blob detection segfaults when filtering by color

I'm having trouble using the filterByColor functionality in the SimpleBlobDetector tool that ships with OpenCV. Make doesn't give me any errors, but when I try to run the program, it segfaults at blobme.detect().
It works fine when I use filterByArea; it's just filterByColor that's giving me headaches.
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>
#define ACTIVE_CHANNEL 2
int main(int argc, char* argv[])
{
if (argc != 3)
{
std::cout << "./image_proc <file> <thresh> (-1 for default)" << std::endl;
return -1;
}
cv::Mat test_im = cv::imread(argv[1], CV_LOAD_IMAGE_COLOR);
cv::Mat hsvim, outim, channels[3], descriptor;
std::vector<cv::KeyPoint> keypoints;
// Convert to HSV
cv::cvtColor(test_im, hsvim, CV_RGB2HSV);
cv::split(hsvim, channels);
cv::SimpleBlobDetector::Params params;
params.filterByInertia = false;
params.filterByConvexity = false;
params.filterByColor = true;
params.filterByCircularity = false;
params.filterByArea = false;
params.blobColor = 255;
//params.minArea = 100.0f;
//params.maxArea = 500.0f;
// Trying to use blob detector
cv::SimpleBlobDetector blobme(params);
blobme.detect(channels[ACTIVE_CHANNEL], keypoints);
// Print keypoints
cv::drawKeypoints(channels[ACTIVE_CHANNEL], keypoints, outim);
// Display
cv::namedWindow("Display window", cv::WINDOW_AUTOSIZE);
cv::imwrite("imout.jpg", outim);
cv::imshow("Display window", outim);
cv::waitKey(0);
return 0;
}
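A hedged note for anyone hitting this on a newer release (not necessarily the cause of the segfault above): from OpenCV 3.x onward, SimpleBlobDetector has to be created through its factory method and held in a cv::Ptr rather than constructed directly, so the detection step would look like this with the same params, channels and keypoints as above:
// OpenCV 3.x+ style: create the detector via the factory method
cv::Ptr<cv::SimpleBlobDetector> detector = cv::SimpleBlobDetector::create(params);
detector->detect(channels[ACTIVE_CHANNEL], keypoints);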

How to extract Frames from AVI video

Hey peeps, so far I've managed to get OpenCV to play a video (.avi), but what should I do now to extract the frames...?
Below is the code I've written so far that gets my video playing:
#include<opencv\cv.h>
#include<opencv\highgui.h>
#include<opencv\ml.h>
#include<opencv\cxcore.h>
int main( int argc, char** argv ) {
cvNamedWindow( "DisplayVideo", CV_WINDOW_AUTOSIZE );
CvCapture* capture = cvCreateFileCapture( argv[1] );
IplImage* frame;
while(1) {
frame = cvQueryFrame( capture );
if( !frame ) break;
cvShowImage( "DisplayVideo", frame );
char c = cvWaitKey(33);
if( c == 27 ) break;
}
cvReleaseCapture( &capture );
cvDestroyWindow("DisplayVideo" );
}
frame is the frame you are extracting. If you want to convert it to a cv::Mat, you can do so by constructing a Mat from that IplImage pointer:
Mat myImage(frame);
There is a nice tutorial on it here.
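As a hedged aside, cv::cvarrToMat performs the same conversion and is what newer releases expect (OpenCV 3.x drops the Mat(IplImage*) constructor); a minimal sketch:
cv::Mat view = cv::cvarrToMat(frame);        // wraps the IplImage data, no copy
cv::Mat copy = cv::cvarrToMat(frame, true);  // second argument requests a deep copy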
However, you are using the old C API. Newer versions of OpenCV provide the C++ capture interface (cv::VideoCapture), and you should do something like this:
#include "cv.h"
#include "highgui.h"
using namespace cv;
int main()
{
VideoCapture cap(0); // open the default camera
if(!cap.isOpened()) // check if we succeeded
return -1;
namedWindow("Output",1);
while(true)
{
Mat frame;
cap >> frame; // get a new frame from camera
//Do your processing here
...
//Show the image
imshow("Output", frame);
if(waitKey(30) >= 0) break;
}
// the camera will be deinitialized automatically in VideoCapture destructor
return 0;
}
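Since the question is specifically about extracting frames, here is a short sketch under the same C++ API that reads the video file given on the command line and writes each frame to disk with cv::imwrite (the output naming scheme is just an example):
#include <opencv2/opencv.hpp>
#include <cstdio>

int main(int argc, char** argv)
{
    if(argc < 2) return -1;
    cv::VideoCapture cap(argv[1]); // open the video file passed on the command line
    if(!cap.isOpened()) return -1;
    cv::Mat frame;
    char name[64];
    for(int i = 0; cap.read(frame); i++)
    {
        std::sprintf(name, "frame_%05d.png", i); // frame_00000.png, frame_00001.png, ...
        cv::imwrite(name, frame);                // save the current frame to disk
    }
    return 0;
}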

Setting ROI with mouse from a rectangle on a video

I have a video. When the program runs, the video's first frame is taken as an image and the user is allowed to draw a rectangle on it; after the rectangle is drawn, the user must right-click on the image to confirm it. When the mouse is right-clicked, the image disappears and the video starts to play with the drawn rectangle on it.
I am able to draw the rectangle perfectly, but I can't set that rectangle as the Region of Interest (ROI), which is what I want so that I can do some image processing on it.
I am using OpenCV with Visual Studio 2010. Later on I will try to integrate this program into Qt Creator.
Any help would be appreciated.
Thanks in advance.
My full code is as follows:
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include<opencv2\opencv.hpp>
#include <opencv2\highgui\highgui.hpp>
#include <opencv/highgui.h>
#include <opencv/cxcore.h>
#include <opencv\cvaux.h>
using namespace cv;
using namespace std;
void my_mouse_callback( int event, int x, int y, int flags, void* param );
bool destroy=false;
CvRect box;
IplImage* image;
IplImage* frame2;
bool drawing_box = false;
void draw_box( IplImage* img, CvRect rect)
{
cvRectangle( img, cvPoint(box.x, box.y), cvPoint(box.x+box.width,box.y+box.height),
cvScalar(0,0,255) ,2);
CvRect rect2=cvRect(box.x,box.y,box.width,box.height);
//cvSetImageROI(image, rect2); //here I wanted to set the drawn rect as ROI
}
// Implement mouse callback
void my_mouse_callback( int event, int x, int y, int flags, void* param ){
IplImage* image = (IplImage*) param;
switch( event ){
case CV_EVENT_MOUSEMOVE:
if( drawing_box )
{
box.width = x-box.x;
box.height = y-box.y;
}
break;
case CV_EVENT_LBUTTONDOWN:
drawing_box = true;
box = cvRect( x, y, 0, 0 );
break;
case CV_EVENT_LBUTTONUP:
drawing_box = false;
if( box.width < 0 )
{
box.x += box.width;
box.width *= -1;
}
if( box.height < 0 )
{
box.y += box.height;
box.height *= -1;
}
draw_box( image, box);
break;
case CV_EVENT_RBUTTONUP:
destroy=true;
}
}
int main()
{
const char* name = "Box Example";
cvNamedWindow( name );
box = cvRect(0,0,1,1);
CvCapture* capture = cvCreateFileCapture( "C:\\video.mp4" );
image = cvQueryFrame( capture );
IplImage* temp = cvCloneImage( image );
// Set up the callback
cvSetMouseCallback( name, my_mouse_callback, (void*) image);
//IplImage *img2 = cvCreateImage(cvGetSize(temp),temp->depth,temp->nChannels);
//cvNot(temp,temp);
/* copy subimage */
//cvCopy(temp, temp, NULL);
// Main loop
while( 1 )
{
if(destroy) {cvDestroyWindow(name); break;}
cvCopyImage( image, temp );
if( drawing_box )
draw_box( temp, box );
cvMoveWindow(name, 200, 100);
cvShowImage( name, temp );
if( cvWaitKey( 15 )==27 )
break;
}
//cvReleaseImage( &image );
cvReleaseImage( &temp );
cvDestroyWindow( name );
cvNamedWindow( "Example2", CV_WINDOW_AUTOSIZE );
cvMoveWindow("Example2", 150, 150);
while(1)
{
frame2 = cvQueryFrame( capture );
draw_box(frame2,box);
if( !frame2 ) break;
cvShowImage( "Example2", frame2 );
char c = cvWaitKey(33);
if( c == 27 ) break;
}
cvReleaseCapture( &capture );
cvDestroyWindow( "Example2" );
return 0;
}
You were almost there. One problem though: case CV_EVENT_RBUTTONUP needs a break, and I would also add a break to the default case.
The following code sets the ROI, performs a simple grayscale processing on it and then copies the processed ROI back to the original image.
For testing purposes I changed your code to use my camera instead of loading a file.
Code:
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <cv.h>
#include <highgui.h>
using namespace cv;
using namespace std;
void my_mouse_callback( int event, int x, int y, int flags, void* param );
bool destroy=false;
CvRect box;
bool drawing_box = false;
void draw_box(IplImage* img, CvRect rect)
{
cvRectangle(img, cvPoint(box.x, box.y), cvPoint(box.x+box.width,box.y+box.height),
cvScalar(0,0,255) ,2);
CvRect rect2=cvRect(box.x,box.y,box.width,box.height);
//cvSetImageROI(image, rect2); //here I wanted to set the drawn rect as ROI
}
// Implement mouse callback
void my_mouse_callback( int event, int x, int y, int flags, void* param )
{
IplImage* frame = (IplImage*) param;
switch( event )
{
case CV_EVENT_MOUSEMOVE:
{
if( drawing_box )
{
box.width = x-box.x;
box.height = y-box.y;
}
}
break;
case CV_EVENT_LBUTTONDOWN:
{
drawing_box = true;
box = cvRect( x, y, 0, 0 );
}
break;
case CV_EVENT_LBUTTONUP:
{
drawing_box = false;
if( box.width < 0 )
{
box.x += box.width;
box.width *= -1;
}
if( box.height < 0 )
{
box.y += box.height;
box.height *= -1;
}
draw_box(frame, box);
}
break;
case CV_EVENT_RBUTTONUP:
{
destroy=true;
}
break;
default:
break;
}
}
int main()
{
const char* name = "Box Example";
cvNamedWindow( name );
box = cvRect(0,0,1,1);
CvCapture* capture = cvCaptureFromCAM(0);
if (!capture)
{
printf("!!! Failed cvCaptureFromCAM\n");
return 1;
}
IplImage* image = cvQueryFrame(capture);
if (!image)
{
printf("!!! Failed cvQueryFrame #1\n");
return 2;
}
IplImage* temp = cvCloneImage(image);
// Set up the callback
cvSetMouseCallback(name, my_mouse_callback, (void*) image);
// Main loop
while( 1 )
{
if (destroy)
{
cvDestroyWindow(name); break;
}
cvCopyImage(image, temp);
if (drawing_box)
draw_box(temp, box);
cvMoveWindow(name, 200, 100);
cvShowImage(name, temp);
if (cvWaitKey(15) == 27)
break;
}
cvReleaseImage(&temp);
cvDestroyWindow(name);
cvNamedWindow("Example2", CV_WINDOW_AUTOSIZE);
cvMoveWindow("Example2", 150, 150);
// Retrieve a single frame from the device and set the ROI
IplImage* vid_frame = cvQueryFrame(capture);
if (!vid_frame)
{
printf("!!! Failed cvQueryFrame #2\n");
return 2;
}
cvSetImageROI(vid_frame, box);
// Allocate space for a single-channel ROI (to store grayscale frames)
IplImage* gray_roi = cvCreateImage(cvSize(box.width, box.height), IPL_DEPTH_8U, 1);
IplImage* rgb_roi = cvCreateImage(cvSize(box.width, box.height), IPL_DEPTH_8U, 3);
while(1)
{
if (!vid_frame)
{
vid_frame = cvQueryFrame(capture);
if (!vid_frame)
{
printf("!!! Failed cvQueryFrame #3\n");
break;
}
}
draw_box(vid_frame, box);
// Set ROI and perform some processing (in this case, converting the ROI to grayscale)
cvSetImageROI(vid_frame, box);
cvCvtColor(vid_frame, gray_roi, CV_BGR2GRAY);
//cvShowImage("Example2", gray_roi);
/* At this point gray_roi has the size of the ROI and contains the processed image.
* For fun, we copy the processed image back to the original image and display it on the screen!
*/
cvCvtColor(gray_roi, rgb_roi, CV_GRAY2BGR);
// As the ROI is still set, cvCopy is affected by it
cvCopy(rgb_roi, vid_frame, NULL);
// Now reset the ROI so cvShowImage displays the full image
cvResetImageROI(vid_frame);
cvShowImage("Example2", vid_frame);
char c = cvWaitKey(33);
if( c == 27 ) break;
vid_frame = NULL;
}
cvSaveImage("processed.jpg", vid_frame);
cvReleaseImage(&gray_roi);
cvReleaseImage(&rgb_roi);
cvReleaseCapture( &capture );
cvDestroyWindow( "Example2" );
return 0;
}
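For completeness, a rough sketch of the same idea on OpenCV 3.x+ (not a drop-in replacement for the code above): cv::selectROI handles the interactive rectangle, and a Mat sub-view serves as the ROI:
#include <opencv2/opencv.hpp>

int main()
{
    cv::VideoCapture cap("C:\\video.mp4");               // same path as in the question
    cv::Mat first;
    if(!cap.read(first)) return -1;
    cv::Rect box = cv::selectROI("Box Example", first);  // drag a rectangle, confirm with ENTER/SPACE
    if(box.area() == 0) return 0;
    cv::Mat frame;
    while(cap.read(frame))
    {
        cv::Mat roi = frame(box);                        // view into the frame, no copy
        cv::Mat gray;
        cv::cvtColor(roi, gray, cv::COLOR_BGR2GRAY);     // process only the ROI
        cv::cvtColor(gray, roi, cv::COLOR_GRAY2BGR);     // write the result back in place
        cv::rectangle(frame, box, cv::Scalar(0, 0, 255), 2);
        cv::imshow("Example2", frame);
        if(cv::waitKey(33) == 27) break;                 // ESC quits
    }
    return 0;
}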

How to know the size of a frame or image

This may seem trivial to most people, but I am having problems determining the exact size, i.e. the width and height, of my video frames. I used cvGetSize but I am probably using it incorrectly, because I am getting an error. Is it possible to print the width and height of my frames as I have attempted in my code below? I would appreciate it if someone could advise me on this.
#include "cv.h"
#include "highgui.h"
#include "iostream"
using namespace std;
int main( int argc, char* argv ) {
CvCapture *capture = NULL;
capture = cvCaptureFromAVI("C:\\walking\\lady walking.avi");
if(!capture){
return -1;
}
IplImage* color_frame = NULL;
IplImage* gray_frame = NULL ;
int thresh_frame = 17;
int frameCount=0;//Counts every 5 frames
cvNamedWindow( "contours", CV_WINDOW_AUTOSIZE );
while(1) {
color_frame = cvQueryFrame( capture );//Grabs the frame from a file
if( !color_frame ) break;
gray_frame = cvCreateImage(cvSize(color_frame->width, color_frame->height), color_frame->depth, 1);
if( !color_frame ) break;// If the frame does not exist, quit the loop
frameCount++;
if(frameCount==5)
{
cvCvtColor(color_frame, gray_frame, CV_BGR2GRAY);
cvThreshold(gray_frame, gray_frame, thresh_frame, 255, CV_THRESH_TOZERO_INV);
cvGetSize(gray_frame);
int w;
int h;
cvSize(w,h);
cout <<" dimensions " << cvSize(w, h) << endl;
cvShowImage("contours", gray_frame);
frameCount=0;
}
char c = cvWaitKey(33);
if( c == 27 ) break;
}
cvReleaseImage(&color_frame);
cvReleaseImage(&gray_frame);
cvReleaseCapture( &capture );
cvDestroyWindow( "contours" );
return 0;
}
Try the following code :)
The key point is the use of the cvGetSize function and the CvSize structure.
#include "cv.h"
#include "highgui.h"
#include "iostream"
using namespace std;
int main( int argc, char* argv ) {
CvCapture *capture = NULL;
capture = cvCaptureFromAVI("C:\\walking\\lady walking.avi");
if(!capture){
return -1;
}
IplImage* color_frame = NULL;
IplImage* gray_frame = NULL ;
int thresh_frame = 17;
int frameCount=0;//Counts every 5 frames
cvNamedWindow( "contours", CV_WINDOW_AUTOSIZE );
while(1) {
color_frame = cvQueryFrame( capture );//Grabs the frame from a file
if( !color_frame ) break;
gray_frame = cvCreateImage(cvSize(color_frame->width, color_frame->height), color_frame->depth, 1);
if( !color_frame ) break;// If the frame does not exist, quit the loop
frameCount++;
if(frameCount==5)
{
cvCvtColor(color_frame, gray_frame, CV_BGR2GRAY);
cvThreshold(gray_frame, gray_frame, thresh_frame, 255, CV_THRESH_TOZERO_INV);
CvSize dim = cvGetSize(gray_frame);
cout <<" dimensions:: height:" << dim.height<<" width:"<< dim.width<< endl;
cvShowImage("contours", gray_frame);
frameCount=0;
}
char c = cvWaitKey(33);
if( c == 27 ) break;
}
cvReleaseImage(&color_frame);
cvReleaseImage(&gray_frame);
cvReleaseCapture( &capture );
cvDestroyWindow( "contours" );
return 0;
}
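With the C++ API the same information is available either from the capture properties or from the decoded frame itself; a quick sketch, assuming the same file path:
#include <opencv2/opencv.hpp>
#include <iostream>

int main()
{
    cv::VideoCapture cap("C:\\walking\\lady walking.avi");
    if(!cap.isOpened()) return -1;
    int w = (int)cap.get(CV_CAP_PROP_FRAME_WIDTH);   // size reported by the container
    int h = (int)cap.get(CV_CAP_PROP_FRAME_HEIGHT);
    cv::Mat frame;
    cap >> frame;                                    // or inspect the decoded frame directly
    std::cout << "capture says " << w << " x " << h
              << ", frame is " << frame.cols << " x " << frame.rows << std::endl;
    return 0;
}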