I'm trying to display a video file smoothly at 25 fps, without any lag. The code below works, but only achieves about 10 fps, with each loop iteration taking about 0.1 s. With cvWaitKey(1) I get around 0.03 to 0.04 s per iteration, which would be perfect, but the named window just stays grey and doesn't show the video!
Is this because cvShowImage() is too slow? Is there any other way to speed up the code and output the video smoothly?
See my code below.
Thanks a lot in advance,
Adrian
#include <cv.h>
#include <iostream>
#include <highgui.h>
#include <cxcore.h>
#include <cvaux.h>
#include <sstream>
#include <time.h>
using namespace std;
using namespace cv;
int main(int argc, char** argv)
{
CvCapture* vid = 0;
IplImage* input; //Input image
int fps;
int i=0;
clock_t start, end;
/*Creates the GUI to output the processed images*/
cvNamedWindow("Video input", 0);
/*Open input video*/
if (argc!=2)
{
cout << "Please specify an input video." << endl;
return -1;
}
else
{
vid=cvCreateFileCapture(argv[1]);
if (!vid)
{
cout << "Could not extract frame." << endl;
return -1;
}
}
input = cvQueryFrame(vid);
fps = (int)cvGetCaptureProperty(vid, CV_CAP_PROP_FPS);
cout << fps << endl;
cout << "Video found." << endl;
/*Extraction loop */
while (input)
{
start = clock();
cout << flush;
cout << i << "\r";
i++;
/*Show image*/
cvShowImage("Video input", input);
cvWaitKey(2); //Wait is needed or else we see a grey box
input = cvQueryFrame(vid); //Read next frame from video
end = clock();
cout << (double)(end-start)/CLOCKS_PER_SEC << " s" << endl;
}
/*Release the capture (frames returned by cvQueryFrame are owned by it, so they must not be released individually) */
cvReleaseCapture(&vid);
cvDestroyWindow("Video input");
return 0;
}
Have you tried this without all the cout stuff?
The debug build of the Microsoft STL has cout handling which is almost unbelievably slow.
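As a rough sketch (reusing vid from the question; the extra variables are mine): drop the per-frame printing and report a single summary line once playback ends, so console I/O can't throttle the loop.
clock_t t0 = clock();
int shown = 0;
for (IplImage* f = cvQueryFrame(vid); f != NULL; f = cvQueryFrame(vid))
{
    cvShowImage("Video input", f);
    if (cvWaitKey(2) == 27)                      // ESC still lets you stop early
        break;
    ++shown;
}
double seconds = (double)(clock() - t0) / CLOCKS_PER_SEC;
cout << shown << " frames in " << seconds << " s (" << shown / seconds << " fps)" << endl;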
Try calling cvWaitKey with 1000 / the desired fps; in your case:
cvWaitKey(1000/25)
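Fleshed out a bit (an untested sketch built on the code from the question): subtract the time already spent grabbing and drawing from the frame period, so the wait adapts to however long cvShowImage actually takes.
#include <cv.h>
#include <highgui.h>
#include <time.h>

int main(int argc, char** argv)
{
    if (argc != 2) return -1;
    CvCapture* vid = cvCreateFileCapture(argv[1]);
    if (!vid) return -1;

    int fps = (int)cvGetCaptureProperty(vid, CV_CAP_PROP_FPS);
    if (fps <= 0) fps = 25;                      // fall back if the container reports no FPS
    int period_ms = 1000 / fps;                  // 40 ms at 25 fps

    cvNamedWindow("Video input", 0);
    IplImage* input;
    while ((input = cvQueryFrame(vid)) != NULL)
    {
        clock_t start = clock();                 // clock() is rough, but matches the question
        cvShowImage("Video input", input);
        int spent_ms = (int)((clock() - start) * 1000 / CLOCKS_PER_SEC);
        int wait = period_ms - spent_ms;
        if (cvWaitKey(wait > 1 ? wait : 1) == 27)   // never pass <= 0: cvWaitKey(0) blocks
            break;
    }
    cvReleaseCapture(&vid);
    cvDestroyWindow("Video input");
    return 0;
}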
You could try something like:
char key = cvWaitKey(10); //waits 10 milliseconds
if (key == 27) //and if ESC is pressed, get out of the loop
break;
Related
Following my previous question, I want to extract frames from a video, but only from the first 2 seconds of the video.
I want to work with raw video as much as I can, which is why I don't want to cut the original video and then process it.
Here's my code to extract frames, but it extracts every frame from the video:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <string>
#include <sstream>
using namespace cv;
using namespace std;
int c = 0;
string int2str(int &);
int main(int argc, char **argv) {
string s;
VideoCapture cap("test_video.mp4");
if (!cap.isOpened())
{
cout << "Cannot open the video file" << endl;
return -1;
}
double fps = cap.get(CV_CAP_PROP_FPS);
cout << "Frame per seconds : " << fps << endl;
namedWindow("MyVideo", CV_WINDOW_NORMAL);
resizeWindow("MyVideo", 600, 600);
while (1)
{
Mat frame;
Mat Gray_frame;
bool bSuccess = cap.read(frame);
if (!bSuccess)
{
cout << "Cannot read the frame from video file" << endl;
break;
}
s = int2str(c);
//cout<<("%d\n",frame )<<endl;
c++;
imshow("MyVideo", frame);
imwrite("frame" + s + ".jpg", frame);
if (waitKey(30) == 27)
{
cout << "esc key is pressed by user" << endl;
break;
}
}
return 0;
}
string int2str(int &i) {
string s;
stringstream ss(s);
ss << i;
return ss.str();
}
Any advice? Thanks.
It seems like you already know the FPS of the video. So isn't it just a matter of counting how many frames you have extracted and breaking after you reach FPS * 2?
double fps = cap.get(CV_CAP_PROP_FPS);
int framesToExtract = fps * 2;
for (int frames = 0; frames < framesToExtract; ++frames)
{
... // your processing here
}
Also, I looked at your previous question and it seems like you have a misunderstanding of FPS?
FPS = Frames Per Second, i.e. how many frames there are in each second of the video. So let's say your video is 120 FPS: there are 120 frames in one second of video, 240 in two seconds, and so on.
So (VIDEO_FPS * x) gives you the number of frames in x seconds of the video.
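Put together as a sketch (not tested; the file name is just a placeholder), the loop would look something like this: read the fps from the capture, compute fps * 2, and stop after that many frames.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <sstream>

using namespace cv;
using namespace std;

int main()
{
    VideoCapture cap("test_video.mp4");          // placeholder file name
    if (!cap.isOpened()) return -1;

    double fps = cap.get(CV_CAP_PROP_FPS);
    int framesToExtract = (int)(fps * 2);        // number of frames in the first 2 seconds

    for (int i = 0; i < framesToExtract; ++i)
    {
        Mat frame;
        if (!cap.read(frame)) break;             // stop early if the video is shorter

        stringstream name;
        name << "frame" << i << ".jpg";
        imwrite(name.str(), frame);              // save frame i of the first 2 seconds
    }
    return 0;
}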
I want to crop a certain area of my raw video before extracting the frames. I don't want to use external video-cropping software because I need the video to stay raw for processing. Is it possible to crop a certain area before extraction? I read about ROI for images; is it possible for video too?
Here's my extraction code that's already working :
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <string>
#include <sstream>
using namespace cv;
using namespace std;
int c = 0;
string int2str(int &);
int main(int argc, char **argv) {
string s;
VideoCapture cap("test_video.mov");
if (!cap.isOpened())
{
cout << "Cannot open the video file" << endl;
return -1;
}
double fps = cap.get(CV_CAP_PROP_FPS);
cout << "Frame per seconds : " << fps << endl;
namedWindow("MyVideo", CV_WINDOW_NORMAL);
resizeWindow("MyVideo", 600, 600);
while (1)
{
Mat frame;
Mat Gray_frame;
bool bSuccess = cap.read(frame);
if (!bSuccess)
{
cout << "Cannot read the frame from video file" << endl;
break;
}
s = int2str(c);
//cout<<("%d\n",frame )<<endl;
c++;
imshow("MyVideo", frame);
imwrite("frame" + s + ".jpg", frame);
if (waitKey(30) == 27)
{
cout << "esc key is pressed by user" << endl;
break;
}
}
return 0;
}
string int2str(int &i) {
string s;
stringstream ss(s);
ss << i;
return ss.str();
}
I still need to crop the video; any advice?
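One possible approach, sketched against the loop above (frame and s are the variables from the question; the rectangle values are placeholders): a VideoCapture has no ROI of its own, but every decoded frame is an ordinary cv::Mat, so the crop can be taken per frame just before displaying and saving.
Rect roi(100, 50, 400, 300);                     // x, y, width, height -- example values; must lie inside the frame
Mat cropped = frame(roi).clone();                // clone() copies only the ROI pixels
imshow("MyVideo", cropped);
imwrite("frame" + s + ".jpg", cropped);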
I'm following a tutorial to extract video frames. I've read this question, but it doesn't work, and also a question from OpenCV Answers, but that solution is for capturing the current frame only. I have a 120 fps video and want to extract all of its frames. Here's my code:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <string>
#include <sstream>
using namespace cv;
using namespace std;
int c = 0;
string int2str(int &);
int main(int argc, char **argv) {
string s;
VideoCapture cap("test.mp4"); // video
if (!cap.isOpened())
{
cout << "Cannot open the video file" << endl;
return -1;
}
double fps = cap.get(CV_CAP_PROP_FPS); //get the frames per seconds of the video
cout << "Frame per seconds : " << fps << endl;
namedWindow("MyVideo", CV_WINDOW_AUTOSIZE); //create a window called "MyVideo"
while (1)
{
Mat frame;
Mat Gray_frame;
bool bSuccess = cap.read(frame); // read a new frame from video
if (!bSuccess)
{
cout << "Cannot read the frame from video file" << endl;
break;
}
s = int2str(c);
//cout<<("%d\n",frame )<<endl;
c++;
imshow("MyVideo", frame); //show the frame in "MyVideo" window
imwrite("ig" + s + ".jpg", frame);
if (waitKey(30) == 27) //esc key
{
cout << "esc key is pressed by user" << endl;
break;
}
}
return 0;
}
//int to string function
string int2str(int &i) {
string s;
stringstream ss(s);
ss << i;
return ss.str();
}
My problem is: I have a one-minute video at 120 fps, and I expected to extract 120 frames. But the extraction went wrong; the video lasts 1 minute, yet I got more than 200 frames stored in my folder. Did I do something wrong in my code?
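One quick sanity check, as a sketch reusing cap from the code above: ask the container how many frames it reports. FPS is frames per second, so a one-minute clip at 120 fps should contain roughly 120 * 60 = 7200 frames, not 120.
double reportedFps    = cap.get(CV_CAP_PROP_FPS);
double reportedFrames = cap.get(CV_CAP_PROP_FRAME_COUNT);   // frame count as stored in the container
cout << "reported fps:      " << reportedFps << endl;
cout << "reported frames:   " << reportedFrames << endl;
cout << "duration (approx): " << reportedFrames / reportedFps << " s" << endl;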
I am trying to read data from an industrial camera using the V4L Linux driver and C++. I would like to display the result using OpenCV. I read the buffer and create a Mat object, which actually contains values in the range 0...255.
The problem seems to be the imshow() call. When I comment this line out, a window is displayed, but without an image. With the line in place, no window is displayed, and no terminal output after this line appears. I am not able to find a solution on my own; all the examples I found look the same as my code to me.
Here is the code:
#include <fcntl.h>
#include "opencv/cv.h"
#include "opencv/highgui.h"
#include <libv4l2.h>
#include <libv4l1.h>
#include <linux/videodev2.h>
#include <sys/ioctl.h>
#define BUFFERSIZE 357120 // 744 * 480
using namespace cv;
using namespace std;
int main(int argc, char **argv) {
int cameraHandle, i;
unsigned char pictureBuffer[BUFFERSIZE];
char cameraDevice[] = "/dev/video0";
struct v4l2_control V4L2_control;
/* open camera device */
if (( cameraHandle = v4l1_open(cameraDevice, O_RDONLY)) == -1 ){
printf("Unable to open the camera");
return -1;
}
// disable auto exposure
V4L2_control.id = V4L2_CID_EXPOSURE_AUTO;
V4L2_control.value = V4L2_EXPOSURE_SHUTTER_PRIORITY;
ioctl(cameraHandle, VIDIOC_S_CTRL, &V4L2_control);
// set exposure time
V4L2_control.id = V4L2_CID_EXPOSURE_ABSOLUTE;
V4L2_control.value = 2;
ioctl(cameraHandle, VIDIOC_S_CTRL, &V4L2_control);
// get 5 pictures to warm up the camera
for (i = 0; i <= 5; i++){
v4l1_read(cameraHandle, pictureBuffer, BUFFERSIZE);
}
// show pictures
Mat mat = Mat(744, 480, CV_8UC3, (void*)pictureBuffer);
cout << "M = " << endl << " " << mat << endl << endl; // display the image data
namedWindow("imagetest", CV_WINDOW_AUTOSIZE );
imshow("imagetest", mat);
waitKey(30);
cout << "test output" << endl;
//clenup
v4l1_close(cameraHandle);
destroyWindow("imagetest");
return 0;
}
EDIT:
Well, after running the code in a terminal instead of Eclipse, I saw a segmentation fault. Even commenting out everything after the
cout << "M = " << endl << " " << mat << endl << endl;
line gives me this error.
Solved. The problem lay in the wrong pixel format: CV_8UC1 (or CV_8U) instead of CV_8UC3 produced an output. The difference between those formats is described here: In OpenCV, what's the difference between CV_8U and CV_8UC1?
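As a short illustration of the fix (my reading of it; the 744 x 480 size is taken from the BUFFERSIZE comment in the code): one byte per pixel means an 8-bit single-channel Mat, and the constructor takes rows (height) before columns (width).
// 744 * 480 bytes, one byte per pixel -> single-channel 8-bit data
// rows = height = 480, cols = width = 744, assuming the camera delivers 744x480
Mat mat(480, 744, CV_8UC1, (void*)pictureBuffer);
namedWindow("imagetest", CV_WINDOW_AUTOSIZE);
imshow("imagetest", mat);
waitKey(0);                                      // keep the window open until a key is pressed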
I am trying to write a program that captures images from two different cameras in two different threads. I want to do this because when I do it in a single thread I have to wait for cvQueryFrame twice as long, so I cannot grab images at 30 fps (I get 15 fps from each camera).
I have taken a look at this SO post, but it only works for one camera: Using cvQueryFrame and boost::thread together
My current program gives varying results: sometimes it leaks memory, usually I just don't see anything happening, and sometimes it works for a few seconds before the image freezes again. The strange thing is that earlier, when I didn't call cvShowImage but had my imageProcessing function do something useful, I could see that I was getting real-time results from both cameras. I assume this means it is possible to make this work, but that I made a stupid mistake somewhere. My OS is Linux and I am using OpenCV 2.4.
My code:
#include <iostream>
#include <cstdio>
#include <cv.h>
#include <ml.h>
#include <cvaux.h>
#include <highgui.h>
#include <vector>
#include <stdio.h>
#include "producer_consumer_queue.hpp"
//Camera settings
int cameraWidth = 1280;
int cameraHeight = 720;
int waitKeyValue = 5;
bool threads_should_exit = false;
CvCapture * capture;
CvCapture * capture2;
using namespace std;
using namespace cv;
void grabFrame(concurrent_queue<IplImage* > * frame_queue, int camNumber) {
try {
//Load first frames
cout << "grabFrame: " << camNumber << " init with " << cameraWidth << " x " << cameraHeight << endl;
IplImage* frame;
if (camNumber == 0)frame = cvQueryFrame(capture);
if (camNumber == 1)frame = cvQueryFrame(capture2);
while (frame && !threads_should_exit) {
if (camNumber == 0)frame = cvQueryFrame(capture);
if (camNumber == 1)frame = cvQueryFrame(capture2);
IplImage* frame_copy = NULL;
frame_copy = cvCloneImage(frame);
if (camNumber == 0)cvShowImage("NE", frame);
cout << "grabFrame: " << camNumber << " pushing back to queue" << endl;
frame_queue->push(frame_copy);
int k = cvWaitKey(waitKeyValue);
if (k == 1048603 || k == 27 || k == '\r') {
cout << "grabFrame: Process killed" << endl;
//release memory
threads_should_exit = true;
}
}
} catch (const concurrent_queue<IplImage* >::Canceled & e) {
cout << "grabFrame: Show thread is canceled" << endl;
return;
}
}
void processFrames(concurrent_queue<IplImage* > * frame_queue0, concurrent_queue<IplImage* > * frame_queue1) {
try {
do {
cout << "processFrames: Processing two frames" << endl;
IplImage* frm = NULL;
frame_queue0->wait_and_pop(frm);
IplImage * frm2 = NULL;
frame_queue1->wait_and_pop(frm2);
cvReleaseImage(&frm);
cvReleaseImage(&frm2);
} while (!threads_should_exit);
} catch (const concurrent_queue<IplImage* >::Canceled & e) {
cout << "processFrames: Processing thread is canceled" << endl;
return;
}
}
int main() {
capture = cvCreateCameraCapture(0);
capture2 = cvCreateCameraCapture(1);
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, cameraWidth);
cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, cameraHeight);
cvSetCaptureProperty(capture2, CV_CAP_PROP_FRAME_WIDTH, cameraWidth);
cvSetCaptureProperty(capture2, CV_CAP_PROP_FRAME_HEIGHT, cameraHeight);
boost::thread_group frame_workers;
boost::thread_group frame_workers2;
concurrent_queue<IplImage* > frame_queue(&frame_workers);
concurrent_queue<IplImage* > frame_queue2(&frame_workers2);
boost::thread * query_thread = new boost::thread(processFrames, &frame_queue, &frame_queue2);
boost::thread * cam0_thread = new boost::thread(grabFrame, &frame_queue, 0);
usleep(10000);
boost::thread * cam1_thread = new boost::thread(grabFrame, &frame_queue2, 1);
frame_workers.add_thread(query_thread);
frame_workers.add_thread(cam0_thread);
frame_workers2.add_thread(query_thread);
frame_workers2.add_thread(cam1_thread);
while (true) {
if (threads_should_exit) {
cout << "Main: threads should be killed" << endl;
while (!frame_queue.empty()) {
usleep(10000);
}
frame_workers.remove_thread(query_thread);
frame_workers2.remove_thread(query_thread);
frame_workers.remove_thread(cam0_thread);
frame_workers2.remove_thread(cam1_thread);
frame_workers.join_all();
break;
}
usleep(10000);
}
return 0;
}
EDIT:
I added a simple function to detect a piece of paper, to check whether everything works when I don't call cvShowImage(). My program detects the piece of paper fine if I don't call cvShowImage(); if I do, the program shows the strange behavior again and freezes, etc.
There should only be one thread manipulating the GUI (this is true for just about any GUI framework). You should organize your code so that cvShowImage is only invoked from the main "GUI thread".
It seems that the work that's being done in query_thread could just as easily be done inside the main thread.
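A minimal sketch of that structure (using std::thread and the C++ API instead of boost and the custom producer_consumer_queue, so it stays self-contained): the grab threads only capture and enqueue frames, while every imshow/waitKey call stays in main().
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <atomic>
#include <mutex>
#include <queue>
#include <thread>

// Mutex-protected frame queue; stands in for the custom producer_consumer_queue.
struct FrameQueue {
    std::queue<cv::Mat> frames;
    std::mutex mtx;
    void push(const cv::Mat& f) {
        std::lock_guard<std::mutex> lock(mtx);
        frames.push(f.clone());                  // clone: VideoCapture reuses its internal buffer
    }
    bool pop(cv::Mat& f) {
        std::lock_guard<std::mutex> lock(mtx);
        if (frames.empty()) return false;
        f = frames.front();
        frames.pop();
        return true;
    }
};

std::atomic<bool> should_exit(false);

// Grab thread: capture only, no GUI calls here.
void grabFrames(int camIndex, FrameQueue* out) {
    cv::VideoCapture cap(camIndex);
    cv::Mat frame;
    while (!should_exit && cap.read(frame))
        out->push(frame);
}

int main() {
    FrameQueue q0, q1;
    std::thread t0(grabFrames, 0, &q0);
    std::thread t1(grabFrames, 1, &q1);

    cv::Mat f0, f1;
    while (!should_exit) {
        if (q0.pop(f0)) cv::imshow("cam0", f0);  // GUI work happens only in the main thread
        if (q1.pop(f1)) cv::imshow("cam1", f1);
        if (cv::waitKey(5) == 27)                // ESC ends the program
            should_exit = true;
    }
    t0.join();
    t1.join();
    return 0;
}
In a real program the queues should be bounded (dropping or overwriting old frames), otherwise a slow main loop lets them grow without limit.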