How to extract video frames and save them as images using C++

I am trying to save the frames of a video file as images on my PC. I am using Visual Studio 2010 and OpenCV 2.3.1. The code below can save the frames of an image sequence, but for a video file it does not save any frames.
The problem specifically seems to be here, in the video processing function:
string imageToSave =("output_MOG_" + frameNumberString + ".png");
bool saved = imwrite( imageToSave,fgMaskMOG);
if(!saved) {
cerr << "Unable to save " << imageToSave << endl;
}
Can anyone help me solve this?
Thanks in advance.
My code is this:
//opencv
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/background_segm.hpp>
//#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc/imgproc.hpp>
//#include "opencv2/videoio.hpp"
#include <opencv2/video/video.hpp>
//C
#include <stdio.h>
//C++
#include <iostream>
#include <sstream>
using namespace cv;
using namespace std;
// Global variables
Mat frame; //current frame
Mat fgMaskMOG; //fg mask fg mask generated by MOG method
Ptr<BackgroundSubtractor> pMOG; //MOG Background subtractor
int keyboard; //input from keyboard
/** Function Headers */
void help();
void processVideo(char* videoFilename);
void processImages(char* firstFrameFilename);
void help()
{
cout
<< "--------------------------------------------------------------------- -----" << endl
<< "This program shows how to use background subtraction methods provided by " << endl
<< " OpenCV. You can process both videos (-vid) and images (-img)." << endl
<< endl
<< "Usage:" << endl
<< "./bs {-vid <video filename>|-img <image filename>}" << endl
<< "for example: ./bs -vid video.avi" << endl
<< "or: ./bs -img /data/images/1.png" << endl
<< "--------------------------------------------------------------------------" << endl
<< endl;
}
/**
* @function main
*/
int main(int argc, char* argv[])
{
//print help information
help();
//check for the input parameter correctness
if(argc != 3) {
cerr <<"Incorret input list" << endl;
cerr <<"exiting..." << endl;
return EXIT_FAILURE;
}
//create GUI windows
namedWindow("Frame");
namedWindow("FG Mask MOG ");
//create Background Subtractor objects
pMOG = new BackgroundSubtractorMOG();
if(strcmp(argv[1], "-vid") == 0) {
//input data coming from a video
processVideo(argv[2]);
}
else if(strcmp(argv[1], "-img") == 0) {
//input data coming from a sequence of images
processImages(argv[2]);
}
else {
//error in reading input parameters
cerr <<"Please, check the input parameters." << endl;
cerr <<"Exiting..." << endl;
return EXIT_FAILURE;
}
//destroy GUI windows
destroyAllWindows();
return EXIT_SUCCESS;
}
//function processVideo
void processVideo(char* videoFilename) {
//create the capture object
VideoCapture capture(videoFilename);
if(!capture.isOpened()){
//error in opening the video input
cerr << "Unable to open video file: " << videoFilename << endl;
exit(EXIT_FAILURE);
}
//read input data. ESC or 'q' for quitting
while( (char)keyboard != 'q' && (char)keyboard != 27 ){
//read the current frame
if(!capture.read(frame)) {
cerr << "Unable to read next frame." << endl;
cerr << "Exiting..." << endl;
exit(EXIT_FAILURE);
}
//update the background model
pMOG->operator()(frame, fgMaskMOG,0.9);
//get the frame number and write it on the current frame
stringstream ss;
rectangle(frame, cv::Point(10, 2), cv::Point(100,20),
cv::Scalar(255,255,255), -1);
ss << capture.get(CV_CAP_PROP_POS_FRAMES);
string frameNumberString = ss.str();
putText(frame, frameNumberString.c_str(), cv::Point(15, 15),
FONT_HERSHEY_SIMPLEX, 0.5 , cv::Scalar(0,0,0));
//show the current frame and the fg masks
imshow("Frame", frame);
imshow("FG Mask MOG ", fgMaskMOG);
//get the input from the keyboard
keyboard = waitKey( 30 );
}
/*
string imageToSave =("output_MOG_" + frameNumberString + ".bmp");
bool saved = imwrite( imageToSave,fgMaskMOG);
if(!saved) {
cerr << "Unable to save " << imageToSave << endl;
}
*/
//delete capture object
capture.release();
}
/**
* @function processImages
*/
void processImages(char* firstFrameFilename) {
//read the first file of the sequence
frame = imread(firstFrameFilename);
if(frame.empty()){
//error in opening the first image
cerr << "Unable to open first image frame: " << fistFrameFilename << endl;
exit(EXIT_FAILURE);
}
//current image filename
string fn(firstFrameFilename);
//read input data. ESC or 'q' for quitting
while( (char)keyboard != 'q' && (char)keyboard != 27 ){
//update the background model
pMOG->operator()(frame, fgMaskMOG,0.9);
//get the frame number and write it on the current frame
size_t index = fn.find_last_of("/");
if(index == string::npos) {
index = fn.find_last_of("\\");
}
size_t index2 = fn.find_last_of(".");
string prefix = fn.substr(0,index+1);
string suffix = fn.substr(index2);
string frameNumberString = fn.substr(index+1, index2-index-1);
istringstream iss(frameNumberString);
int frameNumber = 0;
iss >> frameNumber;
rectangle(frame, cv::Point(10, 2), cv::Point(100,20),
cv::Scalar(255,255,255), -1);
putText(frame, frameNumberString.c_str(), cv::Point(15, 15),
FONT_HERSHEY_SIMPLEX, 0.5 , cv::Scalar(0,0,0));
//show the current frame and the fg masks
imshow("Frame", frame);
imshow("FG Mask MOG ", fgMaskMOG);
//get the input from the keyboard
keyboard = waitKey( 30 );
//search for the next image in the sequence
ostringstream oss;
oss << (frameNumber + 1);
string nextFrameNumberString = oss.str();
string nextFrameFilename = prefix + nextFrameNumberString + suffix;
//read the next frame
frame = imread(nextFrameFilename);
if(frame.empty()){
//error in opening the next image in the sequence
cerr << "Unable to open image frame: " << nextFrameFilename << endl;
exit(EXIT_FAILURE);
}
//update the path of the current frame
fn.assign(nextFrameFilename);
// save subtracted images
string imageToSave =("output_MOG_" + frameNumberString + ".png");
bool saved = imwrite( imageToSave,fgMaskMOG);
if(!saved) {
cerr << "Unable to save " << imageToSave << endl;
}
}
}

This is a small sample using your webcam. You can easily adapt it to use a video:
#include "opencv2/opencv.hpp"
using namespace cv;
int main(int, char**)
{
VideoCapture cap(0); // open the default camera
if (!cap.isOpened()) // check if we succeeded
return -1;
Ptr<BackgroundSubtractor> pMOG = new BackgroundSubtractorMOG2();
Mat fg_mask;
Mat frame;
int count = -1;
for (;;)
{
// Get frame
cap >> frame; // get a new frame from camera
// Update counter
++count;
// Background subtraction
pMOG->operator()(frame, fg_mask);
imshow("frame", frame);
imshow("fg_mask", fg_mask);
// Save foreground mask
string name = "mask_" + std::to_string(count) + ".png";
imwrite("D:\\SO\\temp\\" + name, fg_mask);
if (waitKey(1) >= 0) break;
}
// the camera will be deinitialized automatically in VideoCapture destructor
return 0;
}
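For the setup in the question (OpenCV 2.3.1 / Visual Studio 2010, reading a video file instead of a webcam), a minimal sketch along the same lines could look like the following. It keeps the older BackgroundSubtractorMOG interface from the question and builds the filename with a stringstream rather than std::to_string, since VS2010's std::to_string support is limited. The key point is that imwrite has to be called inside the read loop, where the current mask and frame number are in scope:
// Sketch only: save every MOG foreground mask while reading a video file.
// Assumes OpenCV 2.x (BackgroundSubtractorMOG, CV_CAP_PROP_POS_FRAMES).
#include <opencv2/opencv.hpp>
#include <sstream>
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char* argv[])
{
    if (argc != 2) { cerr << "usage: ./bs <video filename>" << endl; return 1; }
    VideoCapture capture(argv[1]);
    if (!capture.isOpened()) { cerr << "Unable to open video file: " << argv[1] << endl; return 1; }
    Ptr<BackgroundSubtractor> pMOG = new BackgroundSubtractorMOG();
    Mat frame, fgMaskMOG;
    while (capture.read(frame))
    {
        pMOG->operator()(frame, fgMaskMOG);              // update the background model
        stringstream ss;
        ss << (int)capture.get(CV_CAP_PROP_POS_FRAMES);  // current frame number
        string imageToSave = "output_MOG_" + ss.str() + ".png";
        if (!imwrite(imageToSave, fgMaskMOG))            // save INSIDE the loop
            cerr << "Unable to save " << imageToSave << endl;
        imshow("FG Mask MOG", fgMaskMOG);
        if ((char)waitKey(30) == 'q') break;
    }
    return 0;
}
A likely reason the original processVideo() never saves anything is that its imwrite block is commented out and placed after the while loop, where frameNumberString is no longer in scope; moving the save into the loop, as above, writes one mask per frame.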

Related

How to switch cameras to get a 360-degree view of an object?

I'm trying to find a way to switch between several cameras placed in different positions around an object, to get a 360-degree view in real time using C++. I'm working with OpenCV and I succeeded in opening all the cameras, but I couldn't find documentation or a way to switch between them smoothly for that 360-degree view.
This is the code I have made so far:
using namespace cv;
using namespace std;
int main(int argc, char* argv[])
{
int a = 0;
VideoCapture cap(0); // open the video camera no. 0
VideoCapture cap1(1);
//if (!cap.isOpened()) // if not success, exit program
//{
// cout << "Cannot open the video cam" << endl;
// return -1;
//}
double dWidth = cap.get(CV_CAP_PROP_FRAME_WIDTH); //get the width of frames of the video
double dHeight = cap.get(CV_CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video
cout << "Frame size : " << dWidth << " x " << dHeight << endl;
namedWindow("MyVideo", CV_WINDOW_AUTOSIZE); //create a window called "MyVideo"
while (1)
{
Mat frame;
bool bSuccess;
if(a==0)
bSuccess = cap.read(frame); // read a new frame from video
else if (a == 1)
bSuccess = cap1.read(frame);
if (!bSuccess) //if not success, break loop
{
cout << "Cannot read a frame from video stream" << endl;
break;
}
imshow("MyVideo", frame); //show the frame in "MyVideo" window
if (waitKey(1) == 'a')
{
if (!a) a = 1;
else a = 0;
}
/*else if (waitKey(1) == 27)
{
cout << "esc key is pressed by user" << endl;
break;
}*/
}
return 0;
}
I have 6 other cameras and an object in the middle.
Is it possible to do this in C++, or do I need to use something else? Keep in mind that it has to be done in real time.
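One way to approach the switching itself (a sketch only, assuming the cameras are reachable as consecutive device indices 0..N-1) is to open every VideoCapture up front, keep them in a vector, and let a key press rotate to the next camera around the object:
// Sketch: cycle through several already-opened cameras with the 'a' key.
// The number of cameras (N) and the device indices are assumptions.
#include <opencv2/opencv.hpp>
#include <vector>
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
    const int N = 2;                              // total number of cameras around the object (assumed)
    vector<VideoCapture> cams;
    for (int i = 0; i < N; ++i)
    {
        cams.push_back(VideoCapture(i));
        if (!cams[i].isOpened()) { cout << "Cannot open camera " << i << endl; return -1; }
    }
    int active = 0;
    Mat frame;
    namedWindow("MyVideo", CV_WINDOW_AUTOSIZE);
    for (;;)
    {
        if (!cams[active].read(frame)) break;     // read only from the active camera
        imshow("MyVideo", frame);
        int key = waitKey(1);                     // read the key once per iteration
        if (key == 'a') active = (active + 1) % N;  // switch to the next camera
        else if (key == 27) break;                // ESC quits
    }
    return 0;
}
Note also that calling waitKey() more than once per iteration, as in the commented-out else branch of the original loop, makes key presses easy to miss; reading the key once into a variable avoids that.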

Debug assertion failed - C++ (OpenCV) - pops up after the program terminates

I am using OpenCV version 2.4.9 and Visual Studio 2015. I am sure all the dependencies between them are working, since other sample programs that use the OpenCV libraries worked perfectly.
You can find here my code:
#include <opencv2/opencv.hpp>
#include <stdio.h>
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <fstream>
#include <sstream>
using namespace cv;
using namespace std;
String face_cascade_name = "C:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt.xml";
String eye_cascade_name = "C:\\opencv\\sources\\data\\haarcascades\\haarcascade_eye.xml";
Mat faceDetect(Mat img);
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
using namespace cv;
using namespace std;
enum EmotionState_t {
SERIOUS = 0, // 0
SMILE, // 1
SURPRISED, // 2
};
static void read_csv(const string& filename, vector<Mat>& images,
vector<int>& labels, char separator = ';') {
std::ifstream file(filename.c_str(), ifstream::in);
if (!file) {
string error_message = "No valid input file was given, please check the given filename.";
CV_Error(CV_StsBadArg, error_message);
}
string line, path, classlabel;
while (getline(file, line)) {
stringstream liness(line);
getline(liness, path, separator);
getline(liness, classlabel);
if (!path.empty() && !classlabel.empty()) {
images.push_back(imread(path, 0));
labels.push_back(atoi(classlabel.c_str()));
}
}
}
int main(int argc, const char *argv[])
{
EmotionState_t emotion;
if (!face_cascade.load(face_cascade_name)) {
printf("--(!)Error loading\n"); return -1; };
if (!eyes_cascade.load(eye_cascade_name)) {
printf("--(!)Error loading\n"); return -1; };
// 0 is the ID of the built-in laptop camera, change if you want to use another camera
VideoCapture cap(0);
//check if the file was opened properly
if (!cap.isOpened())
{
std::cout << "Capture could not be opened succesfully" << endl;
return -1;
}
else
{
std::cout << "camera is ok.. Stay 2 ft away from your camera\n" << endl;
}
int w = 432;
int h = 240;
cap.set(CV_CAP_PROP_FRAME_WIDTH, w);
cap.set(CV_CAP_PROP_FRAME_HEIGHT, h);
Mat frame;
cap >> frame;
std::cout << "processing the image...." << endl;
Mat testSample = faceDetect(frame);
// Get the path to your CSV.
string fn_csv = "C:\\Users\\Omar\\Downloads\\test_canny\\my_csv.txt";
// These vectors hold the images and corresponding labels.
vector<Mat>* images;
images = new vector<Mat>;
vector<int>* labels;
labels = new vector<int>;
// Read in the data. This can fail if no valid
// input filename is given.
try
{
read_csv(fn_csv, *images, *labels);
}
catch (cv::Exception& e) {
cerr << "Error opening file \"" << fn_csv << "\". Reason: "
<< e.msg << endl;
// nothing more we can do
exit(1);
}
// Quit if there are not enough images for this demo.
if ((*images).size() <= 1)
{
string error_message = "This demo needs at least 2 images to work.Please add more images to your data set!";
CV_Error(CV_StsError, error_message);
}
// Get the height from the first image. We'll need this
// later in code to reshape the images to their original
// size:
int height = (*images)[0].rows;
// The following lines create an Fisherfaces model for
// face recognition and train it with the images and
// labels read from the given CSV file.
// If you just want to keep 10 Fisherfaces, then call
// the factory method like this:
//
// cv::createFisherFaceRecognizer(10);
//
// However it is not useful to discard Fisherfaces! Please
// always try to use _all_ available Fisherfaces for
// classification.
//
// If you want to create a FaceRecognizer with a
// confidence threshold (e.g. 123.0) and use _all_
// Fisherfaces, then call it with:
//
// cv::createFisherFaceRecognizer(0, 123.0);
//
Ptr<FaceRecognizer> model = createFisherFaceRecognizer();
model->train(*images, *labels);
// The following line predicts the label of a given
// test image:
int predictedLabel = model->predict(testSample);
// To get the confidence of a prediction call the model with:
//
// int predictedLabel = -1;
// double confidence = 0.0;
// model->predict(testSample, predictedLabel, confidence);
//
string result_message = format("Predicted class = %d", predictedLabel);
std::cout << result_message << endl;
// giving the result
switch (predictedLabel)
{
case SMILE:
std::cout << "You are happy!" << endl;
break;
case SURPRISED:
std::cout << "You are surprised!" << endl;
break;
case SERIOUS:
std::cout << "You are serious!" << endl;
break;
}
return 0;
}
Mat faceDetect(Mat img)
{
std::vector<Rect>* faces;
faces = new vector<Rect>;
std::vector<Rect>* eyes;
eyes = new vector<Rect>;
bool two_eyes = false;
bool any_eye_detected = false;
//detecting faces
face_cascade.detectMultiScale(img, *faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE,
Size(30, 30));
if ((*faces).size() == 0)
{
std::cout << "Try again.. I did not dectected any faces..." << endl;
exit(-1); // abort everything
}
Point p1 = Point(0, 0);
for (size_t i = 0; i < (*faces).size(); i++)
{
// we cannot draw in the image !!! otherwise will mess with the prediction
// rectangle( img, faces[i], Scalar( 255, 100, 0 ), 4, 8, 0 );
Mat frame_gray;
cvtColor(img, frame_gray, CV_BGR2GRAY);
// croping only the face in region defined by faces[i]
std::vector<Rect>* eyes;
eyes = new vector<Rect>;
Mat faceROI = frame_gray((*faces)[i]);
//In each face, detect eyes
eyes_cascade.detectMultiScale(faceROI, *eyes, 1.1, 3, 0
| CV_HAAR_SCALE_IMAGE, Size(30, 30));
for (size_t j = 0; j < (*eyes).size(); j++)
{
Point center((*faces)[i].x + (*eyes)[j].x + (*eyes)[j].width*0.5,
(*faces)[i].y + (*eyes)[j].y + (*eyes)[j].height*0.5);
// we cannot draw in the image !!! otherwise will mess with the prediction
// int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
// circle( img, center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
if (j == 0)
{
p1 = center;
any_eye_detected = true;
}
else
{
two_eyes = true;
}
}
}
std::cout << "SOME DEBUG" << endl;
std::cout << "-------------------------" << endl;
std::cout << "faces detected:" << (*faces).size() << endl;
std::cout << "x: " << (*faces)[0].x << endl;
std::cout << "y: " << (*faces)[0].y << endl;
std::cout << "w: " << (*faces)[0].width << endl;
std::cout << "h: " << (*faces)[0].height << endl << endl;
Mat imageInRectangle;
imageInRectangle = img((*faces)[0]);
Size recFaceSize = imageInRectangle.size();
std::cout << recFaceSize << endl;
// for debug
imwrite("C:\\Users\\Omar\\Downloads\\test_canny\\imageInRectangle.jpg", imageInRectangle);
int rec_w = 0;
int rec_h = (*faces)[0].height * 0.64;
// checking the (x,y) for cropped rectangle
// based in human anatomy
int px = 0;
int py = 2 * 0.125 * (*faces)[0].height;
Mat cropImage;
std::cout << "faces[0].x:" << (*faces)[0].x << endl;
p1.x = p1.x - (*faces)[0].x;
std::cout << "p1.x:" << p1.x << endl;
if (any_eye_detected)
{
if (two_eyes)
{
std::cout << "two eyes detected" << endl;
// we have detected two eyes
// we have p1 and p2
// left eye
px = p1.x / 1.35;
}
else
{
// only one eye was found.. need to check if the
// left or right eye
// we have only p1
if (p1.x > recFaceSize.width / 2)
{
// right eye
std::cout << "only right eye detected" << endl;
px = p1.x / 1.75;
}
else
{
// left eye
std::cout << "only left eye detected" << endl;
px = p1.x / 1.35;
}
}
}
else
{
// no eyes detected but we have a face
px = 25;
py = 25;
rec_w = recFaceSize.width - 50;
rec_h = recFaceSize.height - 30;
}
rec_w = ((*faces)[0].width - px) * 0.75;
std::cout << "px :" << px << endl;
std::cout << "py :" << py << endl;
std::cout << "rec_w:" << rec_w << endl;
std::cout << "rec_h:" << rec_h << endl;
cropImage = imageInRectangle(Rect(px, py, rec_w, rec_h));
Size dstImgSize(70, 70); // same image size of db
Mat finalSizeImg;
resize(cropImage, finalSizeImg, dstImgSize);
// for debug
imwrite("C:\\Users\\Omar\\Downloads\\test_canny\\onlyface.jpg", finalSizeImg);
cvtColor(finalSizeImg, finalSizeImg, CV_BGR2GRAY);
return finalSizeImg;
}
I've debugged it and the error only pops up when I reach return 0 in the main method.
And here's an image of the error (too large to embed)
Any help will be appreciated.
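Unrelated to the assertion itself (which, with prebuilt OpenCV 2.4.9 binaries in a Visual Studio 2015 project, can also come from linking libraries built for an older compiler/runtime), the posted code allocates images, labels, faces and eyes with new and never deletes them. cv::Mat is reference-counted, so these vectors can simply be held by value; a minimal sketch of that, with stand-in data instead of the CSV:
// Sketch: vectors of Mat held by value; nothing is new'd, nothing needs deleting.
#include <opencv2/opencv.hpp>
#include <vector>
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
    vector<Mat> images;                           // instead of: new vector<Mat>
    vector<int> labels;                           // instead of: new vector<int>
    images.push_back(Mat::zeros(70, 70, CV_8U));  // stand-ins for the CSV data
    labels.push_back(0);
    images.push_back(Mat(70, 70, CV_8U, Scalar(255)));
    labels.push_back(1);
    cout << "loaded " << images.size() << " images" << endl;
    return 0;                                     // vectors and Mats clean up here
}
The same change applies inside faceDetect(), where faces and eyes can likewise be plain std::vector<Rect> objects (eyes is even re-allocated inside the face loop and never freed).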

ERROR: Background subtraction code doesn't function using OpenCV and VS2013

I ran code for offline background subtraction on a video. I used OpenCV 2.4.9 and Visual Studio 2013. The video displays and plays, but the background subtraction code does not work. Can anybody help me find what is wrong with my code? Someone told me the error is here:
if (strcmp(argv[1], "-vid") == 0)
{
//input data coming from a video
processVideo(argv[2]);
}
So, what do I need to do? Please help...
// BackgroundSubtraction_Success.cpp : Defines the entry point for the console application.
//
#include <stdio.h>
#include "stdafx.h"
#include <iostream>
#include <opencv2\core\core.hpp>
#include <opencv2\flann\flann.hpp>
#include <opencv2\imgproc\imgproc.hpp>
#include <opencv2\photo\photo.hpp>
#include <opencv2\video\video.hpp>
#include <opencv2\features2d\features2d.hpp>
#include <opencv2\objdetect\objdetect.hpp>
#include <opencv2\calib3d\calib3d.hpp>
#include <opencv2\ml\ml.hpp>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\contrib\contrib.hpp>
//#include "opencv\cv.h"
//#include <opencv2\core\core_c.h>
//#include <opencv2\highgui\highgui_c.h>
//#include <opencv2\imgproc\imgproc_c.h>
using namespace cv;
using namespace std;
//global variables
Mat frame; //current frame
Mat fgMaskMOG; //fg mask generated by MOG method
Ptr <BackgroundSubtractorMOG> pMOG; //MOG Background Subtractor
int keyboard;
//function declarations
void help();
void processVideo(char*Background);
void help()
{
cout
<< "----------------------------------------" << endl
<< endl
<< "This program begins with Motion Detection" << endl
<< "using Background Subtraction" << endl
<< endl
<< "------------------------------------------" << endl;
}
int main(int argc, char*argv[])
{
VideoCapture cap("C:/Users/user/Documents/Visual Studio 2013/Projects/cobaan/NewOpenCV_Success/sample1.avi"); // open the video file for reading
if (!cap.isOpened()) // if not success, exit program
{
cout << "Cannot open the video file" << endl;
return -1;
}
//cap.set(CV_CAP_PROP_POS_MSEC, 300); //start the video at 300ms
double fps = cap.get(CV_CAP_PROP_FPS); //get the frames per seconds of the video
cout << "Frame per seconds : " << fps << endl;
namedWindow("MyVideo", CV_WINDOW_AUTOSIZE); //create a window called "MyVideo"
while (1)
{
Mat frame;
bool bSuccess = cap.read(frame); // read a new frame from video
if (!bSuccess) //if not success, break loop
{
cout << "Cannot read the frame from video file" << endl;
break;
}
imshow("MyVideo", frame); //show the frame in "MyVideo" window
if (waitKey(30) == 27) //wait for 'esc' key press for 30 ms. If 'esc' key is pressed, break loop
{
cout << "esc key is pressed by user" << endl;
break;
}
}
//print help information
help();
//check for the input parameter correctness
if (argc != 3)
{
cerr << "incorrect input list" << endl;
cerr << "exiting..." << endl;
return EXIT_FAILURE;
}
//create GUI windows
namedWindow("Frame");
namedWindow("FG Mask MOG");
//call the constructor
BackgroundSubtractorMOG bgmog;
bgmog(frame, fgMaskMOG);
//create background subtractor objects
//pMOG = createBackgroundSubtractorMOG(); //MOG approach
if (strcmp(argv[1], "-vid") == 0)
{
//input data coming from a video
processVideo(argv[2]);
}
else
{
//error in reading input parameter
cerr << "Please check the input parameters." << endl;
cerr << "Exiting..." << endl;
return EXIT_FAILURE;
}
//destroy GUI windows
destroyAllWindows();
return EXIT_SUCCESS;
}
//call video
void processVideo(char* MyVideo)
{
//create the capture object
VideoCapture capture(MyVideo);
if (!capture.isOpened())
{
//error in opening the video input
cerr << "Unable to open video file: " <<MyVideo << endl;
exit(EXIT_FAILURE);
}
//read input data ESC or 'q' for quitting
while ((char)keyboard != 'q' && (char)keyboard != 27)
{
//read the current frame
if (!capture.read(frame))
{
cerr << "Unable to read next frame" << endl;
cerr << "Exiting..." << endl;
exit(EXIT_FAILURE);
}
//update the background model
//pMOG->apply(frame, fgMaskMOG);
//get the frame number and write it on the current frame
stringstream ss;
rectangle(frame, cv::Point(10, 2), cv::Point(100, 20), cv::Scalar(255, 255, 255), -1);
ss << capture.get(CV_CAP_PROP_POS_FRAMES);
string frameNumberString = ss.str();
putText(frame, frameNumberString.c_str(), cv::Point(15, 15), FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));
//show the current frame and fg masks
imshow("Frame", frame);
imshow("FG Mask MOG", fgMaskMOG);
//get input from the keyboard
keyboard = waitKey(30);
}
//delete capture object
capture.release();
}
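For comparison, a minimal sketch of how the MOG subtractor is normally driven in OpenCV 2.4.x (the same pattern as in the first question above): the subtractor is created once and is updated on each frame that was actually read, the update call in 2.4.x being operator() rather than apply() (apply() is the 3.x API):
// Sketch: OpenCV 2.4.x background subtraction on a video file (path assumed).
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
    VideoCapture capture("sample1.avi");
    if (!capture.isOpened()) { cerr << "Unable to open video file" << endl; return 1; }
    Ptr<BackgroundSubtractorMOG> pMOG = new BackgroundSubtractorMOG();
    Mat frame, fgMaskMOG;
    namedWindow("Frame");
    namedWindow("FG Mask MOG");
    while (capture.read(frame))                   // only process frames that were read
    {
        (*pMOG)(frame, fgMaskMOG);                // update the model with this frame
        imshow("Frame", frame);
        imshow("FG Mask MOG", fgMaskMOG);
        if ((char)waitKey(30) == 27) break;       // ESC quits
    }
    return 0;
}
Two things stand out in the posted main(): the first while loop plays the entire video before processVideo() is ever reached, and the call bgmog(frame, fgMaskMOG) uses the global frame, which is still empty at that point because the playback loop read into a local Mat frame.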

OpenCV: How to free IplImage*?

I am trying to get the hang of OpenCV. At the moment I am trying to subtract two frames from each other and display the result. I have found example code which does that just fine.
My problem is that I am getting a memory allocation error.
Well, nothing too special about that, because I am feeding the program HD video.
So my question is how do I release the allocated memory of an IplImage*?
The Mat type has something like Mat.release().
IplImage does not have that, nor does free(IplImage) work.
Here is my code:
#include <opencv2\opencv.hpp>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\highgui\highgui_c.h>
using namespace cv;
int main()
{
std::string videofilename;
std::cout << "Please specify the video name (make sure it is in the same folder\nas the application!):" << std::endl;
std::cin >> videofilename;
std::cout << "The name you provided: " << videofilename << std::endl;
VideoCapture video(videofilename);
if(!video.isOpened())
{
std::cout << "Could not open video file" << std::endl;
return -1;
}
std::cout << "Number of frames: " << video.get(CV_CAP_PROP_FRAME_COUNT) << std::endl;
std::cout << "Duration: "<< static_cast<int>(video.get(CV_CAP_PROP_FRAME_COUNT))/(30*60) << "min " << static_cast<int>((video.get(CV_CAP_PROP_FRAME_COUNT)))%(30*60)/30 << "sek" << std::endl;
// Close it before opening for playing
video.release();
CvCapture* capture = cvCaptureFromAVI(videofilename.c_str());
IplImage* frame = cvQueryFrame(capture);
IplImage* currframe = cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,3);
IplImage* destframe = cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,3);
if ( !capture )
{
std::cout << "Could not open video file" << std::endl;
return -1;
}
cvNamedWindow("dest", CV_WINDOW_AUTOSIZE);
while(1)
{
frame = cvQueryFrame(capture);
if(!frame)
{
printf("Capture Finished\n");
break;
}
currframe = cvCloneImage(frame); // copy frame to current
frame = cvQueryFrame(capture); // grab frame
if(!frame)
{
printf("Capture Finished\n");
break;
}
cvSub(frame, currframe, destframe); // subtraction between the last frame to cur
cvShowImage("dest", destframe);
//cvReleaseImage(currframe); // doesn't work
//free(currframe); // doesnt work either
//delete(currframe); //again, no luck
cvWaitKey(30);
}
cvDestroyWindow("dest");
cvReleaseCapture(&capture);
return 0;
}
You can free IplImage using cvReleaseImage. It takes address of a pointer to an IplImage, i.e. IplImage** as argument, so you have to do this:
cvReleaseImage(&currframe);
instead of cvReleaseImage(currframe);.
But keep in mind that the image returned by cvQueryFrame (frame in your case) is a special case and should not be released or modified. Also, you don't have to preallocate currframe if you are going to initialize it with cvCloneImage eventually.
The final code would look like this:
int main()
{
std::string videofilename;
std::cout << "Please specify the video name (make sure it is in the same folder\nas the application!):" << std::endl;
std::cin >> videofilename;
std::cout << "The name you provided: " << videofilename << std::endl;
VideoCapture video(videofilename);
if(!video.isOpened())
{
std::cout << "Could not open video file" << std::endl;
return -1;
}
std::cout << "Number of frames: " << video.get(CV_CAP_PROP_FRAME_COUNT) << std::endl;
std::cout << "Duration: "<< static_cast<int>(video.get(CV_CAP_PROP_FRAME_COUNT))/(30*60) << "min " << static_cast<int>((video.get(CV_CAP_PROP_FRAME_COUNT)))%(30*60)/30 << "sek" << std::endl;
// Close it before opening for playing
video.release();
CvCapture* capture = cvCaptureFromAVI(videofilename.c_str());
IplImage* frame = cvQueryFrame(capture);
IplImage* currframe;
IplImage* destframe = cvCreateImage(cvGetSize(frame),IPL_DEPTH_8U,3);
if ( !capture )
{
std::cout << "Could not open video file" << std::endl;
return -1;
}
cvNamedWindow("dest", CV_WINDOW_AUTOSIZE);
while(1)
{
frame = cvQueryFrame(capture);
if(!frame)
{
printf("Capture Finished\n");
break;
}
currframe = cvCloneImage(frame); // copy frame to current
frame = cvQueryFrame(capture); // grab frame
if(!frame)
{
printf("Capture Finished\n");
break;
}
cvSub(frame, currframe, destframe); // subtraction between the last frame to cur
cvShowImage("dest", destframe);
cvWaitKey(30);
cvReleaseImage(&currframe);
}
cvDestroyWindow("dest");
cvReleaseCapture(&capture);
return 0;
}
Use cvReleaseImage(). For example, if you want to release IplImage* frame, use cvReleaseImage(&frame).
For Mat, you don't need to release it explicitly. It is released automatically when it goes out of scope.
Edit: take a look here for more details about cvReleaseImage(), which covers some incorrect releasing situations.
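Since both answers point out that cv::Mat manages its own memory, here is a sketch of the same frame-differencing loop written purely against the C++ API, with nothing to release by hand (it assumes the same video-from-file setup as the question):
// Sketch: frame differencing with the C++ API only; every Mat frees itself.
#include <opencv2/opencv.hpp>
#include <iostream>
#include <string>
using namespace cv;
int main()
{
    std::string videofilename;
    std::cout << "Please specify the video name:" << std::endl;
    std::cin >> videofilename;
    VideoCapture video(videofilename);
    if (!video.isOpened()) { std::cout << "Could not open video file" << std::endl; return -1; }
    Mat prev, curr, diff;
    video >> prev;                           // first frame
    namedWindow("dest", CV_WINDOW_AUTOSIZE);
    while (true)
    {
        video >> curr;                       // next frame
        if (curr.empty()) { std::cout << "Capture Finished" << std::endl; break; }
        absdiff(curr, prev, diff);           // absolute difference of consecutive frames
        imshow("dest", diff);                // (cvSub was a plain saturated subtraction)
        curr.copyTo(prev);                   // deep copy: the capture reuses its buffer
        if (waitKey(30) >= 0) break;
    }
    return 0;                                // Mats and the capture clean up here
}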

Functions in OpenCV

Hi, I have written the following code in OpenCV. Basically it reads a video from a file. Now I want to create a function to resize the video, but I am unsure how to pass the "VideoCapture" object from the main function. I have written a sample function to see if it will print anything; it compiles fine and shows output from the main function, but nothing from the newly created function. Any help? P.S. I'm not very experienced, bear with me LOL.
using namespace cv;
using namespace std;
void resize_video(VideoCapture capture);
int main(int argc, char** argv)
{
VideoCapture capture; //the C++ API class to capture the video from file
if(argc == 2)
capture.open(argv[1]);
else
capture.open(0);
if(!capture.isOpened())
{
cout << "Cannot open video file " << endl;
return -1;
}
Mat frame;
namedWindow("display", CV_WINDOW_AUTOSIZE);
cout << "Get the video dimensions " << endl;
int fps = capture.get((int)CV_CAP_PROP_FPS);
int height = capture.get((int)CV_CAP_PROP_FRAME_HEIGHT);
int width = capture.get((int)CV_CAP_PROP_FRAME_WIDTH);
int noF = capture.get((int)CV_CAP_PROP_FRAME_COUNT);
CvSize size = cvSize(width , height);
cout << "Dimensions: " << width << height << endl;
cout << "Number of frames: " << noF << endl;
cout << "Frames per second: " << fps << endl;
while(true)
{
capture >> frame;
if(frame.empty())
break;
imshow("display", frame);
if (waitKey(30)== 'i')
break;
}
//resize_video();
}
void resize_video(VideoCapture capture)
{
cout << "Begin resizing video " << endl;
//return 0;
}
You want to call your function INSIDE the while loop, not after it (too late, the program is over by then).
So it might look like this:
void resize_video( Mat & image )
{
//
// do your processing
//
cout << "Begin resizing video " << endl;
}
and call it like:
while(true)
{
capture >> frame;
if(frame.empty())
break;
resize_video(frame);
imshow("display", frame);
if (waitKey(30)== 'i')
break;
}
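To make resize_video() do an actual resize rather than just print, one possible body (a sketch; the 0.5 scale factors are arbitrary) applies cv::resize to the frame passed in by reference, so the loop above then displays the shrunken frames:
// Sketch: shrink each frame to half its width and height.
void resize_video(Mat& image)
{
    cout << "Begin resizing video " << endl;
    Mat resized;
    resize(image, resized, Size(), 0.5, 0.5, INTER_AREA);  // fx, fy scale factors
    image = resized;   // the caller's frame now holds the resized image
}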