cvQueryFrame returns null - c++

I am trying to run this OpenCV tutorial code using Code::Blocks on Windows:
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace std;
using namespace cv;
// Function Headers
void detectAndDisplay( Mat frame );
// Global variables
String face_cascade_name = "haarcascade_frontalface_alt.xml";
String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
string window_name = "Capture - Face detection";
RNG rng(12345);
// @function main
int main( int argc, const char** argv ){
CvCapture* capture;
Mat frame;
//-- 1. Load the cascades
if( !face_cascade.load( face_cascade_name ) ){ std::cout << "--(!)Error loading Face cascade\n"; return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ std::cout << "--(!)Error loading Eyes cascade\n"; return -1; };
//-- 2. Read the video stream
capture = cvCaptureFromCAM( 0 );
if( capture ){
while( true ){
frame = cvQueryFrame( capture );
//-- 3. Apply the classifier to the frame
if( !frame.empty() ){
detectAndDisplay( frame );
}
else{
std::cout << " --(!) No captured frame -- Break!"; break;
}
int c = waitKey(10);
if( (char)c == 'c' ) { break; }
}
}
return 0;
}
// @function detectAndDisplay
void detectAndDisplay( Mat frame ){
std::vector<Rect> faces;
Mat frame_gray;
cvtColor( frame, frame_gray, CV_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect faces
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
for( size_t i = 0; i < faces.size(); i++ ){
Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
Mat faceROI = frame_gray( faces[i] );
std::vector<Rect> eyes;
//-- In each face, detect eyes
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );
for( size_t j = 0; j < eyes.size(); j++ ){
Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( frame, center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
}
}
//-- Show what you got
imshow( window_name, frame );
}
But the output is " --(!) No captured frame -- Break!"
During execution the camera light blinks briefly, and then this output is printed.
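One thing worth checking: this code mixes the deprecated C capture API (CvCapture, cvCaptureFromCAM, cvQueryFrame) with the C++ API, and the C path is a frequent source of null frames on Windows where cv::VideoCapture works. A minimal sketch of the same loop using only the C++ API (camera index 0 is an assumption):
VideoCapture capture( 0 );               // replaces cvCaptureFromCAM( 0 )
if( !capture.isOpened() ){ std::cout << "--(!)Error opening video capture\n"; return -1; }
Mat frame;
while( true ){
    capture >> frame;                    // replaces cvQueryFrame( capture )
    if( frame.empty() ){ std::cout << " --(!) No captured frame -- Break!"; break; }
    detectAndDisplay( frame );
    if( (char)waitKey(10) == 'c' ){ break; }
}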

Related

segmentation fault error in Opencv c++

I am trying to write a program with OpenCV that decides whether a human has approached the camera. After I run the executable, I get the captured video for a few seconds and then encounter a segmentation fault.
The code is as follows.
Here are the headers:
#include "opencv2/objdetect.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
//define static variable
static int cApp = 0;//number of approached frame
static double last = 0;
//define functions
void detectAndDisplay( Mat frame );
bool computeArea( double width, double height, double lastArea);
double runningAverage(int M);
//define opencv function and classifier
String upperbody_cascade_name = "home/pi/opencv-3.0.0/data/haarcascades/haarcascade_upperbody.xml";
CascadeClassifier upper_cascade;
String window_name = "Capture - upper body detection";
Here is the main function:
int main( void )
{
//define variable
VideoCapture capture;
Mat frame;
//-- 1. Load the cascades
upper_cascade.load("/home/pi/opencv-3.0.0/data/haarcascades/haarcascade_upperbody.xml");
//-- 2. Read the video stream
capture.open( -1 );
if ( ! capture.isOpened() ) { printf("--(!)Error opening video capture\n"); return -1; }
while ( capture.read(frame) )
{
if( frame.empty() )
{
printf(" --(!) No captured frame -- Break!");
break;
}
//-- 3. Apply the classifier to the frame
detectAndDisplay( frame );
char c = (char)waitKey(10);
if( c == 27 ) { break; } // escape
}
capture.release();
return 0;
}
Here is the detectAndDisplay function:
void detectAndDisplay( Mat frame )
{
std::vector<Rect> upperbodys;
Mat frame_gray;
cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect upperbodys
upper_cascade.detectMultiScale( frame_gray, upperbodys, 1.05, 3, 0|CASCADE_SCALE_IMAGE, Size(30, 30) );
Point center( upperbodys[0].x + upperbodys[0].width/2, upperbodys[0].y + upperbodys[0].height/2 );
ellipse( frame, center, Size( upperbodys[0].width/2, upperbodys[0].height/2 ), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
bool ifApproached = computeArea(upperbodys[0].width/2, upperbodys[0].height/2, last);
if (ifApproached == true) {
cApp++;
}
if (cApp == 3) {
cout << "have approached" << endl;
cApp = cApp - 3;
}
//-- Show what you got
imshow( window_name, frame );
}
Here is the computeArea function:
bool computeArea( double width, double height, double lastArea) {
double newArea = width * height;
bool ifApproached = false;
//double presentArea = newArea;
double presentArea = runningAverage(newArea);
double DifferenceBewteenAreas = presentArea - lastArea;
if (DifferenceBewteenAreas > 1) {//threshold
ifApproached = true;
}
last = presentArea;
return ifApproached;
}
Here is the runningAverage function:
double runningAverage(int M) {
//M is measurement
//#define LM_SIZE 5
static int LM[5];
static int index =0;
static long sum = 0;
static int count =0;
//keep sum updated to improve speed
sum = sum - LM[index];
LM[index] = M;
sum = sum + LM[index];
index++;
index = index % 5;
if (count < 5) {
count++;
}
return (double)(sum / (double)count);
}
I have searched many OpenCV segmentation-fault questions. Some said this kind of segmentation fault is caused by a misused array, but my code makes little use of arrays. Others said misused function arguments can also cause this kind of error; I agree with this, and some of my arguments could be wrong here.
Actually, I found that I should not use upperbodys[0] in the code: sometimes no objects are detected at all, so indexing the empty vector reads invalid memory. I iterate over upperbodys[i] instead, and it works well now.
void detectAndDisplay( Mat frame )
{
std::vector<Rect> upperbodys;
Mat frame_gray;
cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect upperbodys
upper_cascade.detectMultiScale( frame_gray, upperbodys, 1.05, 3, 0|CASCADE_SCALE_IMAGE, Size(30, 30) );
int size = upperbodys.size();
double newArea = -1;
for (int i = 0 ; i < size; i++) {
Point center( upperbodys[i].x + upperbodys[i].width/2, upperbodys[i].y + upperbodys[i].height/2 );
ellipse( frame, center, Size( upperbodys[i].width/2, upperbodys[i].height/2 ), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
//bool ifApproached = computeArea(upperbodys[i].width/2, upperbodys[i].height/2, last);
//////////////////////////////////////////
newArea = upperbodys[i].width/2 * upperbodys[i].height/2;
if (newArea != -1) {
cout << "UpperBodys has value, index = " << i << endl;
break;
}
}
bool ifApproached = false;
//double presentArea = runningAverage(newArea);
double presentArea = newArea;
double DifferenceBewteenAreas = presentArea - last;
if (DifferenceBewteenAreas > 1) {//threshold
ifApproached = true;
}
last = presentArea;
//////////////////////////////////////////
if (ifApproached == true) {
cApp++;
}
if (cApp == 3) {
cout << "have approached" << endl;
cApp = cApp - 3;
}
//-- Show what you got
imshow( window_name, frame );
}
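An even shorter guard, as a sketch, is to bail out of detectAndDisplay as soon as the detector returns nothing, so no index into upperbodys can ever be out of range:
// right after detectMultiScale(...):
if( upperbodys.empty() ){
    imshow( window_name, frame );  // still show the unannotated frame
    return;                        // never index into an empty vector
}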

Capturing the frame when the Object is detected

I have the following code:
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace std;
using namespace cv;
String object_cascade_name = "haarcascade_frontalface_alt.xml";
CascadeClassifier object_cascade;
string window_name = "Capture - detector";
int main( void )
{
VideoCapture capture;
Mat frame;
std::vector<Rect> objects;
Mat frame_gray;
if( !object_cascade.load( object_cascade_name ) ){ std::cout << "ERROR: Cascade not loaded!\n" ; return -1; };
capture.open( 0 );
if( capture.isOpened() ){
for(;;){
capture >> frame; // the stream operator already grabs and retrieves; a separate retrieve() call is redundant
//-- 3. Apply the classifier to the frame
if( !frame.empty() ){
// Start
cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect Object
object_cascade.detectMultiScale( frame_gray, objects, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
for( size_t i = 0; i < objects.size(); i++ ){
Point pt1 = Point( objects[i].x, objects[i].y );
Point pt2 = Point( objects[i].x + objects[i].width, objects[i].y + objects[i].height );
rectangle( frame, pt1, pt2, Scalar( 34, 92, 241 ), 2, 8, 0 );
Mat faceROI = frame_gray( objects[i] );
}
//-- Show what you got
imshow( window_name, frame );
// End
}
else{ std::cout << "ERROR: frame.empty returns 1!"; break; }
int c = waitKey(10);
if( (char)c == 'c' ) { break; }
}
}
return 0;
}
which plays video from the built-in webcam and detects faces. My idea is that I want the video to stop when an object (a face) is detected, and then display a window containing only the detected object from the last frame.
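One way to do that (a sketch, untested; the second window name is arbitrary) is to break out of the capture loop as soon as detectMultiScale returns at least one object, and crop that object from the colour frame:
// inside the loop, after detectMultiScale(...) and the drawing code:
if( !objects.empty() ){
    Mat detected = frame( objects[0] ).clone(); // copy the first detection out of the last frame
    imshow( "Detected object", detected );
    waitKey( 0 );                               // hold the window until a key is pressed
    break;                                      // stop reading further frames
}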

unable to write the output video in OpenCV, program is writing only a single frame

Could anyone please help me write the output video file? I have read many similar questions on how to write the program and followed the exact steps to write the video file in .avi format, but I am not able to find out where I am wrong. The face_output.avi file is created but it only contains one frame. My program is not adding all the frames to the video file. Below is the complete code:
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
/** Function Headers */
void detectAndDisplay( Mat frame);
/** Global variables */
String face_cascade_name = "haarcascade_frontalface_alt.xml";
String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
string window_name = "Capture - Face detection";
RNG rng(12345);
double fps;
/** @function main */
int main( int argc, const char** argv )
{
VideoCapture cap("/home/pradeep/Downloads/President Obama Lectures Romney.mp4"); // open the video file for reading
if ( !cap.isOpened() ) // if not success, exit program
{
cout << "Cannot open the video file" << endl;
return -1;
}
//-- 1. Load the cascades
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
fps = cap.get(CV_CAP_PROP_FPS); //get the frames per seconds of the video
cout << "Frame per seconds : " << fps << endl;
double dWidth = cap.get(CV_CAP_PROP_FRAME_WIDTH);
double dHeight = cap.get(CV_CAP_PROP_FRAME_HEIGHT);
Size S(dWidth,dHeight);
while(1)
{
Mat frame;
int skip_frame = 4;
while(skip_frame)
{
bool bSuccess = cap.read(frame); // read a new frame from video
skip_frame--;
if (!bSuccess) //if not success, break loop
{
cout << "Cannot read the frame from video file" << endl;
break;
}
}
//-- 3. Apply the classifier to the frame
if( frame.empty() )
{ printf(" --(!) No captured frame -- Break!"); break; }
std::vector<Rect> faces;
Mat frame_gray;
cvtColor( frame, frame_gray, CV_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect faces
face_cascade.detectMultiScale( frame_gray, faces, 1.3, 5, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
for( size_t i = 0; i < faces.size(); i++ )
{
Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
Mat faceROI = frame_gray( faces[i] );
std::vector<Rect> eyes;
//-- In each face, detect eyes
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 0, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );
for( size_t j = 0; j < eyes.size(); j++ )
{
Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( frame, center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
}
}
VideoWriter Video("face_output.avi", CV_FOURCC('M','J','P','G'), fps, S, true);
if(!Video.isOpened())
{
printf("unable to write video file");
}
Video.write(frame);
//-- Show what you got
imshow( window_name, frame );
int c = waitKey(10);
if( (char)c == 'c' ) { break; }
}
return 0;
}
You are creating VideoWriter Video("face_output.avi", CV_FOURCC('M','J','P','G'), fps, S, true); inside the while(1), so you create a new Video in each iteration. Since you only write one frame per iteration, this will be the only content of your face_output.avi file.
Try moving that line before the while(1):
// ...
Size S(dWidth,dHeight);
VideoWriter Video("face_output.avi", CV_FOURCC('M','J','P','G'), fps, S, true);
while(1)
{
// ...
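As a side note (an assumption worth verifying on your file): some containers report CV_CAP_PROP_FPS as 0, in which case the writer may fail to open, so a fallback value is a cheap safeguard; releasing the writer when the loop ends flushes the file. A sketch:
Size S( dWidth, dHeight );
if( fps <= 0 ) fps = 25;  // fallback guess when the container reports no frame rate
VideoWriter Video( "face_output.avi", CV_FOURCC('M','J','P','G'), fps, S, true );
if( !Video.isOpened() ){ printf("unable to write video file"); return -1; }
while(1)
{
    // ... read, detect, draw, then:
    Video.write( frame );
}
Video.release();          // flush and close the output file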

OpenCV circle detection bounding box

I'm working on software that uses OpenCV for circle detection. I think the most important problem is the image itself. I first tried to detect circles with HoughCircles, with bad results. Then I tried to follow the instructions in this post, but that doesn't work either. Maybe I need some help pre-processing the image. Does anyone have any other ideas for detecting edges?
Original image (not reproduced here); other similar images: http://imgur.com/a/eSKFr
Below I have posted the code:
//Global variables
Mat src; Mat src_gray, threshold_output, element,dilated,eroded1, eroded2;
int thresh = 125;
int const max_value = 255;
int const max_BINARY_value = 255;
RNG rng(12345);
int s_ero1 =1;
int s_dil = 2;
int s_ero2 = 1;
int max_s = 50;
string source_window = "Thresh";
string TrackbarName = "Dilated";
string TrackbarName1 = "Eroded1";
string TrackbarName2 = "Eroded2";
/// Function header
void thresh_callback(int, void* );
void dilate_trackbar(int, void*);
void erode_trackbar1(int,void*);
void erode_trackbar2(int,void*);
int main( int, char** argv )
{
/// Load source image and convert it to gray
src = imread( "/media/Dati/image01.tif", 1 );
/// Convert image to gray and blur it
cvtColor( src, src_gray, COLOR_BGR2GRAY );
blur( src_gray, src_gray, Size(3,3) );
/// Create Window
namedWindow( "source", WINDOW_NORMAL );
imshow( "source", src );
waitKey();
namedWindow( source_window, WINDOW_NORMAL );
//Create trackbar threshold
createTrackbar( " Threshold:", source_window, &thresh, max_value, thresh_callback );
thresh_callback( 0, 0 );
waitKey();
namedWindow( TrackbarName1, WINDOW_NORMAL );
createTrackbar( "Size: ", TrackbarName1, &s_ero1, max_s, erode_trackbar1);
erode_trackbar1(0,0);
waitKey();
namedWindow( TrackbarName, WINDOW_NORMAL );
createTrackbar( "Size: ", TrackbarName, &s_dil, max_s, dilate_trackbar);
dilate_trackbar(0,0);
waitKey();
namedWindow( TrackbarName2, WINDOW_NORMAL );
createTrackbar( "Size: ", TrackbarName2, &s_ero2, max_s, erode_trackbar2);
erode_trackbar2(0,0);
waitKey();
return(0);
}
/**
* @function bounding_box
*/
void bounding_box(Mat m){
int max_point_pos = 0;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
// Find contours
findContours( m, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE, Point(0, 0) );
cout << "Number of blobs: " << contours.size() << endl;
for(int i = 1; i < contours.size(); i++){
max_point_pos = contours[max_point_pos].size() > contours[i].size()? max_point_pos : i;
}
int max_point = contours[max_point_pos].size();
cout << "the blob with the most points is at position " << max_point_pos << " with " << max_point << " points" << endl;
/// Approximate contours to polygons + get bounding rects and circles
vector<vector<Point> > contours_poly( contours.size() );
vector<Rect> boundRect( contours.size() );
vector<Point2f>center( contours.size() );
vector<float>radius( contours.size() );
for( int i = 0; i < contours.size(); i++ )
{ approxPolyDP( Mat(contours[i]), contours_poly[i], 3, true );
boundRect[i] = boundingRect( Mat(contours_poly[i]) );
minEnclosingCircle( (Mat)contours_poly[i], center[i], radius[i] );
}
/// Draw polygonal contour + bounding rects + circles
Mat drawing = src.clone();
for( size_t i = 0; i< contours.size(); i++ )
{
if(contours[i].size() > 0.6*max_point){
Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
//drawContours( drawing, contours_poly, (int)i, color, 1, 8, vector<Vec4i>(), 0, Point() );
//rectangle( drawing, boundRect[i].tl(), boundRect[i].br(), color, 2, 8, 0 );
circle( drawing, center[i], (int)radius[i], color, 7, 8, 0 );
}
}
/// Show in a window
namedWindow( "Contours", WINDOW_NORMAL );
imshow( "Contours", drawing );
}
/**
* @function thresh_callback
*/
void thresh_callback(int, void* )
{
/// Detect edges using Threshold
threshold( src_gray, threshold_output, thresh, max_BINARY_value, THRESH_BINARY_INV);
imshow(source_window, threshold_output);
}
/**
* @function dilate_trackbar
* #brief Callback for trackbar
*/
void dilate_trackbar( int, void* )
{
dilated = threshold_output.clone();
element = getStructuringElement(MORPH_ELLIPSE,Size(s_dil, s_dil) , Point(-1,-1));
dilate(dilated,dilated,element,Point(-1,-1),1);
imshow(TrackbarName, dilated);
}
/**
* @function erode_trackbar1
* #brief Callback for trackbar
*/
void erode_trackbar1( int, void*)
{
eroded1 = threshold_output.clone();
element = getStructuringElement(MORPH_ELLIPSE,Size(s_ero1, s_ero1) , Point(-1,-1));
erode(eroded1,eroded1,element,Point(-1,-1),1);
imshow(TrackbarName1, eroded1);
}
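Since the question mentions HoughCircles, one pre-processing combination that is often suggested for noisy images is a strong median blur before the transform. A minimal sketch, where every numeric parameter is a guess that would need tuning on these particular images:
Mat gray, blurred;
cvtColor( src, gray, COLOR_BGR2GRAY );
medianBlur( gray, blurred, 5 );                  // knock out speckle noise before Hough
vector<Vec3f> circles;
HoughCircles( blurred, circles, HOUGH_GRADIENT,
              1,                 // accumulator has the same resolution as the input
              blurred.rows / 8,  // minimum distance between detected centres
              100, 30,           // Canny high threshold / accumulator threshold
              10, 200 );         // min and max radius in pixels -- guesses
for( size_t i = 0; i < circles.size(); i++ ){
    Point c( cvRound(circles[i][0]), cvRound(circles[i][1]) );
    circle( src, c, cvRound(circles[i][2]), Scalar(0, 255, 0), 7 );
}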

Applying medianBlur as well as Laplacian and threshold filters

I have a function, with the following signature:
Mat cartoonifyImage( Mat, Mat );
I also have a VS2010 program, shown below, in which I apply a number of filters to a webcam stream, as taught in this book: Mastering OpenCV
int main( int argc, const char** argv )
{
VideoCapture camera;
camera.open(0);
if( !camera.isOpened() )
{
cerr << "Could not access the camera!" << endl;
return 1;
}
while( true )
{
Mat cameraFrame;
camera >> cameraFrame;
if( cameraFrame.empty() )
{
cerr << "Could not grab a camera frame!" << endl;
return 1;
}
// imshow( "Camera Test", cameraFrame );
Mat displayedFrame( cameraFrame.size(), CV_8UC3 );
cartoonifyImage( cameraFrame, displayedFrame );
imshow( "Cartoonifier!", displayedFrame );
int keypress = waitKey( 20 );
if( keypress == 27 ) break;
}
}
Here is my function definition:
Mat cartoonifyImage( Mat srcColor, Mat mask )
{
Mat gray, edges;
cvtColor( srcColor, gray, CV_BGR2GRAY );
const int MEDIAN_BLUR_FILTER_SIZE = 7;
const int LAPLACIAN_FILTER_SIZE = 5;
const int EDGES_THRESHOLD = 80;
medianBlur( gray, gray, MEDIAN_BLUR_FILTER_SIZE );
Laplacian( gray, edges, CV_8U, LAPLACIAN_FILTER_SIZE );
threshold( edges, mask, EDGES_THRESHOLD, 255, THRESH_BINARY_INV );
return( mask );
}
When I run the program, I get a blank (gray) window.
Using the first imshow (now commented out), I made sure the webcam is working and that I can see my own image in the window, so the problem must be elsewhere.
Can anyone help me understand where the problem is and what I am doing wrong?
Thank you,
Your displayedFrame never gets filled.
(You pass it into the function and it gets manipulated there, but since you gave the function a copy, you don't get the result back.)
either return a Mat from cartoonifyImage:
Mat displayed = cartoonifyImage( cameraFrame );
or pass references:
void cartoonifyImage( const Mat & cameraFrame, Mat & displayedFrame );
Mat cartoonifyImage( Mat srcColor )
{
Mat gray, edges, mask;
cvtColor( srcColor, gray, CV_BGR2GRAY );
const int MEDIAN_BLUR_FILTER_SIZE = 7;
const int LAPLACIAN_FILTER_SIZE = 5;
const int EDGES_THRESHOLD = 80;
medianBlur( gray, gray, MEDIAN_BLUR_FILTER_SIZE );
Laplacian( gray, edges, CV_8U, LAPLACIAN_FILTER_SIZE );
threshold( edges, mask, EDGES_THRESHOLD, 255, THRESH_BINARY_INV );
return ( mask );
}
int main( int argc, const char** argv )
{
VideoCapture camera;
camera.open(0);
if( !camera.isOpened() )
{
cerr << "Could not access the camera!" << endl;
return 1;
}
while( true )
{
Mat cameraFrame;
camera >> cameraFrame;
if( cameraFrame.empty() )
{
cerr << "Could not grab a camera frame!" << endl;
return 1;
}
//imshow( "Camera Test", cameraFrame );
Mat displayedFrame = cartoonifyImage( cameraFrame );
imshow( "Cartoonifier!", displayedFrame );
int keypress = waitKey( 20 );
if( keypress == 27 ) break;
}
}
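For the pass-by-reference alternative mentioned above, a sketch of how the function and its call site would change (the behaviour is the same; the output Mat is simply written in place):
void cartoonifyImage( const Mat& srcColor, Mat& dst )
{
    Mat gray, edges;
    cvtColor( srcColor, gray, CV_BGR2GRAY );
    medianBlur( gray, gray, 7 );
    Laplacian( gray, edges, CV_8U, 5 );
    threshold( edges, dst, 80, 255, THRESH_BINARY_INV ); // writes into the caller's Mat
}
// call site:
Mat displayedFrame;
cartoonifyImage( cameraFrame, displayedFrame );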