When I start my program, I get this error:
OpenCV Error: Unsupported format or combination of formats (In the Fisherfaces method all input samples (training images) must be of equal size! Expected 10304 pixels, but was 0 pixels.) in cv::Fisherfaces::train, file C:\builds\2_4_PackSlave-win64-vc12-shared\opencv\modules\contrib\src\facerec.cpp, line 455
I have tried running the program with both PNG and PGM images.
Code:
#include "opencv2/core/core.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include <iostream>
#include <fstream>
#include <sstream>
using namespace cv;
using namespace std;
static void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
std::ifstream file(filename.c_str(), ifstream::in);
if (!file) {
string error_message = "No valid input file was given, please check the given filename.";
CV_Error(CV_StsBadArg, error_message);
}
string line, path, classlabel;
while (getline(file, line)) {
stringstream liness(line);
getline(liness, path, separator);
getline(liness, classlabel);
if (!path.empty() && !classlabel.empty()) {
images.push_back(imread(path, 0));
labels.push_back(atoi(classlabel.c_str()));
}
}
}
int main(int argc, const char *argv[]) {
// Check for valid command line arguments, print usage
// if no arguments were given.
if (argc != 4) {
cout << "usage: " << argv[0] << " </path/to/haar_cascade> </path/to/csv.ext> </path/to/device id>" << endl;
cout << "\t </path/to/haar_cascade> -- Path to the Haar Cascade for face detection." << endl;
cout << "\t </path/to/csv.ext> -- Path to the CSV file with the face database." << endl;
cout << "\t <device id> -- The webcam device id to grab frames from." << endl;
exit(1);
}
// Get the path to your CSV:
string fn_haar = string(argv[1]);
string fn_csv = string(argv[2]);
int deviceId = atoi(argv[3]);
// These vectors hold the images and corresponding labels:
vector<Mat> images;
vector<int> labels;
// Read in the data (fails if no valid input filename is given, but you'll get an error message):
try {
read_csv(fn_csv, images, labels);
}
catch (cv::Exception& e) {
cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
// nothing more we can do
exit(1);
}
// Get the height from the first image. We'll need this
// later in code to reshape the images to their original
// size AND we need to reshape incoming faces to this size:
int im_width = images[0].cols;
int im_height = images[0].rows;
// Create a FaceRecognizer and train it on the given images:
Ptr<FaceRecognizer> model = createFisherFaceRecognizer();
model->train(images, labels);
// That's it for learning the Face Recognition model. You now
// need to create the classifier for the task of Face Detection.
// We are going to use the haar cascade you have specified in the
// command line arguments:
//
CascadeClassifier haar_cascade;
haar_cascade.load(fn_haar);
// Get a handle to the Video device:
VideoCapture cap(deviceId);
// Check if we can use this device at all:
if (!cap.isOpened()) {
cerr << "Capture Device ID " << deviceId << "cannot be opened." << endl;
return -1;
}
// Holds the current frame from the Video device:
Mat frame;
for (;;) {
cap >> frame;
// Clone the current frame:
Mat original = frame.clone();
// Convert the current frame to grayscale:
Mat gray;
cvtColor(original, gray, CV_BGR2GRAY);
// Find the faces in the frame:
vector< Rect_<int> > faces;
haar_cascade.detectMultiScale(gray, faces);
// At this point you have the position of the faces in
// faces. Now we'll get the faces, make a prediction and
// annotate it in the video. Cool or what?
for (int i = 0; i < faces.size(); i++) {
// Process face by face:
Rect face_i = faces[i];
// Crop the face from the image. So simple with OpenCV C++:
Mat face = gray(face_i);
// Resizing the face is necessary for Eigenfaces and Fisherfaces. You can easily
// verify this, by reading through the face recognition tutorial coming with OpenCV.
// Resizing IS NOT NEEDED for Local Binary Patterns Histograms, so preparing the
// input data really depends on the algorithm used.
//
// I strongly encourage you to play around with the algorithms. See which work best
// in your scenario, LBPH should always be a contender for robust face recognition.
//
// Since I am showing the Fisherfaces algorithm here, I also show how to resize the
// face you have just found:
Mat face_resized;
cv::resize(face, face_resized, Size(im_width, im_height), 1.0, 1.0, INTER_CUBIC);
// Now perform the prediction, see how easy that is:
int prediction = model->predict(face_resized);
// And finally write all we've found out to the original image!
// First of all draw a green rectangle around the detected face:
rectangle(original, face_i, CV_RGB(0, 255, 0), 1);
// Create the text we will annotate the box with:
string box_text = format("Prediction = %d", prediction);
// Calculate the position for annotated text (make sure we don't
// put illegal values in there):
int pos_x = std::max(face_i.tl().x - 10, 0);
int pos_y = std::max(face_i.tl().y - 10, 0);
// And now put it into the image:
putText(original, box_text, Point(pos_x, pos_y), FONT_HERSHEY_PLAIN, 1.0, CV_RGB(0, 255, 0), 2.0);
}
// Show the result:
imshow("face_recognizer", original);
// And display it:
char key = (char)waitKey(20);
// Exit this loop on escape:
if (key == 27)
break;
}
return 0;
}
One of your training images was invalid.
It might be that you had a non-image file (like a readme.txt) in your folder when you generated the CSV.
It is also a good idea to check the loading. Instead of:
images.push_back(imread(path, 0));
in the above code, try:
Mat m = imread(path, 0);
if ( m.empty() )
{
cerr << path << " could not be read!" << endl;
return;
}
images.push_back(m);
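Since the Fisherfaces error complains that all training images must be of equal size, it can also help to add a small sanity check before the call to model->train(). This is just a sketch, using the images vector from the code above:
// Sketch: run this right after read_csv(), before model->train(images, labels),
// to catch empty or differently sized training images early.
if (images.empty()) {
    cerr << "No training images were loaded - check the CSV paths!" << endl;
    exit(1);
}
for (size_t i = 0; i < images.size(); i++) {
    if (images[i].empty() || images[i].rows != images[0].rows || images[i].cols != images[0].cols) {
        cerr << "Training image #" << i << " is empty or has a different size." << endl;
        exit(1);
    }
}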
Related
I have code for face recognition from videos in C++. The code works fine on Windows, but on Ubuntu, when I run it and pass the arguments, it gives the error: segmentation fault (core dumped). I debugged the program using Valgrind with GDB; the error is on line 66, which is: int im_width = images[0].cols; I don't get what could cause it. Here is the complete code I have:
#include "opencv2/core.hpp"
#include "opencv2/face.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/objdetect.hpp"
#include <iostream>
#include <fstream>
#include <sstream>
#include <ctime>
using namespace cv;
using namespace std;
using namespace cv::face;
static void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
std::ifstream file(filename.c_str(), ifstream::in);
if (!file) {
string error_message = "No valid input file was given, please check the given filename.";
CV_Error(CV_StsBadArg, error_message);
}
string line, path, classlabel;
while (getline(file, line)) {
stringstream liness(line);
getline(liness, path, separator);
getline(liness, classlabel);
if(!path.empty() && !classlabel.empty()) {
images.push_back(imread(path, 0));
labels.push_back(atoi(classlabel.c_str()));
}
}
}
int main(int argc, const char *argv[]) {
// Check for valid command line arguments, print usage
// if no arguments were given.
if (argc != 4) {
cout << "usage: " << argv[0] << " </path/to/haar_cascade> </path/to/csv.ext> </path/to/device id>" << endl;
cout << "\t </path/to/haar_cascade> -- Path to the Haar Cascade for face detection." << endl;
cout << "\t </path/to/csv.ext> -- Path to the CSV file with the face database." << endl;
cout << "\t <device id> -- The webcam device id to grab frames from." << endl;
exit(1);
}
// Get the path to your CSV:
string fn_haar = string(argv[1]);
string fn_csv = string(argv[2]);
int deviceId = atoi(argv[3]);
// These vectors hold the images and corresponding labels:
vector<Mat> images;
vector<int> labels;
// Read in the data (fails if no valid input filename is given, but you'll get an error message):
try {
read_csv(fn_csv, images, labels);
} catch (cv::Exception& e) {
cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
// nothing more we can do
exit(1);
}
// Get the height from the first image. We'll need this
// later in code to reshape the images to their original
// size AND we need to reshape incoming faces to this size:
int im_width = images[0].cols;
int im_height = images[0].rows;
// Create a FaceRecognizer and train it on the given images:
Ptr<FaceRecognizer> model = createFisherFaceRecognizer();
model->train(images, labels);
// That's it for learning the Face Recognition model. You now
// need to create the classifier for the task of Face Detection.
// We are going to use the haar cascade you have specified in the
// command line arguments:
//
CascadeClassifier haar_cascade;
haar_cascade.load(fn_haar);
// Get a handle to the Video device:
VideoCapture cap(deviceId);
// Check if we can use this device at all:
if(!cap.isOpened()) {
cerr << "Capture Device ID " << deviceId << "cannot be opened." << endl;
return -1;
}
// Holds the current frame from the Video device:
Mat frame;
for(;;) {
cap >> frame;
// Clone the current frame:
Mat original = frame.clone();
// Convert the current frame to grayscale:
Mat gray;
cvtColor(original, gray, CV_BGR2GRAY);
// Find the faces in the frame:
vector< Rect_<int> > faces;
haar_cascade.detectMultiScale(gray, faces);
// At this point you have the position of the faces in
// faces. Now we'll get the faces, make a prediction and
// annotate it in the video. Cool or what?
for(unsigned int i = 0; i < faces.size(); i++) {
// Process face by face:
Rect face_i = faces[i];
// Crop the face from the image. So simple with OpenCV C++:
Mat face = gray(face_i);
// Resizing the face is necessary for Eigenfaces and Fisherfaces. You can easily
// verify this, by reading through the face recognition tutorial coming with OpenCV.
// Resizing IS NOT NEEDED for Local Binary Patterns Histograms, so preparing the
// input data really depends on the algorithm used.
//
// I strongly encourage you to play around with the algorithms. See which work best
// in your scenario, LBPH should always be a contender for robust face recognition.
//
// Since I am showing the Fisherfaces algorithm here, I also show how to resize the
// face you have just found:
Mat face_resized;
cv::resize(face, face_resized, Size(im_width, im_height), 1.0, 1.0, INTER_CUBIC);
// Now perform the prediction, see how easy that is:
int prediction = model->predict(face_resized);
// And finally write all we've found out to the original image!
// First of all draw a green rectangle around the detected face:
rectangle(original, face_i, CV_RGB(0, 255,0), 1);
// Create the text we will annotate the box with:
string box_text = format("Prediction = %d", prediction);
// Calculate the position for annotated text (make sure we don't
// put illegal values in there):
int pos_x = std::max(face_i.tl().x - 10, 0);
int pos_y = std::max(face_i.tl().y - 10, 0);
// And now put it into the image:
putText(original, box_text, Point(pos_x, pos_y), FONT_HERSHEY_PLAIN, 1.0, CV_RGB(0,255,0), 2.0);
}
// Show the result:
imshow("face_recognizer", original);
// And display it:
char key = (char) waitKey(20);
// Exit this loop on escape:
if(key == 27)
break;
}
return 0;
}
I suspect that vector<Mat> images; is empty, and so accessing images[0] is crashing.
Verify that images is not empty by either using a debugger or printing out its state. For example:
int im_width = 0;
int im_height = 0;
if(images.size())
{
im_width = images[0].cols;
im_height = images[0].rows;
}
else
{
std::cout << "Images is Empty!" << std::endl;
}
if((im_width > 0) && (im_height > 0))
{
// Continue on with valid dimensions
}
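A minimal follow-up sketch (reusing fn_csv and the vectors from the code above) that stops with a clear message instead of crashing later:
// Sketch: bail out before model->train() if nothing was read from the CSV.
if (images.empty() || labels.empty()) {
    cerr << "No training data was read from \"" << fn_csv
         << "\" - check that the image paths inside the CSV are valid on this machine." << endl;
    return -1;
}
This matters here because the CSV that works on Windows may contain paths that do not resolve on Ubuntu, so every imread() silently returns an empty Mat that never gets pushed checks against.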
I am using C++ and OpenCV 2.3.1 for background subtraction. I have tried many times to change the MOG2 parameters in order to disable the shadow detection feature, and I have also tried what other people suggest on the internet; however, shadow detection is still enabled.
Could you please tell me how to disable it?
See the sample code and the generated mask below.
//opencv
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/background_segm.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/video/video.hpp>
//C
#include <stdio.h>
//C++
#include <iostream>
#include <sstream>
using namespace cv;
using namespace std;
// Global variables
Mat frame; //current frame
Mat fgMaskMOG2; //fg mask fg mask generated by MOG method
Ptr<BackgroundSubtractor> pMOG2; //MOG Background subtractor
int keyboard; //input from keyboard
//new variables
int history = 1250;
float varThreshold = 16;
bool bShadowDetection = true;
/*
//added to remove the shadow
unsigned char nShadowDetection = 0;
float fTau = 0.5;
//static const unsigned char nShadowDetection =( unsigned char)0;
*/
// Function Headers
void help();
void processImages(char* firstFrameFilename);
void help()
{
cout
<< "This program shows how to use background subtraction methods provided by " << endl
<< " OpenCV. You can process images (-img)." << endl
<< "Usage:" << endl
<< "./bs -img <image filename>}" << endl
<< "for example: ./bs -img /data/images/1.png" << endl
<< endl;
}
// morphological operation
void morphOps(Mat &thresh){
//create structuring element that will be used to "dilate" and "erode" image.
//the element chosen here is a 3px by 3px rectangle
Mat erodeElement = getStructuringElement( MORPH_RECT,Size(2,2)); //2x2
//dilate with larger element so make sure object is nicely visible
Mat dilateElement = getStructuringElement( MORPH_RECT,Size(1,1)); //1x1
erode(thresh,thresh,erodeElement);
erode(thresh,thresh,erodeElement);
dilate(thresh,thresh,dilateElement);
dilate(thresh,thresh,dilateElement);
}
// main function
int main(int argc, char* argv[])
{
//print help information
help();
//check for the input parameter correctness
if(argc != 3) {
cerr <<"Incorret input list" << endl;
cerr <<"exiting..." << endl;
return EXIT_FAILURE;
}
//create GUI windows
namedWindow("Frame");
namedWindow("FG Mask MOG2 ");
//create Background Subtractor objects
//pMOG2 = new BackgroundSubtractorMOG2();
pMOG2 = new BackgroundSubtractorMOG2( history, varThreshold, bShadowDetection);
//BackgroundSubtractorMOG2(int history, float varThreshold, bool bShadowDetection=1);
if(strcmp(argv[1], "-img") == 0) {
//input data coming from a sequence of images
processImages(argv[2]);
}
else {
//error in reading input parameters
cerr <<"Please, check the input parameters." << endl;
cerr <<"Exiting..." << endl;
return EXIT_FAILURE;
}
//destroy GUI windows
destroyAllWindows();
return EXIT_SUCCESS;
}
//function processImages
void processImages(char* fistFrameFilename) {
//read the first file of the sequence
frame = imread(fistFrameFilename);
if(frame.empty()){
//error in opening the first image
cerr << "Unable to open first image frame: " << fistFrameFilename << endl;
exit(EXIT_FAILURE);
}
//current image filename
string fn(fistFrameFilename);
//read input data. ESC or 'q' for quitting
while( (char)keyboard != 'q' && (char)keyboard != 27 ){
//update the background model
pMOG2->operator()(frame, fgMaskMOG2,-1);
//get the frame number and write it on the current frame
size_t index = fn.find_last_of("/");
if(index == string::npos) {
index = fn.find_last_of("\\");
}
size_t index2 = fn.find_last_of(".");
string prefix = fn.substr(0,index+1);
string suffix = fn.substr(index2);
string frameNumberString = fn.substr(index+1, index2-index-1);
istringstream iss(frameNumberString);
int frameNumber = 0;
iss >> frameNumber;
rectangle(frame, cv::Point(10, 2), cv::Point(100,20),
cv::Scalar(255,255,255), -1);
putText(frame, frameNumberString.c_str(), cv::Point(15, 15),
FONT_HERSHEY_SIMPLEX, 0.5 , cv::Scalar(0,0,0));
//show the current frame and the fg masks
imshow("Frame", frame);
morphOps(fgMaskMOG2);
imshow("FG Mask MOG2 ", fgMaskMOG2);
//get the input from the keyboard
keyboard = waitKey(1);
//search for the next image in the sequence
ostringstream oss;
oss << (frameNumber + 1);
string nextFrameNumberString = oss.str();
string nextFrameFilename = prefix + nextFrameNumberString + suffix;
//read the next frame
frame = imread(nextFrameFilename);
if(frame.empty()){
//error in opening the next image in the sequence
cerr << "Unable to open image frame: " << nextFrameFilename << endl;
exit(EXIT_FAILURE);
}
//update the path of the current frame
fn.assign(nextFrameFilename);
// save subtracted images
string imageToSave =("output_MOG_" + frameNumberString + ".png");
bool saved = imwrite( "D:\\SO\\temp\\" +imageToSave,fgMaskMOG2);
if(!saved) {
cerr << "Unable to save " << imageToSave << endl;
}
}
}
Take a look at the documentation.
In your code you have:
bool bShadowDetection = true;
Change it to:
bool bShadowDetection = false;
EDIT:
OpenCV 3's BackgroundSubtractorMOG2 class has a setShadowValue(int value) function to set the gray value used for shadow pixels.
Setting that gray value to zero will remove the shadow from the mask.
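A minimal OpenCV 3 sketch of both options (the history/varThreshold values are just the ones from the question, and makeSubtractor is a placeholder name):
#include <opencv2/video/background_segm.hpp>
using namespace cv;

// Sketch: create a MOG2 subtractor whose mask never contains shadow pixels.
Ptr<BackgroundSubtractorMOG2> makeSubtractor()
{
    // Option 1: turn shadow detection off entirely (detectShadows = false).
    Ptr<BackgroundSubtractorMOG2> pMOG2 = createBackgroundSubtractorMOG2(1250, 16, false);
    // Option 2: keep detectShadows = true but paint shadow pixels with gray value 0:
    // pMOG2->setShadowValue(0);
    return pMOG2;
}
The mask is then produced with pMOG2->apply(frame, fgMaskMOG2), instead of the OpenCV 2.x pMOG2->operator()(frame, fgMaskMOG2) call used in the question.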
It depends on what you really want to see. If you want to separate the shadows from your segmentation, keep:
bool bShadowDetection = true;
and use
cv::threshold(Mask,Mask,254,255,cv::THRESH_BINARY);
after MOG2->apply()
You will get exactly the part of the mask which is 255 (the definite foreground) in your image.
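Put together, a sketch of that idea using the names from the question's code (with the OpenCV 2.x API, the first line would be pMOG2->operator()(frame, fgMaskMOG2) instead of apply()):
// Sketch: run the subtractor, then strip the gray shadow pixels (value 127)
// so that only definite foreground (255) is left in the mask.
pMOG2->apply(frame, fgMaskMOG2);
cv::threshold(fgMaskMOG2, fgMaskMOG2, 254, 255, cv::THRESH_BINARY);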
And sorry for reanimating this...
The highlighted code below demonstrates that the OpenCV framework is loaded in my C++ code and that it can render the text "Police watching"; it is just there to show that everything runs smoothly and is clean code to write.
Target: My webcam is connected to a USB port. I would like to capture the live webcam image and match it against a local file (/tmp/myface.png); if the live webcam image matches myface.png, the program should show the text "Police watching".
1) How can I capture my webcam in the following code? 2) Once the webcam image is captured, how can I load the local file and check whether it matches, showing that text only on a match?
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <fstream>
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
#include "opencv/cv.h"
void detectAndDisplay(Mat frame);
//*************
// Set Region of Interest
cv::Rect roi_b;
cv::Rect roi_c;
size_t ic = 0; // ic is index of current element
int ac = 0; // ac is area of current element
size_t ib = 0; // ib is index of biggest element
int ab = 0; // ab is area of biggest element
stringstream ssfn;
//*************
CascadeClassifier face_cascade;
string window_name = "Capture - Face detection";
int filenumber; // Number of file to be saved
string filename;
Mat frameread = imread("test.jpg");
int main(int argc, const char *argv[]){
if (argc != 4) {
cout << "usage: " << argv[0] << " </path/to/haar_cascade> </path/to/csv.ext> </path/to/device id>" << endl;
cout << "\t </path/to/haar_cascade> -- Path to the Haar Cascade for face detection." << endl;
cout << "\t </path/to/csv.ext> -- Path to the CSV file with the face database." << endl;
cout << "\t <device id> -- The webcam device id to grab frames from." << endl;
// exit(1);
}
CascadeClassifier face_cascade;
CascadeClassifier face_cascade1;
String fn="C:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt2.xml";
String fn1="C:\\opencv\\sources\\data\\haarcascades\\haarcascade_eye.xml";
face_cascade.load(fn);
face_cascade1.load(fn1);
VideoCapture input(0);
if(!input.isOpened()){return -1;}
namedWindow("Mezo",1);
Mat f2;
Mat frame;
std::vector<Rect> faces,faces1;
CvCapture* capture1;
IplImage* f1;
Mat crop;
cv::Rect r;
// detectAndDisplay(frameread);
while(1)
{
ic=0;
ib=0;
ab=0;
ac=0;
input >> frame;
waitKey(10);
//cvtColor(frame, frame, CV_BGR2GRAY);
//cv::equalizeHist(frame,frame);
face_cascade.detectMultiScale(frame, faces, 1.1, 10, CV_HAAR_SCALE_IMAGE | CV_HAAR_DO_CANNY_PRUNING, cvSize(0,0), cvSize(300,300));
for(int i=0; i < faces.size();i++)
{
Point pt1(faces[i].x+faces[i].width, faces[i].y+faces[i].height);
Point pt2(faces[i].x,faces[i].y);
Mat faceROI = frame(faces[i]);
face_cascade1.detectMultiScale(faceROI, faces1, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30,30));
for(size_t j=0; j< faces1.size(); j++)
{
Point center(faces[i].x+faces1[j].x+faces1[j].width*0.5, faces[i].y+faces1[j].y+faces1[j].height*0.5);
int radius = cvRound((faces1[j].width+faces1[j].height)*0.25);
circle(frame, center, radius, Scalar(255,0,0), 2, 8, 0);
}
rectangle(frame, pt1, pt2, cvScalar(0,255,0), 2, 8, 0);
}
imshow("Result", frame);
waitKey(3);
char c = waitKey(3);
if(c == 27)
break;
}
return 0;
}
What you are asking about is probably face recognition. You should be clearer in your question.
OpenCV has a class for doing this kind of recognition, so you do not have to implement it the way you are imagining.
Many approaches exist for this technology; OpenCV ships three algorithms (Eigenfaces, Fisherfaces, and LBPH). You also need to prepare your database of images (labelled faces).
All these steps are described in the OpenCV docs with some examples: http://docs.opencv.org/modules/contrib/doc/facerec/facerec_tutorial.html
You just need to read them and apply them.
Here you can also find a good tutorial for beginners.
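As a rough sketch of what that tutorial's flow boils down to (assuming OpenCV 2.4.x with the contrib module, and that you have already filled images and labels from your own labelled database; recognizeFace is just a placeholder name):
#include "opencv2/core/core.hpp"
#include "opencv2/contrib/contrib.hpp"   // FaceRecognizer lives here in OpenCV 2.4.x
#include <vector>
using namespace cv;

// Sketch only: train once on the labelled database, then predict on a detected face.
// The face images must be grayscale, and for Eigen/Fisherfaces all of equal size.
int recognizeFace(const std::vector<Mat>& images, const std::vector<int>& labels,
                  const Mat& detectedFace)
{
    Ptr<FaceRecognizer> model = createLBPHFaceRecognizer();   // or createFisherFaceRecognizer()
    model->train(images, labels);
    return model->predict(detectedFace);   // returns the label of the closest match
}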
The program '[5772] opencv3.exe' has exited with code 1 (0x1).
Other messages, one for each OpenCV DLL that is loaded:
opencv_flann248.dll
opencv_features2d248.dll
opencv_calib3d248.dll
opencv_ml248.dll
opencv_video248.dll
opencv_contrib248.dll
opencv_objdetect248.dll
opencv_highgui248.dll
opencv_imgproc248.dll
opencv_core248.dll
- Cannot find or open the PDB file.
Code:
#include "opencv2/core/core.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include <iostream>
#include <fstream>
#include <sstream>
using namespace cv;
using namespace std;
static void read_csv(const string& filename, vector<Mat>& images, vector<int>& labels, char separator = ';') {
std::ifstream file(filename.c_str(), ifstream::in);
if (!file) {
string error_message = "No valid input file was given, please check the given filename.";
CV_Error(CV_StsBadArg, error_message);
}
string line, path, classlabel;
while (getline(file, line)) {
stringstream liness(line);
getline(liness, path, separator);
getline(liness, classlabel);
if(!path.empty() && !classlabel.empty()) {
images.push_back(imread(path, 0));
labels.push_back(atoi(classlabel.c_str()));
}
}
}
int main(int argc, const char *argv[]) {
// Check for valid command line arguments, print usage
// if no arguments were given.
if (argc != 4) {
cout << "usage: " << argv[0] << " </path/to/haar_cascade> </path/to/csv.ext> </path/to/device id>" << endl;
cout << "\t </path/to/haar_cascade> -- Path to the Haar Cascade for face detection." << endl;
cout << "\t </path/to/csv.ext> -- Path to the CSV file with the face database." << endl;
cout << "\t <device id> -- The webcam device id to grab frames from." << endl;
exit(1);
}
// Get the path to your CSV:
string fn_haar = string(argv[1]);
string fn_csv = string(argv[2]);
int deviceId = atoi(argv[3]);
// These vectors hold the images and corresponding labels:
vector<Mat> images;
vector<int> labels;
// Read in the data (fails if no valid input filename is given, but you'll get an error message):
try {
read_csv(fn_csv, images, labels);
} catch (cv::Exception& e) {
cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
// nothing more we can do
exit(1);
}
// Get the height from the first image. We'll need this
// later in code to reshape the images to their original
// size AND we need to reshape incoming faces to this size:
int im_width = images[0].cols;
int im_height = images[0].rows;
// Create a FaceRecognizer and train it on the given images:
Ptr<FaceRecognizer> model = createFisherFaceRecognizer();
model->train(images, labels);
// That's it for learning the Face Recognition model. You now
// need to create the classifier for the task of Face Detection.
// We are going to use the haar cascade you have specified in the
// command line arguments:
//
CascadeClassifier haar_cascade;
haar_cascade.load(fn_haar);
// Get a handle to the Video device:
VideoCapture cap(deviceId);
// Check if we can use this device at all:
if(!cap.isOpened()) {
cerr << "Capture Device ID " << deviceId << "cannot be opened." << endl;
return -1;
}
// Holds the current frame from the Video device:
Mat frame;
for(;;) {
cap >> frame;
// Clone the current frame:
Mat original = frame.clone();
// Convert the current frame to grayscale:
Mat gray;
cvtColor(original, gray, CV_BGR2GRAY);
// Find the faces in the frame:
vector< Rect_<int> > faces;
haar_cascade.detectMultiScale(gray, faces);
// At this point you have the position of the faces in
// faces. Now we'll get the faces, make a prediction and
// annotate it in the video. Cool or what?
for(int i = 0; i < faces.size(); i++) {
// Process face by face:
Rect face_i = faces[i];
// Crop the face from the image. So simple with OpenCV C++:
Mat face = gray(face_i);
// Resizing the face is necessary for Eigenfaces and Fisherfaces. You can easily
// verify this, by reading through the face recognition tutorial coming with OpenCV.
// Resizing IS NOT NEEDED for Local Binary Patterns Histograms, so preparing the
// input data really depends on the algorithm used.
//
// I strongly encourage you to play around with the algorithms. See which work best
// in your scenario, LBPH should always be a contender for robust face recognition.
//
// Since I am showing the Fisherfaces algorithm here, I also show how to resize the
// face you have just found:
Mat face_resized;
cv::resize(face, face_resized, Size(im_width, im_height), 1.0, 1.0, INTER_CUBIC);
// Now perform the prediction, see how easy that is:
int prediction = model->predict(face_resized);
// And finally write all we've found out to the original image!
// First of all draw a green rectangle around the detected face:
rectangle(original, face_i, CV_RGB(0, 255,0), 1);
// Create the text we will annotate the box with:
string box_text = format("Prediction = %d", prediction);
// Calculate the position for annotated text (make sure we don't
// put illegal values in there):
int pos_x = std::max(face_i.tl().x - 10, 0);
int pos_y = std::max(face_i.tl().y - 10, 0);
// And now put it into the image:
putText(original, box_text, Point(pos_x, pos_y), FONT_HERSHEY_PLAIN, 1.0, CV_RGB(0,255,0), 2.0);
}
// Show the result:
imshow("face_recognizer", original);
// And display it:
char key = (char) waitKey(20);
// Exit this loop on escape:
if(key == 27)
break;
}
return 0;
}
Look at the code at the beginning of main():
cout << "usage: " << argv[0] << " </path/to/haar_cascade> </path/to/csv.ext> </path/to/device id>" << endl;
So you have to pass three command-line arguments to your program here:
a cascade file (an XML file, either LBP or Haar, from opencv/data, used for the face detection)
the CSV (txt) file with the image paths and the labels of the training images
the camera device id used for the later prediction
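For example, from a command prompt (the file names below are placeholders; substitute your own cascade, CSV, and camera id):
opencv3.exe C:\opencv\sources\data\haarcascades\haarcascade_frontalface_alt2.xml faces.csv 0
If you start the program from Visual Studio instead, put the same three arguments under Project Properties -> Debugging -> Command Arguments; otherwise argc stays at 1, the usage text is printed, and the program exits with code 1, which matches the output you posted.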
I am trying to make a program work, but for some reason I get a memory corruption error.
I am only using the processVideo function and not the processImages function.
When I target a specific video, it works only once. When I try to use it again on the same video, I get this kind of error:
*** Error in `./camack': malloc(): memory corruption: 0x000000000234b2c0 ***
I can't figure out where the problem is...
Here's the code:
//opencv
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/background_segm.hpp>
//C
#include <stdio.h>
//C++
#include <iostream>
#include <sstream>
using namespace cv;
using namespace std;
//global variables
Mat frame; //current frame
Mat fgMaskMOG; //fg mask generated by MOG method
Mat fgMaskMOG2; //fg mask fg mask generated by MOG2 method
Ptr<BackgroundSubtractor> pMOG; //MOG Background subtractor
Ptr<BackgroundSubtractor> pMOG2; //MOG2 Background subtractor
int keyboard;
//function declarations
void help();
void processVideo(char* videoFilename);
void processImages(char* firstFrameFilename);
void help()
{
cout
<< "--------------------------------------------------------------------------" << endl
<< "This program shows how to use background subtraction methods provided by " << endl
<< " OpenCV. You can process both videos (-vid) and images (-img)." << endl
<< endl
<< "Usage:" << endl
<< "./bs {-vid <video filename>|-img <image filename>}" << endl
<< "for example: ./bs -vid video.avi" << endl
<< "or: ./bs -img /data/images/1.png" << endl
<< "--------------------------------------------------------------------------" << endl
<< endl;
}
int main(int argc, char* argv[])
{
//print help information
help();
//check for the input parameter correctness
if(argc != 3) {
cerr <<"Incorret input list" << endl;
cerr <<"exiting..." << endl;
return EXIT_FAILURE;
}
//create GUI windows
namedWindow("Frame");
namedWindow("FG Mask MOG");
namedWindow("FG Mask MOG 2");
//create Background Subtractor objects
//NOTE HERE!!!!
pMOG= new BackgroundSubtractorMOG(); //MOG approach
pMOG2 = new BackgroundSubtractorMOG2(); //MOG2 approach
if(strcmp(argv[1], "-vid") == 0) {
//input data coming from a video
processVideo(argv[2]);
}
else if(strcmp(argv[1], "-img") == 0) {
//input data coming from a sequence of images
processImages(argv[2]);
}
else {
//error in reading input parameters
cerr <<"Please, check the input parameters." << endl;
cerr <<"Exiting..." << endl;
return EXIT_FAILURE;
}
//destroy GUI windows
destroyAllWindows();
return EXIT_SUCCESS;
}
void processVideo(char* videoFilename) {
//create the capture object
VideoCapture capture(videoFilename);
//VideoCapture capture(0);
if(!capture.isOpened()){
//error in opening the video input
cerr << "Unable to open video file: " << videoFilename << endl;
exit(EXIT_FAILURE);
}
//read input data. ESC or 'q' for quitting
while( (char)keyboard != 'q' && (char)keyboard != 27 ){
//read the current frame
if(!capture.read(frame)) {
cerr << "Unable to read next frame." << endl;
cerr << "Exiting..." << endl;
exit(EXIT_FAILURE);
}
//update the background model
//AND HERE!!!
pMOG->operator()(frame, fgMaskMOG);
pMOG2->operator()(frame, fgMaskMOG2);
//get the frame number and write it on the current frame
stringstream ss;
rectangle(frame, cv::Point(10, 2), cv::Point(100,20),
cv::Scalar(255,255,255), -1);
ss << capture.get(CV_CAP_PROP_POS_FRAMES);
string frameNumberString = ss.str();
putText(frame, frameNumberString.c_str(), cv::Point(15, 15),
FONT_HERSHEY_SIMPLEX, 0.5 , cv::Scalar(0,0,0));
//show the current frame and the fg masks
imshow("Frame", frame);
imshow("FG Mask MOG", fgMaskMOG);
imshow("FG Mask MOG 2", fgMaskMOG2);
//get the input from the keyboard
keyboard = waitKey( 30 );
}
//delete capture object
capture.release();
}
void processImages(char* fistFrameFilename) {
//read the first file of the sequence
frame = imread(fistFrameFilename);
if(!frame.data){
//error in opening the first image
cerr << "Unable to open first image frame: " << fistFrameFilename << endl;
exit(EXIT_FAILURE);
}
//current image filename
string fn(fistFrameFilename);
//read input data. ESC or 'q' for quitting
while( (char)keyboard != 'q' && (char)keyboard != 27 ){
//update the background model
//ALSO HERE!!!!
pMOG->operator()(frame, fgMaskMOG);
pMOG2->operator()(frame, fgMaskMOG2);
//get the frame number and write it on the current frame
size_t index = fn.find_last_of("/");
if(index == string::npos) {
index = fn.find_last_of("\\");
}
size_t index2 = fn.find_last_of(".");
string prefix = fn.substr(0,index+1);
string suffix = fn.substr(index2);
string frameNumberString = fn.substr(index+1, index2-index-1);
istringstream iss(frameNumberString);
int frameNumber = 0;
iss >> frameNumber;
rectangle(frame, cv::Point(10, 2), cv::Point(100,20),
cv::Scalar(255,255,255), -1);
putText(frame, frameNumberString.c_str(), cv::Point(15, 15),
FONT_HERSHEY_SIMPLEX, 0.5 , cv::Scalar(0,0,0));
//show the current frame and the fg masks
imshow("Frame", frame);
imshow("FG Mask MOG", fgMaskMOG);
imshow("FG Mask MOG 2", fgMaskMOG2);
//get the input from the keyboard
keyboard = waitKey( 30 );
//search for the next image in the sequence
ostringstream oss;
oss << (frameNumber + 1);
string nextFrameNumberString = oss.str();
string nextFrameFilename = prefix + nextFrameNumberString + suffix;
//read the next frame
frame = imread(nextFrameFilename);
if(!frame.data){
//error in opening the next image in the sequence
cerr << "Unable to open image frame: " << nextFrameFilename << endl;
exit(EXIT_FAILURE);
}
//update the path of the current frame
fn.assign(nextFrameFilename);
}
}
Can you help me on this one? thank you a lot
Global vars are evil! You should not use them.
Here, Ptr<BackgroundSubtractor> pMOG2; will never get released.
Move it into main, and pass it as an argument to functions like processVideo().
Also, all images passed to the BackgroundSubtractor must have the same size.
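A minimal sketch of that change (only the parts that differ from the posted code; the OpenCV 2.4.x API is assumed):
#include <opencv2/core/core.hpp>
#include <opencv2/video/background_segm.hpp>
#include <cstring>
using namespace cv;

void processVideo(char* videoFilename, Ptr<BackgroundSubtractor>& pMOG, Ptr<BackgroundSubtractor>& pMOG2);

int main(int argc, char* argv[])
{
    // Subtractors are locals now: they are released when main() returns,
    // and every function that needs them receives them explicitly.
    Ptr<BackgroundSubtractor> pMOG  = new BackgroundSubtractorMOG();
    Ptr<BackgroundSubtractor> pMOG2 = new BackgroundSubtractorMOG2();
    if (argc == 3 && strcmp(argv[1], "-vid") == 0)
        processVideo(argv[2], pMOG, pMOG2);
    return 0;
}
Inside processVideo() you then use the pMOG/pMOG2 parameters exactly as before (pMOG->operator()(frame, fgMaskMOG), and so on).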
If you don't need the processImages part, then remove it so that you have shorter code which is easier to understand. I have removed the irrelevant part. Here is the shortened code, which is working fine on my system:
//opencv
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/background_segm.hpp>
//C
#include <stdio.h>
//C++
#include <iostream>
#include <sstream>
using namespace cv;
using namespace std;
//global variables
Mat frame; //current frame
Mat fgMaskMOG; //fg mask generated by MOG method
Mat fgMaskMOG2; //fg mask fg mask generated by MOG2 method
Ptr<BackgroundSubtractor> pMOG; //MOG Background subtractor
Ptr<BackgroundSubtractor> pMOG2; //MOG2 Background subtractor
int keyboard;
//function declarations
void processVideo(char* videoFilename);
int main(int argc, char* argv[])
{
//create GUI windows
namedWindow("Frame");
namedWindow("FG Mask MOG");
namedWindow("FG Mask MOG 2");
//create Background Subtractor objects
//NOTE HERE!!!!
pMOG= new BackgroundSubtractorMOG(); //MOG approach
pMOG2 = new BackgroundSubtractorMOG2(); //MOG2 approach
if(strcmp(argv[1], "-vid") == 0) {
//input data coming from a video
processVideo(argv[2]);
}
//destroy GUI windows
destroyAllWindows();
return EXIT_SUCCESS;
}
void processVideo(char* videoFilename) {
//create the capture object
VideoCapture capture(videoFilename);
//VideoCapture capture(0);
if(!capture.isOpened()){
//error in opening the video input
cerr << "Unable to open video file: " << videoFilename << endl;
exit(EXIT_FAILURE);
}
//read input data. ESC or 'q' for quitting
while( (char)keyboard != 'q' && (char)keyboard != 27 ){
//read the current frame
if(!capture.read(frame)) {
cerr << "Unable to read next frame." << endl;
cerr << "Exiting..." << endl;
exit(EXIT_FAILURE);
}
//update the background model
//AND HERE!!!
pMOG->operator()(frame, fgMaskMOG);
pMOG2->operator()(frame, fgMaskMOG2);
//get the frame number and write it on the current frame
stringstream ss;
rectangle(frame, cv::Point(10, 2), cv::Point(100,20),
cv::Scalar(255,255,255), -1);
ss << capture.get(CV_CAP_PROP_POS_FRAMES);
string frameNumberString = ss.str();
putText(frame, frameNumberString.c_str(), cv::Point(15, 15),
FONT_HERSHEY_SIMPLEX, 0.5 , cv::Scalar(0,0,0));
//show the current frame and the fg masks
imshow("Frame", frame);
imshow("FG Mask MOG", fgMaskMOG);
imshow("FG Mask MOG 2", fgMaskMOG2);
//get the input from the keyboard
keyboard = waitKey( 30 );
}
//delete capture object
capture.release();
}