SURF feature extraction and keypoint matching based on FlannBasedMatcher - C++

Following is my code, which I use to extract features using SURF and to match the keypoints using FlannBasedMatcher.
Mat object = imread("S6E0.bmp", CV_LOAD_IMAGE_GRAYSCALE);
if( !object.data )
{
// std::cout<< "Error reading object " << std::endl;
return -2;
}
//Detect the keypoints using SURF Detector
int minHessian = 500;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> kp_object;
detector.detect( object, kp_object );
//Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat des_object;
extractor.compute( object, kp_object, des_object );
FlannBasedMatcher matcher;
char key = 'a';
//VideoCapture cap(0);
namedWindow("Good Matches");
std::vector<Point2f> obj_corners(4);
//Get the corners from the object
obj_corners[0] = cvPoint(0,0);
obj_corners[1] = cvPoint( object.cols, 0 );
obj_corners[2] = cvPoint( object.cols, object.rows );
obj_corners[3] = cvPoint( 0, object.rows );
Mat image = imread("S6E0.bmp", CV_LOAD_IMAGE_GRAYSCALE);
Mat des_image, img_matches;
std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch >> matches;
std::vector<std::vector<cv::DMatch>> matches1;
std::vector<std::vector<cv::DMatch>> matches2;
std::vector<cv::DMatch> matches3;
std::vector<DMatch > good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
Mat H;
//cvtColor(frame, image, CV_RGB2GRAY);
detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );
matcher.knnMatch(des_object, des_image, matches, 2);
for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
{
good_matches.push_back(matches[i][0]);
}
}
//Draw only "good" matches
drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
if (good_matches.size() >= 4)
{
printf("Images matching %d , %d", (int)good_matches.size(), (int)kp_object.size());
//return 1;
for( int i = 0; i < good_matches.size(); i++ )
{
//Get the keypoints from the good matches
obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
}
//H = findHomography( obj, scene, CV_RANSAC );
//printf("Size : %d", H.size());
//perspectiveTransform( obj_corners, scene_corners, H);
//printf("Size : %d --- %d --- %d", H.size(), scene_corners.size());
}else{
printf("Images matching %d , %d", (int)good_matches.size(), (int)kp_object.size());
}
//Show detected matches
imshow( "Good Matches", img_matches );
waitKey(0);
return 0;
In this code I want to know what exactly happens inside this method:
matcher.knnMatch(des_object, des_image, matches, 2);
As far as I know, I pass the descriptors of the two images to match, and the matches vector is filled with the 2 nearest neighbours for each descriptor. I want to know what exactly happens inside the method, how the matches vector is filled, and which points are put into it.
In this code segment
for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
{
good_matches.push_back(matches[i][0]);
}
}
I'm using a nearest-neighbour distance ratio (nndr) of 0.6, and I wanted to know how the good_matches are found and how changing the nndr value will affect the result.
It would be a great help if I could get this code resolved.
Thanks.

The FlannBasedMatcher is based on the paper by Muja et al.; you can find the exact algorithm and how they go about it there.
Regarding good_matches, as you can see in the code snippet itself, it is the collection of the best matches your result has, based on the criterion you chose, i.e. the nndr. The ratio is basically a threshold that decides how much worse a match may be, relative to its second-nearest neighbour, before the match is dropped altogether. The higher the threshold, the more points are considered and the larger the number of positive matches (whether they are true positives or not is determined by your dataset and by the way you have set the nndr level).
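To make that concrete, here is a minimal sketch of the ratio test (it reuses the matcher, des_object and des_image from your code and the 0.6 nndr you already use; treat it as an illustration rather than a drop-in replacement):
// knnMatch finds, for every descriptor (row) in des_object, its k=2 nearest
// neighbours in des_image, sorted by ascending distance, and stores them in
// knn_matches[i][0] (best) and knn_matches[i][1] (second best).
std::vector<std::vector<cv::DMatch>> knn_matches;
matcher.knnMatch(des_object, des_image, knn_matches, 2);
const float nndr = 0.6f; // nearest-neighbour distance ratio
std::vector<cv::DMatch> good;
for (size_t i = 0; i < knn_matches.size(); i++)
{
    // Keep the best match only if it is clearly better than the runner-up.
    // A smaller nndr is stricter (fewer but more reliable matches); a larger
    // nndr is more permissive (more matches, but also more false positives).
    if (knn_matches[i].size() == 2 &&
        knn_matches[i][0].distance < nndr * knn_matches[i][1].distance)
    {
        good.push_back(knn_matches[i][0]);
    }
}
Each DMatch stores queryIdx (the index of the keypoint in kp_object), trainIdx (the index of the matched keypoint in kp_image) and distance (the descriptor distance), which is why the later loop uses queryIdx/trainIdx to pull out the matching Point2f coordinates.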
Hope this helps.

Related

cv::drawMatches(...) not drawing

I've been trying to make a SURF implementation work and I have been having some trouble; now I finally think I got it 'right', but I have one small problem.
The problem is pretty straightforward and I am sure it's something simple, but I can't solve it. The image displays fine and everything, but the matches are not drawn. Here is my code.
#include <stdio.h>
#include "opencv2/xfeatures2d.hpp"
#include <opencv2/opencv.hpp>
#include "opencv2/features2d/features2d.hpp"
using namespace cv;
using namespace std;
int main(int argc, char** argv ){
if ( argc != 3 ) {
printf("usage: DisplayImage.out <Image1_Path> <Image2_Path>\n");
return -1;
}
Mat image, image2;
image = imread( argv[1], 0 );
image2 = imread(argv[2], 0);
if ( !image.data ){
printf("No image data \n");
return -1;
}
if ( !image2.data ){
printf("No image data \n");
return -1;
}
/*
Ptr<FeatureDetector> detector = FastFeatureDetector::create(15);
vector<KeyPoint> keypoints1, keypoints2;
detector->detect(image, keypoints1);
detector->detect(image2, keypoints2);
Ptr<xfeatures2d::SURF> extractor = xfeatures2d::SURF::create();
Mat descriptors1, descriptors2;
extractor->compute(image, keypoints1, descriptors1);
extractor->compute(image2, keypoints2, descriptors2);
BFMatcher::BFMatcher matcher(L2<float>);
//BruteForceMatcher<L2<float> > matcher;
vector<DMatch> matches;
matcher.match(descriptors1, descriptors2, matches);
*/
Ptr<xfeatures2d::SURF> surf = cv::xfeatures2d::SURF::create();
vector<KeyPoint> keypoints1, keypoints2;
Mat descriptors1, descriptors2;
surf->detectAndCompute(image,noArray(), keypoints1, descriptors1, true);
//BFMatcher::BFMatcher matcher(L2<float>);
BFMatcher::BFMatcher matcher(NORM_HAMMING);
vector<DMatch> matches;
Mat res;
matcher.match(descriptors1,descriptors2, matches);
drawMatches(image, keypoints1, image2, keypoints2, matches, res);
namedWindow("Display Image", WINDOW_AUTOSIZE );
imshow("Display Image", res);
waitKey(0);
return 0;
}

Haar detection - save Mat of image in order to get and show previous frame

I am using Haar detection in my hobby project; the detection is done on a video stream. Once Haar detects something I imshow it. This is what it looks like:
Mat faceROI = frame_gray(faces[i]);
imshow( "Detection", faceROI);
While the video is running I keep getting detections, and the Mat is updated/overwritten with a new image of the object. What I want to do now is to save the Mat so that when a new detection occurs I get both the previous and the current frame. I guess I have to save the Mat in some way and then update it so that current -> previous, and so on.
imshow( "Previous detection", previousROI); <- want to be able to do this
In case you want to see the whole code, I am doing this: http://docs.opencv.org/2.4/doc/tutorials/objdetect/cascade_classifier/cascade_classifier.html
Help is much appreciated!
You might have an easier time if you don't split the detect/display into a separate function. I've modified the OpenCV documentation code below. Keep in mind I haven't compiled or run this, so there may be some errors, but it should give you an idea of a different way to address the issue.
/** @function main */
int main( int argc, const char** argv )
{
CvCapture* capture;
Mat frame;
//-- 1. Load the cascades
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
//-- 2. Read the video stream
capture = cvCaptureFromCAM( -1 );
if( capture )
{
//store faces here
std::vector<Mat> prev_faces;
std::vector<Mat> current_faces;
while( true )
{
frame = cvQueryFrame( capture );
//-- 3. Apply the classifier to the frame
if( !frame.empty() )
{
std::vector<Rect> faces;
Mat frame_gray;
cvtColor( frame, frame_gray, CV_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect faces
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
for( size_t i = 0; i < faces.size(); i++ )
{
Mat faceROI = frame_gray( faces[i] );
current_faces.push_back(faceROI); //adds all of the current detections to a vector
}
if (prev_faces.size() > 0 && current_faces.size() > 0)
{
// do stuff with prev_faces and current_faces
// for(int i = 0; i < prev_faces.size(); i++){
// imshow("previous", prev_faces[i])
// }
// for(int i = 0; i < current_faces.size(); i++){
// imshow("current", current_faces[i])
// }
// imshow("stuff", other_cool_Mats_I_made_by_analysing_and_comparing_prev_and_current)
}
prev_faces = current_faces;
current_faces.clear();
} // closes if( !frame.empty() )
else
{ printf(" --(!) No captured frame -- Break!"); break; }
int c = waitKey(10);
if( (char)c == 'c' ) { break; }
}
}
return 0;
}

Using opencv + viola jones running slow

I am trying to implement the Viola-Jones face detection classifier using an OpenCV cascade. This is the code I am using:
int main( ){
SYSTEMTIME tm;
GetLocalTime(&tm);
printf("Date: %02d.%02d.%d, %02d:%02d:%02d:%02d\n", tm.wDay, tm.wMonth, tm.wYear, tm.wHour, tm.wMinute, tm.wSecond, tm.wMilliseconds);
//CString t = CTime::GetCurrentTime().Format("%H:%M:%S:%MS");
Mat image;
Mat frame_gray;
image = imread("test.jpg", CV_LOAD_IMAGE_COLOR);
namedWindow( "window1", 1 ); imshow( "window1", image );
// Load Face cascade (.xml file)
CascadeClassifier face_cascade;
face_cascade.load( "cascades.xml" );
cvtColor( image, frame_gray, CV_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
GetLocalTime(&tm);
printf("Date: %02d.%02d.%d, %02d:%02d:%02d:%02d\n", tm.wDay, tm.wMonth, tm.wYear, tm.wHour, tm.wMinute, tm.wSecond, tm.wMilliseconds);
float pyramidScale = 1.5f;
// Detect faces
std::vector<Rect> faces;
face_cascade.detectMultiScale( frame_gray, faces, pyramidScale, 3, 0, Size(20, 20), Size(50, 50));
GetLocalTime(&tm);
printf("Date: %02d.%02d.%d, %02d:%02d:%02d:%02d\n", tm.wDay, tm.wMonth, tm.wYear, tm.wHour, tm.wMinute, tm.wSecond, tm.wMilliseconds);
// Draw circles on the detected faces
for( int i = 0; i < faces.size(); i++ )
{
Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
ellipse( image, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 255, 255 ), 4, 8, 0 );
}
imshow( "Detected Face", image );
SYSTEMTIME tm1;
GetLocalTime(&tm);
printf("Date: %02d.%02d.%d, %02d:%02d:%02d:%02d\n", tm.wDay, tm.wMonth, tm.wYear, tm.wHour, tm.wMinute, tm.wSecond, tm.wMilliseconds);
//cout<< "Time : "<<tm.wHour<<":"<<tm.wMinute << ":"<< tm.wSecond << ":" << tm.wMilliseconds << "\n";
waitKey(0);
return 0;
}
The problem is that this is said to be the Viola-Jones implementation in OpenCV and, according to the author, usually runs at about 30 fps, but it is taking around 6 seconds to run face detection on a normal HD image of around 1920x1080. I wanted to ask whether the implementation is right, whether there is any problem with the way I am calling the method, and whether there is any way I can make it faster. The cascades.xml is a file I trained using sample images. Thank you.

Automatic Image Stitching from video with opencv

Hi, I get an error when I stitch frames from a video.
Here's my code:
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
using namespace std;
using namespace cv;
Mat Stitching(Mat image1,Mat image2){
Mat gray_image1;
Mat gray_image2;
// Convert to Grayscale
cvtColor( image1, gray_image1, CV_RGB2GRAY );
cvtColor( image2, gray_image2, CV_RGB2GRAY );
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 10;
SurfFeatureDetector detector( minHessian );
std::vector< KeyPoint > keypoints_object, keypoints_scene;
detector.detect( gray_image1, keypoints_object );
detector.detect( gray_image2, keypoints_scene );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_object, descriptors_scene;
extractor.compute( gray_image1, keypoints_object, descriptors_object );
extractor.compute( gray_image2, keypoints_scene, descriptors_scene );
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_object, descriptors_scene, matches );
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors_object.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Use only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_object.rows; i++ )
{ if( matches[i].distance < 3*min_dist )
{ good_matches.push_back( matches[i]); }
}
std::vector< Point2f > obj;
std::vector< Point2f > scene;
for( int i = 0; i < good_matches.size(); i++ )
{
//-- Get the keypoints from the good matches
obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
}
// Find the Homography Matrix
Mat H = findHomography( obj, scene, CV_RANSAC );
// Use the Homography Matrix to warp the images
cv::Mat result;
warpPerspective(image1,result,H,cv::Size(800,600));
cv::Mat half(result,cv::Rect(0,0,image2.cols,image2.rows));
image2.copyTo(half);
//imshow( "Result", result );
return result;
}
/** @function main */
int main( int argc, char** argv )
{
// Load the images
//Mat image1= imread( "E:\\Tugas Akhir\\image\\city2.jpg" );
//Mat image2= imread( "E:\\Tugas Akhir\\image\\city1.jpg" );
char *fileName = "E:\\Tugas Akhir\\Video Master\\indv_img_3a.avi";
/* Create a window */
cvNamedWindow("Stitching", CV_WINDOW_AUTOSIZE);
/* capture frame from video file */
CvCapture* capture = cvCreateFileCapture(fileName);
/* Create IplImage to point to each frame */
IplImage* frame;
IplImage before_frame;
Mat image1;
Mat image2;
cv::Mat result;
/* Loop until frame ended or ESC is pressed */
int loop=0;
//imshow( "Result", Stitching(image1,image2));
while(1) {
frame = cvQueryFrame(capture);
if(loop>0){
if(!frame) break;
image2=Mat(frame, false);
result=Stitching(image1,image2);
before_frame=result;
frame=&before_frame;
image1=result;
image2.release();
//imshow("Stitching",frame);
cvShowImage("Stitching",frame);
//break;
}else if(loop==0){
//Mat aimage1(frame);
image1=Mat(frame, false);
}
loop++;
char c = cvWaitKey(33);
if(c==27) break;
}
cvReleaseCapture(&capture);
/* delete window */
// cvDestroyWindow("Stitching");
// return EXIT_SUCCESS;
waitKey(0);
return 0;
}
If I load from image files it works and the images are stitched, but when I try to stitch images from the video frames it shows this error:
First-chance exception at 0x000007f886dd64a8 in matchingHomography.exe: Microsoft C++ exception: cv::Exception at memory location 0x0080e3b0..
Unhandled exception at 0x000007f886dd64a8 in matchingHomography.exe: Microsoft C++ exception: cv::Exception at memory location 0x0080e3b0..
The error occurs at this line:
Mat H = findHomography( obj, scene, CV_RANSAC );
What does the error mean, and how can I solve it?
Thanks.
First off, you seem to be mixing the C and C++ interfaces of OpenCV (OpenCV VideoCapture doc). For better readability stick to one of them (since you are using C++, just stick to the C++ functions).
Since loading from images works but video doesn't, your video loading is probably the problem.
Try using cv::imshow("testWindow", frame) to show the frame loaded from the video. Most likely no frame was loaded.
One possible cause is that the video file is encoded in a format not supported by OpenCV. To check, you can also run grab() and then retrieve(). The grab function returns whether it was successful or not. Try grabbing a couple of frames; if all of them fail, you probably don't have the necessary codec to decode this video.
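A minimal sketch of that check with the C++ VideoCapture interface (it reuses the video path from the question; the frame count of 10 is arbitrary) could look like this:
#include <opencv2/opencv.hpp>
#include <iostream>
int main()
{
    // Open the video with the C++ API instead of cvCreateFileCapture.
    cv::VideoCapture capture("E:\\Tugas Akhir\\Video Master\\indv_img_3a.avi");
    if (!capture.isOpened())
    {
        std::cout << "Could not open the video file." << std::endl;
        return -1;
    }
    cv::Mat frame;
    for (int i = 0; i < 10; i++)
    {
        if (!capture.grab()) // grab() reports whether a frame could be read
        {
            std::cout << "grab() failed at frame " << i << std::endl;
            break;
        }
        if (!capture.retrieve(frame) || frame.empty()) // decode the grabbed frame
        {
            std::cout << "retrieve() gave an empty frame at " << i << std::endl;
            break;
        }
        cv::imshow("testWindow", frame); // you should see the frame here
        cv::waitKey(30);
    }
    return 0;
}
If grab() or retrieve() fails on every frame, the codec is the likely culprit; if the frames display fine, the problem is more likely in the matching/homography step itself.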

Eye Blinking Detection

Some warnings appear in the terminal while the program runs:
OpenCV Error: Assertion failed(s>=0) in setSize, file /home/me/opencv2.4/modules/core/src/matrix.cpp, line 116
The program compiles without error and executes, but the eye ROI size changes as the user moves closer to or farther from the webcam, and because of this change in size the warning appears. I managed to silence the warnings by setting the eye ROI size equal to my eye template size; however, the program then fails to classify the user's eyes as open/closed because the minVal obtained is 0. The method used is OpenCV template matching. Alternatively, fixing my distance from the webcam and fixing the eye template size avoids the warning. Every time the warning appears, the program fails to classify open/closed eyes. The program does not work reliably because it sometimes classifies open eyes as closed and vice versa.
Questions:
Is there any alternative for identifying open and closed eyes other than template matching?
Any ideas on how to improve the program's classification of blinking?
Is there any working example you know of, in the OpenCV C/C++ API, that can classify open and closed eyes and accurately count the number of blinks?
static CvMemStorage* storage = 0;
// Create a new Haar classifier
static CvHaarClassifierCascade* cascade = 0;
// Function prototype for detecting and drawing an object from an image
bool detect_and_draw( IplImage* image ,CvHaarClassifierCascade* cascade);
const char *cascade_name[1]={"eyes.xml"};
cv::Mat roiImg;
int threshold_value = 200;
int threshold_type = 3;
int const max_value = 255;
int const max_type = 4;
int const max_BINARY_value = 255;
int hough_thr = 35;
cv::Mat src_gray, dst;
using namespace cv;
Mat img1; Mat img2; Mat templ; Mat result;
const char* image_window = "Source Image";
const char* result_window = "Result window";
int match_method=0;
int max_Trackbar = 5;
int eye_open=0;
int eye_close=0;
//Matching with 2 images ,eye closed or open
void MatchingMethod(cv::Mat templ,int id )
{
/// Source image to display
cv::Mat img_display;
roiImg.copyTo( img_display );
/// Create the result matrix
int result_cols = roiImg.cols - templ.cols + 1;
int result_rows = roiImg.rows - templ.rows + 1;
result.create( result_cols, result_rows, CV_32FC1 );
/// Do the Matching and Normalize
cv::matchTemplate( roiImg, templ, result, match_method );
cv::normalize( result, result, 0, 1, NORM_MINMAX, -1, Mat() );
/// Localizing the best match with minMaxLoc
double minVal; double maxVal; Point minLoc; Point maxLoc;
cv::Point matchLoc;
cv::minMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, Mat() );
///Just checking whether the template match value reaches the threshold
if(id == 0 && (minVal < 0))
{
eye_open=eye_open+1;
if(eye_open == 1)
{
std::cout<<"Eye Open"<<std::endl;
eye_open=0;
eye_close=0;
}
}
else if(id == 1 && (minVal < 0))
eye_close=eye_close+1;
if(eye_close == 1)
{
std::cout<<"Eye Closed"<<std::endl;
eye_close=0;
system("python send_arduino.py");
}
/// For SQDIFF and SQDIFF_NORMED, the best matches are lower values. For all the other methods, the higher the better
if( match_method == CV_TM_SQDIFF || match_method == CV_TM_SQDIFF_NORMED )
{ matchLoc = minLoc; }
else
{ matchLoc = maxLoc; }
/// Show me what you got
cv::rectangle( img_display, matchLoc, Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );
cv::rectangle( result, matchLoc, Point( matchLoc.x + templ.cols , matchLoc.y + templ.rows ), Scalar::all(0), 2, 8, 0 );
cv::imshow( image_window, img_display );
cv::imshow( result_window, result );
return;
}
void detect_blink(cv::Mat roi)
{
try
{
MatchingMethod(img1,0);
MatchingMethod(img2,1);
}
catch( cv::Exception& e )
{
std::cout<<"An exception occurred"<<std::endl;
}
}
// Main function, defines the entry point for the program.
int main( int argc, char** argv )
{
if(argc <= 1)
{
std::cout<<"\n Help "<<std::endl;
std::cout<<"\n ------------------------------------\n"<<std::endl;
std::cout<<"./blink_detect open_eye.jpg close_eye.jpg\n"<<std::endl;
std::cout<<"Eg :: ./blink_detect 2.jpg 3.jpg\n"<<std::endl;
std::cout<<"\n ------------------------------------\n"<<std::endl;
exit(0);
}
// Structure for getting video from camera or avi
CvCapture* capture = 0;
// Images to capture the frame from video or camera or from file
IplImage *frame, *frame_copy = 0;
// Used for calculations
int optlen = strlen("--cascade=");
// Input file name for avi or image file.
const char* input_name;
img1 = imread( argv[1], 1 );
img2 = imread( argv[2], 1 );
// Load the HaarClassifierCascade
/// Create windows
cv::namedWindow( image_window, CV_WINDOW_AUTOSIZE );
cv::namedWindow( result_window, CV_WINDOW_AUTOSIZE );
// Allocate the memory storage
storage = cvCreateMemStorage(0);
capture = cvCaptureFromCAM( 0);
// Create a new named window with title: result
cvNamedWindow( "original_frame", 1 );
// If loaded succesfully, then:
if( capture )
{
// Capture from the camera.
for(;;)
{
// Capture the frame and load it in IplImage
if( !cvGrabFrame( capture ))
break;
frame = cvRetrieveFrame( capture );
// If the frame does not exist, quit the loop
if( !frame )
break;
// Allocate framecopy as the same size of the frame
if( !frame_copy )
frame_copy = cvCreateImage( cvSize(frame->width,frame->height),
IPL_DEPTH_8U, frame->nChannels );
// Check the origin of image. If top left, copy the image frame to frame_copy.
if( frame->origin == IPL_ORIGIN_TL )
cvCopy( frame, frame_copy, 0 );
// Else flip and copy the image
for(int i=0;i<1;i++)
{
cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name[i], 0, 0, 0 );
// Check whether the cascade has loaded successfully. Else report and error and quit
if( !cascade )
{
fprintf( stderr, "ERROR: Could not load classifier cascade\n" );
return -1;
}
// Call the function to detect and draw the face
if(detect_and_draw(frame_copy,cascade))
{
std::cout<<"Detected"<<std::endl;
}
}
// Wait for a while before proceeding to the next frame
if( cvWaitKey( 1 ) >= 0 )
break;
}
// Release the images, and capture memory
cvReleaseHaarClassifierCascade(&cascade);
cvReleaseImage( &frame_copy );
cvReleaseCapture( &capture );
cvReleaseMemStorage(&storage);
}
return 0;
}
// Function to detect and draw any faces that is present in an image
bool detect_and_draw( IplImage* img,CvHaarClassifierCascade* cascade )
{
int scale = 1;
// Create a new image based on the input image
IplImage* temp = cvCreateImage( cvSize(img->width/scale,img->height/scale), 8, 3 );
// Create two points to represent the face locations
CvPoint pt1, pt2;
int i;
// Clear the memory storage which was used before
cvClearMemStorage( storage );
// Find whether the cascade is loaded, to find the faces. If yes, then:
if( cascade )
{
// There can be more than one face in an image. So create a growable sequence of faces.
// Detect the objects and store them in the sequence
CvSeq* faces = cvHaarDetectObjects( img, cascade, storage,
1.1, 8, CV_HAAR_DO_CANNY_PRUNING,
cvSize(40, 40) );
// Loop the number of faces found.
for( i = 0; i < (faces ? faces->total : 0); i++ )
{
// Create a new rectangle for drawing the face
CvRect* r = (CvRect*)cvGetSeqElem( faces, i );
// Find the dimensions of the face,and scale it if necessary
pt1.x = r->x*scale;
pt2.x = (r->x+r->width)*scale;
pt1.y = r->y*scale;
pt2.y = (r->y+r->height)*scale;
// Draw the rectangle in the input image
cvRectangle( img, pt1, pt2, CV_RGB(255,0,0), 3, 8, 0 );
cv::Mat image(img);
cv::Rect rect;
rect = cv::Rect(pt1.x,pt1.y,(pt2.x-pt1.x),(pt2.y-pt1.y));
roiImg = image(rect);
cv::imshow("roi",roiImg);
///Send to arduino
detect_blink(roiImg);
}
}
cvShowImage( "original_frame", img );
if(i > 0)
return 1;
else
return 0;
cvReleaseImage( &temp );
}
Reference:
Website referred