cvSetImageROI to detect eyes - C++

In the following code I'm trying to detect a face and eyes from a video.
My problem is that when I try to set a ROI to detect the eyes, I think there is an error in the cvSetImageROI function.
This error is displayed:
error C2664: 'cvSetImageROI' : cannot convert parameter 1 from 'cv::Mat' to 'IplImage *'
Thanks for helping me.
#include <stdio.h>
#include <math.h>
#include <opencv\cv.h>
#include <opencv\highgui.h>
#include <opencv2\objdetect\objdetect.hpp>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\imgproc\imgproc.hpp>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
    CascadeClassifier face_cascade, eye_cascade;
    if(!face_cascade.load("haarcascade_frontalface_alt2.xml")) {
        printf("Error loading cascade file for face");
        return 1;
    }
    if(!eye_cascade.load("haarcascade_eye.xml")) {
        printf("Error loading cascade file for eye");
        return 1;
    }
    VideoCapture capture("w.mp4"); //-1, 0, 1 device id
    if(!capture.isOpened())
    {
        printf("error to initialize camera");
        return 1;
    }
    Mat cap_img, gray_img;
    vector<Rect> faces, eyes;
    while(1)
    {
        capture >> cap_img;
        waitKey(10);
        cvtColor(cap_img, gray_img, CV_BGR2GRAY);
        cv::equalizeHist(gray_img, gray_img);
        face_cascade.detectMultiScale(gray_img, faces, 1.1, 10, CV_HAAR_SCALE_IMAGE | CV_HAAR_DO_CANNY_PRUNING, cvSize(0,0), cvSize(300,300));
        for(int i = 0; i < faces.size(); i++)
        {
            Point pt1(faces[i].x + faces[i].width, faces[i].y + faces[i].height);
            Point pt2(faces[i].x, faces[i].y);
            Mat faceROI = gray_img(faces[i]);
            // error C2664 is reported on the next line
            cvSetImageROI(faceROI, cvRect(faces[i].x, faces[i].y + (faces[i].height)/5, faces[i].width, (faces[i].height)/3));
            eye_cascade.detectMultiScale(faceROI, eyes, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30,30));
            for(size_t j = 0; j < eyes.size(); j++)
            {
                Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 );
                int radius = cvRound((eyes[j].width + eyes[j].height)*0.25);
                circle(cap_img, center, radius, Scalar(255,0,0), 2, 8, 0);
            }
            rectangle(cap_img, pt1, pt2, cvScalar(0,255,0), 2, 8, 0);
        }
        imshow("Result", cap_img);
        char c = waitKey(3);
        if(c == 27)
            break;
    }
    return 0;
}

The problem is that you are trying to use a method from the old OpenCV C API. Your image is in the Mat format, and cvSetImageROI cannot take a Mat image as an argument; it expects an IplImage*.
Suggestion:
Rect region_of_interest = Rect(x, y, w, h);
Mat image_roi = image(region_of_interest);
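For example, here is a minimal sketch (my own, assuming the 1/5 and 1/3 proportions from your cvRect call are what you want) of how that suggestion fits into your detection loop. Since faceROI is already cut out of the full frame, the eye band must be expressed in coordinates relative to the face, not the frame:

Mat faceROI = gray_img(faces[i]);
// Band covering the eye region, relative to the face rectangle:
// skip the top fifth (forehead), keep a third of the face height.
Rect eye_band(0, faces[i].height / 5, faces[i].width, faces[i].height / 3);
Mat eyeROI = faceROI(eye_band);
// Note: eye rectangles returned here are relative to eye_band's origin.
eye_cascade.detectMultiScale(eyeROI, eyes, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30,30));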

Related

Crop and show image that I get from Haar Cascade

I have trained a Haar cascade and now I need to work with the found objects. How can I crop them from the original image and show them in a new window (or multiple windows if I find two objects in the image)? Here is my code (OpenCV version 2.4.13):
#include <opencv2/opencv.hpp>
#include <iostream>
#include <fstream>

using namespace std;
using namespace cv;

int main(void)
{
    CascadeClassifier trafficLightCascader;
    string Cascade_name = "TrafficLight.xml";
    if (!trafficLightCascader.load(Cascade_name))
    {
        cout << "Can't load the face feature data" << endl;
        return -1;
    }
    vector<Rect> trafficLights;
    Mat src = imread("6копия.png");
    CvRect AssignRect = Rect(0, 0, src.cols, src.rows / 2);
    Mat srcImage = src(AssignRect);
    Mat grayImage(srcImage.rows, srcImage.cols, CV_8UC1);
    cvtColor(srcImage, grayImage, CV_BGR2GRAY);
    equalizeHist(grayImage, grayImage);
    trafficLightCascader.detectMultiScale(grayImage, trafficLights, 1.1, 1, 0, Size(3,3));
    for (int i = 0; i < trafficLights.size(); ++i)
    {
        rectangle(src, trafficLights[i], Scalar(0, 255, 0), 2, 8, 0);
    }
    imshow("src", src);
    waitKey(0);
    return 0;
}
Your trafficLights vector holds the rectangle of each found object. You already have the left/top coordinates, width, and height of each rectangle. All you need to do is crop each rectangle by creating a Mat from it and show it in a different window.
You can check here to learn more about cropping.
Here is the code you need:
for (int i = 0; i < trafficLights.size(); ++i)
{
    Rect crop_found(trafficLights[i].x, trafficLights[i].y, trafficLights[i].width, trafficLights[i].height);
    Mat found(src, crop_found);
    imshow(to_string(i), found);
    rectangle(src, trafficLights[i], Scalar(0, 255, 0), 2, 8, 0);
}
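One detail worth noting (my addition, not part of the original answer): Mat found(src, crop_found) creates a view that shares pixel data with src, so anything drawn on src inside that region afterwards also shows up in found. If you want an independent copy, clone it:

// clone() makes a deep copy, so 'found' no longer shares data with 'src'
Mat found = src(crop_found).clone();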

Cropping a triangle from a captured frame - OpenCV and C++

I have a video file from which I'm capturing frames. I want to crop a triangle from the captured frame and display it, but my program shows just the source frame.
Here is my code:
cv::Mat Detector::cropRegionOfInterest(cv::Mat& frame)
{
    cv::Point corners[1][3];
    corners[0][0] = cv::Point(0, frameHeight);
    corners[0][1] = cv::Point(frameWidth, frameHeight);
    corners[0][2] = cv::Point(frameWidth / 2, frameHeight / 2);
    const cv::Point* cornerList[1] = { corners[0] };
    int numPoints = 3;
    int numPolygons = 1;
    cv::Mat mask(frame.size(), CV_8UC1, cv::Scalar(0, 0, 0));
    cv::fillPoly(mask, cornerList, &numPoints, numPolygons, cv::Scalar(255, 255, 255), 8);
    cv::Mat result(frame.size(), CV_8UC3);
    cv::bitwise_and(frame, mask, result);
    return result;
}
Instead of displaying source frame I want it to display cropped triangle.
Since you're using CV_8UC3 as the type of result, I'm assuming (see the Edit at the end of the answer if that's not the case) that the input image frame also has 3 channels. In that case, I'm a bit surprised that you can even see the non-cropped image, as running your code simply throws an exception on my machine at the call to bitwise_and:
OpenCV(3.4.1) Error: Sizes of input arguments do not match
From the documentation, it seems to me that you can't mix different input and mask types. A quick and dirty solution is to split the input image into a vector of three channels, call bitwise_and for each of them, and then merge them back. The code below works for me:
#include <stdio.h>
#include <opencv2/opencv.hpp>

using namespace cv;

cv::Mat cropRegionOfInterest(cv::Mat& frame)
{
    const int frameWidth = frame.cols - 1;
    const int frameHeight = frame.rows - 1;
    cv::Point corners[1][3];
    corners[0][0] = cv::Point(0, frameHeight);
    corners[0][1] = cv::Point(frameWidth, frameHeight);
    corners[0][2] = cv::Point(frameWidth / 2, frameHeight / 2);
    const cv::Point* cornerList[1] = { corners[0] };
    int numPoints = 3;
    int numPolygons = 1;
    cv::Mat mask(frame.rows, frame.cols, CV_8UC1, cv::Scalar(0, 0, 0));
    cv::fillPoly(mask, cornerList, &numPoints, numPolygons, cv::Scalar(255, 255, 255), 8);
    std::vector<cv::Mat> src_channels;
    std::vector<cv::Mat> result_channels;
    cv::split(frame, src_channels);
    for(int idx = 0; idx < 3; ++idx)
    {
        result_channels.emplace_back(frame.rows, frame.cols, CV_8UC1);
        cv::bitwise_and(src_channels[idx], mask, result_channels[idx]);
    }
    cv::Mat result;
    cv::merge(result_channels, result);
    return result;
}

int main(int argc, char** argv)
{
    if (argc != 2)
    {
        printf("usage: DisplayImage.out <Image_Path>\n");
        return -1;
    }
    Mat image;
    image = imread(argv[1], 1);
    if (!image.data)
    {
        printf("No image data \n");
        return -1;
    }
    cv::Mat cropped = cropRegionOfInterest(image);
    namedWindow("cropped Image", WINDOW_AUTOSIZE);
    imshow("cropped Image", cropped);
    waitKey(0);
    return 0;
}
Edit: From your comments it seems that frame is actually grayscale. In that case, never mind all the code above, and just change cv::Mat result(frame.size(), CV_8UC3); to
cv::Mat result(frame.rows,frame.cols,CV_8UC1);
in your original code.
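As an aside (my suggestion, not part of the original answer), Mat::copyTo with a mask sidesteps the channel mismatch entirely, because the mask may be single-channel regardless of the input type:

// Works for both grayscale and color frames: the single-channel mask
// selects which pixels get copied; all other pixels stay zero.
cv::Mat result = cv::Mat::zeros(frame.size(), frame.type());
frame.copyTo(result, mask);
return result;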

Vehicle tracking with optical flow using Haar input

My difficulty is in implementing the optical flow method to track vehicles with input from the Haar cascade.
So far I can only implement optical flow, but with input that does not come from the Haar cascade.
Can you help me?
This is my code:
using namespace cv;
using namespace std;

int main()
{
    int count = 0; double areax, areay, KoorX, KoorY;
    Mat prev_frame, gray, temp, prev_img;
    Mat frameROI, imgROI, frameLKP;
    Mat ROI, prevROI, ROIOF;
    // Parameters for pyramidal Lucas-Kanade optical flow
    int win_size = 24;
    int maxCorners = 24;
    int maxlevel = 8;
    TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.01);
    vector<uchar> found;
    vector<float> error;
    // Parameters for Shi-Tomasi corner detection
    vector<Point2f> prevcorners, corners;
    double qualityLevel = 0.05; //0.4
    double minDistance = 1; //2
    int blockSize = 3;
    bool useHarrisDetector = false;
    double k = 0.04;
    vector<Point2f> frame_corners; // feature points
    frame_corners.reserve(maxCorners);
    vector<Point2f> prevframe_corners;
    prevframe_corners.reserve(maxCorners);
    //=======> open the video file
    VideoCapture video("Uji1.avi");
    //=======> load the .xml cascade
    CascadeClassifier Casmobil;
    String Casmobil_file = "car2500.xml";
    Casmobil.load(Casmobil_file);
    namedWindow("Video", 1);
    namedWindow("Tracking OF", 1);
    namedWindow("Deteksi Haar", 1);
    video >> prev_frame;
    Rect roi = Rect(50, 180, 540, 240);
    prevROI = prev_frame(roi);
    cvtColor(prevROI, gray, CV_BGR2GRAY);
    gray.convertTo(prev_img, CV_8UC1);
    while(true)
    {
        //=====> read a frame from the video
        video >> frameROI;
        //=====> ROI
        Rect roi = Rect(50, 180, 540, 240);
        Mat ROI = frameROI(roi);
        cvtColor(ROI, gray, CV_BGR2GRAY); //=====> RGB to grayscale
        gray.convertTo(imgROI, CV_8UC1);
        Mat ROIOF = frameROI(roi);
        //======> detection
        vector<Rect> mobil;
        Casmobil.detectMultiScale(gray, mobil, 1.1, 3,
                                  CV_HAAR_DO_CANNY_PRUNING | CV_HAAR_SCALE_IMAGE,
                                  Size(0,0));
        //======> draw the boxes
        for (size_t i = 0; i < mobil.size(); i++)
        {
            Rect kotak = mobil[i];
            areax = (mobil[i].x + mobil[i].width*0.5);
            areay = (mobil[i].y + mobil[i].height*0.5);
            Point center = Point(areax, areay);
            rectangle(ROI, kotak, CV_RGB(0,255,0), 2, 8, 0);
            circle(ROI, center, 3, CV_RGB(255, 0, 0), -2);
        }
        //prev_frame
        goodFeaturesToTrack(imgROI, frame_corners, maxCorners,
                            qualityLevel, minDistance, Mat(),
                            blockSize, useHarrisDetector, k);
        cornerSubPix(imgROI, frame_corners, Size(win_size, win_size),
                     Size(-1, -1), termcrit);
        calcOpticalFlowPyrLK(imgROI, prev_img, frame_corners,
                             prevframe_corners, found, error,
                             Size(win_size, win_size), maxlevel, termcrit);
        for (int j = 0; j < frame_corners.size(); j++)
        {
            circle(ROIOF, frame_corners[j], 2, CV_RGB(255, 0, 0), -1);
            circle(ROIOF, prevframe_corners[j], 2, CV_RGB(0, 0, 255), -1);
            line(ROIOF, prevframe_corners[j], frame_corners[j], CV_RGB(0, 255, 0), 2, 8, 0);
        }
        prev_img = imgROI.clone();
        imshow("Video", frameROI);
        imshow("Deteksi Haar", ROI);
        imshow("Tracking OF", ROIOF);
        if(waitKey(400) >= 0) break;
    }
    return 0;
}
Thanks.
Do I need to replace the input image to goodFeaturesToTrack by cropping images from the Haar results?
Like:
Mat Crop = imgROI(mobil[i]);
goodFeaturesToTrack(Crop,frame_corners,maxCorners,qualityLevel,minDistance,Mat(),blockSize,useHarrisDetector,k);
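That is the usual approach, but note one pitfall (my own observation, not from an answer in this thread): corners found inside the crop are relative to the crop's origin, so they must be shifted back by the detection's top-left corner before they can be drawn on, or tracked in, imgROI. A minimal sketch under that assumption:

frame_corners.clear();
for (size_t i = 0; i < mobil.size(); i++)
{
    Mat crop = imgROI(mobil[i]);
    vector<Point2f> crop_corners;
    goodFeaturesToTrack(crop, crop_corners, maxCorners, qualityLevel,
                        minDistance, Mat(), blockSize, useHarrisDetector, k);
    // shift each corner back into imgROI coordinates
    for (size_t j = 0; j < crop_corners.size(); j++)
        frame_corners.push_back(crop_corners[j] + Point2f(mobil[i].x, mobil[i].y));
}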

Debug Assertion Failed! in detectAndDisplay function of face recognition OpenCV

I am trying to run the program and it builds well, but I am not able to debug. It captures the first frame and then gives a "Debug Assertion Failed!" error.
I checked in debug mode and could narrow it down to this line:
imshow(window_name, frame);
Here is the code, which is copied from an OpenCV blog.
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/video/video.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
/** Function Headers */
void detectAndDisplay(Mat frame);
/** Global variables */
String face_cascade_name = "haarcascade_frontalface_alt.xml";
String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
string window_name = "Capture - Face detection";
RNG rng(12345);
/** #function main */
int main(int argc, const char** argv)
{
CvCapture* capture;
Mat frame;
//-- 1. Load the cascades
if (!face_cascade.load(face_cascade_name)) { printf("--(!)Error loading\n"); return -1; };
if (!eyes_cascade.load(eyes_cascade_name)) { printf("--(!)Error loading\n"); return -1; };
//-- 2. Read the video stream
capture = cvCaptureFromCAM(CV_CAP_ANY);
if (capture)
{
while (true)
{
frame = cvQueryFrame(capture);
//-- 3. Apply the classifier to the frame
if (!frame.empty())
{
detectAndDisplay(frame);
}
else
{
printf(" --(!) No captured frame -- Break!");
break;
}
int c = waitKey(10);
if ((char)c == 'c') { break; }
}
}
return 0;
}
/** #function detectAndDisplay */
void detectAndDisplay(Mat frame)
{
std::vector<Rect> faces;
Mat frame_gray;
cvtColor(frame, frame_gray, CV_BGR2GRAY);
equalizeHist(frame_gray, frame_gray);
//-- Detect faces
face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
for (size_t i = 0; i < faces.size(); i++)
{
Point center(faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5);
ellipse(frame, center, Size(faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar(255, 0, 255), 4, 8, 0);
Mat faceROI = frame_gray(faces[i]);
std::vector<Rect> eyes;
//-- In each face, detect eyes
eyes_cascade.detectMultiScale(faceROI, eyes, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
for (size_t j = 0; j < eyes.size(); j++)
{
Point center(faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5);
int radius = cvRound((eyes[j].width + eyes[j].height)*0.25);
circle(frame, center, radius, Scalar(255, 0, 0), 4, 8, 0);
}
}
//-- Show what you got
imshow(window_name, frame);
}
Edit: when the value chosen in capture = cvCaptureFromCAM(-1) is -1, I am getting a grey window with no error.
I changed the Platform Toolset from Visual Studio 2015(v140) to Visual Studio 2013(v120) as in the figure attached. I do not know why but it worked.
It may happen if your face ROI is out of the frame bounds.
Because you are trying to copy this region, you are getting a memory error.
if (faces[i].x >= 0 && faces[i].y >= 0 &&
    faces[i].x + faces[i].width < frame_gray.cols &&
    faces[i].y + faces[i].height < frame_gray.rows)
{
    faceROI = frame_gray(faces[i]);
}
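An equivalent, more compact guard (my suggestion, not part of the original answer) uses OpenCV's rectangle intersection operator to clip the detection to the image bounds:

// '&' intersects two rectangles; the result is empty if they do not overlap
Rect safe = faces[i] & Rect(0, 0, frame_gray.cols, frame_gray.rows);
if (safe.area() > 0)
    faceROI = frame_gray(safe);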

Real-time template matching - OpenCV, C++

I am trying to implement real-time tracking using templates. I wish to update the template with every frame. The main modifications I have done are:
1) separated the template matching and minMaxLoc steps into separate functions, namely TplMatch() and minmax(), respectively.
2) Inside the track() function, select_flag is always kept true so that the new template is copied to 'mytemplate' on every iteration.
3) The last 3 lines of the track() function update the template (roiImg).
4) Also, I have removed all arguments from the track() function, since img and roiImg are global variables and hence there is no need to pass them in.
Following is the code:
#include <iostream>
#include "opencv2/opencv.hpp"
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <sstream>

using namespace cv;
using namespace std;

Point point1, point2; /* vertical points of the bounding box */
int drag = 0;
Rect rect; /* bounding box */
Mat img, roiImg; /* roiImg - the part of the image in the bounding box */
int select_flag = 0;
bool go_fast = false;
Mat mytemplate;

///------- template matching ------------------------------------------------
Mat TplMatch( Mat &img, Mat &mytemplate )
{
    Mat result;
    matchTemplate( img, mytemplate, result, CV_TM_SQDIFF_NORMED );
    normalize( result, result, 0, 1, NORM_MINMAX, -1, Mat() );
    return result;
}

///------- Localizing the best match with minMaxLoc --------------------------
Point minmax( Mat &result )
{
    double minVal, maxVal;
    Point minLoc, maxLoc, matchLoc;
    minMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, Mat() );
    matchLoc = minLoc;
    return matchLoc;
}

///------- tracking -----------------------------------------------------------
void track()
{
    if (select_flag)
    {
        roiImg.copyTo(mytemplate);
        // select_flag = false;
        go_fast = true;
    }
    // imshow( "mytemplate", mytemplate ); waitKey(0);
    Mat result = TplMatch( img, mytemplate );
    Point match = minmax( result );
    rectangle( img, match, Point( match.x + mytemplate.cols, match.y + mytemplate.rows ), CV_RGB(255, 255, 255), 0.5 );
    std::cout << "match: " << match << endl;
    /// latest match is the new template
    Rect ROI = cv::Rect( match.x, match.y, mytemplate.cols, mytemplate.rows );
    roiImg = img( ROI );
    imshow( "roiImg", roiImg ); //waitKey(0);
}

///------- MouseCallback function ----------------------------------------------
void mouseHandler(int event, int x, int y, int flags, void *param)
{
    if (event == CV_EVENT_LBUTTONDOWN && !drag)
    {
        /// left button clicked. ROI selection begins
        point1 = Point(x, y);
        drag = 1;
    }
    if (event == CV_EVENT_MOUSEMOVE && drag)
    {
        /// mouse dragged. ROI being selected
        Mat img1 = img.clone();
        point2 = Point(x, y);
        rectangle(img1, point1, point2, CV_RGB(255, 0, 0), 3, 8, 0);
        imshow("image", img1);
    }
    if (event == CV_EVENT_LBUTTONUP && drag)
    {
        point2 = Point(x, y);
        rect = Rect(point1.x, point1.y, x - point1.x, y - point1.y);
        drag = 0;
        roiImg = img(rect);
        // imshow("MOUSE roiImg", roiImg); waitKey(0);
    }
    if (event == CV_EVENT_LBUTTONUP)
    {
        /// ROI selected
        select_flag = 1;
        drag = 0;
    }
}

///------- Main() -----------------------------------------------------------------
int main()
{
    int k;
    /*
    ///open webcam
    VideoCapture cap(0);
    if (!cap.isOpened())
        return 1;*/

    ///open video file
    VideoCapture cap;
    cap.open( "Megamind.avi" );
    if ( !cap.isOpened() )
    { cout << "Unable to open video file" << endl; return -1; }
    /*
    /// Set video to 320x240
    cap.set(CV_CAP_PROP_FRAME_WIDTH, 320);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 240);*/
    cap >> img;
    GaussianBlur( img, img, Size(7,7), 3.0 );
    imshow( "image", img );
    while (1)
    {
        cap >> img;
        if ( img.empty() )
            break;
        // Flip the frame horizontally and add blur
        cv::flip( img, img, 1 );
        GaussianBlur( img, img, Size(7,7), 3.0 );
        if ( rect.width == 0 && rect.height == 0 )
            cvSetMouseCallback( "image", mouseHandler, NULL );
        else
            track();
        imshow("image", img);
        // waitKey(100); k = waitKey(75);
        k = waitKey(go_fast ? 30 : 10000);
        if (k == 27)
            break;
    }
    return 0;
}
The updated template is not being tracked. I am not able to figure out why this is happening, since I am updating my template (roiImg) with each iteration. The match value from the minmax() function returns the same point (coordinates) every time. A test video is available at: http://www.youtube.com/watch?v=vpnkk7N2E0Q&feature=youtu.be
Please look into it and guide me ahead... thanks a lot!
I got your original code from this revision of your question: https://stackoverflow.com/revisions/20180073/3
I made the smallest change to your original code; my resulting code is the following:
#include <iostream>
#include "opencv2/opencv.hpp"
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <sstream>

using namespace cv;
using namespace std;

Point point1, point2; /* vertical points of the bounding box */
int drag = 0;
Rect rect; /* bounding box */
Mat img, roiImg; /* roiImg - the part of the image in the bounding box */
int select_flag = 0;
bool go_fast = false;
Mat mytemplate;

///------- template matching ------------------------------------------------
Mat TplMatch( Mat &img, Mat &mytemplate )
{
    Mat result;
    matchTemplate( img, mytemplate, result, CV_TM_SQDIFF_NORMED );
    normalize( result, result, 0, 1, NORM_MINMAX, -1, Mat() );
    return result;
}

///------- Localizing the best match with minMaxLoc --------------------------
Point minmax( Mat &result )
{
    double minVal, maxVal;
    Point minLoc, maxLoc, matchLoc;
    minMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, Mat() );
    matchLoc = minLoc;
    return matchLoc;
}

///------- tracking -----------------------------------------------------------
void track()
{
    if (select_flag)
    {
        //roiImg.copyTo(mytemplate);
        // select_flag = false;
        go_fast = true;
    }
    // imshow( "mytemplate", mytemplate ); waitKey(0);
    Mat result = TplMatch( img, mytemplate );
    Point match = minmax( result );
    rectangle( img, match, Point( match.x + mytemplate.cols, match.y + mytemplate.rows ), CV_RGB(255, 255, 255), 0.5 );
    std::cout << "match: " << match << endl;
    /// latest match is the new template
    Rect ROI = cv::Rect( match.x, match.y, mytemplate.cols, mytemplate.rows );
    roiImg = img( ROI );
    roiImg.copyTo(mytemplate);
    imshow( "roiImg", roiImg ); //waitKey(0);
}

///------- MouseCallback function ----------------------------------------------
void mouseHandler(int event, int x, int y, int flags, void *param)
{
    if (event == CV_EVENT_LBUTTONDOWN && !drag)
    {
        /// left button clicked. ROI selection begins
        point1 = Point(x, y);
        drag = 1;
    }
    if (event == CV_EVENT_MOUSEMOVE && drag)
    {
        /// mouse dragged. ROI being selected
        Mat img1 = img.clone();
        point2 = Point(x, y);
        rectangle(img1, point1, point2, CV_RGB(255, 0, 0), 3, 8, 0);
        imshow("image", img1);
    }
    if (event == CV_EVENT_LBUTTONUP && drag)
    {
        point2 = Point(x, y);
        rect = Rect(point1.x, point1.y, x - point1.x, y - point1.y);
        drag = 0;
        roiImg = img(rect);
        roiImg.copyTo(mytemplate);
        // imshow("MOUSE roiImg", roiImg); waitKey(0);
    }
    if (event == CV_EVENT_LBUTTONUP)
    {
        /// ROI selected
        select_flag = 1;
        drag = 0;
    }
}

///------- Main() -----------------------------------------------------------------
int main()
{
    int k;
    /*
    ///open webcam
    VideoCapture cap(0);
    if (!cap.isOpened())
        return 1;*/

    ///open video file
    VideoCapture cap;
    cap.open( "Megamind.avi" );
    if ( !cap.isOpened() )
    { cout << "Unable to open video file" << endl; return -1; }
    /*
    /// Set video to 320x240
    cap.set(CV_CAP_PROP_FRAME_WIDTH, 320);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 240);*/
    cap >> img;
    GaussianBlur( img, img, Size(7,7), 3.0 );
    imshow( "image", img );
    while (1)
    {
        cap >> img;
        if ( img.empty() )
            break;
        // Flip the frame horizontally and add blur
        cv::flip( img, img, 1 );
        GaussianBlur( img, img, Size(7,7), 3.0 );
        if ( rect.width == 0 && rect.height == 0 )
            cvSetMouseCallback( "image", mouseHandler, NULL );
        else
            track();
        imshow("image", img);
        // waitKey(100); k = waitKey(75);
        k = waitKey(go_fast ? 30 : 10000);
        if (k == 27)
            break;
    }
    return 0;
}
The video at https://www.youtube.com/watch?v=rBCopeneCos shows a test of the above program.
I would avoid the use of global variables because I think they do not help in understanding where the problems lie; furthermore, I would also pay attention to the shallow vs. deep copy semantics of OpenCV's Mat class, as 1'' wrote in his answer:
OpenCV's Mat class is simply a header for the actual image data,
which it contains a pointer to. The operator= copies the pointer
(and the other information in the header, like the image dimensions)
so that both Mats share the same data. This means that modifying the
data in one Mat also changes it in the other. This is called a
"shallow" copy, since only the top layer (the header) is copied, not
the lower layer (the data).
To make a copy of the underlying data (called a "deep copy"), use the
clone() method. You can find information about it on the page that
you linked to.
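A short illustration of the difference (my addition, not part of the quoted answer):

cv::Mat a = cv::Mat::zeros(2, 2, CV_8UC1);
cv::Mat b = a;           // shallow copy: b shares a's pixel data
cv::Mat c = a.clone();   // deep copy: c owns its own pixel data
b.at<uchar>(0, 0) = 255; // changes a as well, but not c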
Edit about the drift:
In a comment to Real-time template matching - OpenCV, C++, learner asks about the tracking drift.
Looking at the video https://www.youtube.com/watch?v=rBCopeneCos we see that at the beginning of the video the program is tracking the girl's right eye, while at 0:15 it starts to track the girl's eyebrows, at 0:19 it starts to track the boy's eyebrows, and it never tracks the girl's eye again; for example, at 0:27 it tracks the girl's right eyebrow while the girl's right eye is clearly visible in the image.
This drift from tracking the eye to tracking the eyebrow is normal in a simple code like the one I posted, and the explanation is quite simple. See the video at https://www.youtube.com/watch?v=sGHEu3u9XvI: it starts with the tracking (contents of the black rectangle) of a playing card; then I remove the playing card from the scene, and the tracking black rectangle "drifts" to the bottom left of the scene. After all, we are continuously updating the template, so the behavior is correct: the program stops tracking the playing card and starts tracking the white background, and so you have the "drift". In other words, your TplMatch() function will always return a valid result image, and your current implementation of minmax() will always return a valid minimum.
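One way to mitigate this (a sketch of my own, not something the posted code does) is to skip the normalize step, so that minVal keeps its absolute meaning for CV_TM_SQDIFF_NORMED (0 is a perfect match, 1 the worst), and to refuse to update the template when the best match is too poor:

// Hypothetical drift guard; MATCH_THRESHOLD is a tuning value, not a library constant.
const double MATCH_THRESHOLD = 0.2;
Mat result;
matchTemplate(img, mytemplate, result, CV_TM_SQDIFF_NORMED); // note: no normalize()
double minVal, maxVal;
Point minLoc, maxLoc;
minMaxLoc(result, &minVal, &maxVal, &minLoc, &maxLoc);
if (minVal < MATCH_THRESHOLD)
{
    // good match: accept it and refresh the template
    roiImg = img(Rect(minLoc.x, minLoc.y, mytemplate.cols, mytemplate.rows));
    roiImg.copyTo(mytemplate);
}
// otherwise keep the previous template (the track is probably lost)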
You can follow the OpenCV tutorial "Template Matching". Your track function may contain the code to find the template in the current frame; a simple code is based on the matchTemplate and minMaxLoc functions.
The interesting issue related to the "real-time" part of your question is to succeed in finding the match, if present, within the time between the current frame and the next one.
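A rough way to check that (my addition) is to time one matching pass with OpenCV's tick counter:

// Measure how long one call to track() takes, in milliseconds.
int64 t0 = cv::getTickCount();
track();
double ms = (cv::getTickCount() - t0) * 1000.0 / cv::getTickFrequency();
std::cout << "track() took " << ms << " ms" << std::endl;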
Edit:
The following quick-and-dirty code and the video at http://www.youtube.com/watch?v=vpnkk7N2E0Q&feature=youtu.be show what I mean by tracking.
Since I do not have a webcam, I slightly modified your code to just use a video, this one: https://code.ros.org/trac/opencv/export/7237/trunk/opencv/samples/cpp/tutorial_code/HighGUI/video-input-psnr-ssim/video/Megamind.avi
I then added the track function and some logic to slow down the video until I choose a ROI, after which the video plays at normal speed.
#include <iostream>
#include "opencv2/opencv.hpp"
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <sstream>

using namespace cv;
using namespace std;

Point point1, point2; /* vertical points of the bounding box */
int drag = 0;
Rect rect; /* bounding box */
Mat img, roiImg; /* roiImg - the part of the image in the bounding box */
int select_flag = 0;
bool go_fast = false;
Mat mytemplate;

void track(cv::Mat &img, const cv::Mat &templ, const cv::Rect &r)
{
    static int n = 0;
    if (select_flag)
    {
        templ.copyTo(mytemplate);
        select_flag = false;
        go_fast = true;
    }
    cv::Mat result;
    /// Do the Matching and Normalize
    matchTemplate( img, mytemplate, result, CV_TM_SQDIFF_NORMED );
    normalize( result, result, 0, 1, NORM_MINMAX, -1, Mat() );
    /// Localizing the best match with minMaxLoc
    double minVal; double maxVal; Point minLoc; Point maxLoc;
    Point matchLoc;
    minMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, Mat() );
    matchLoc = minLoc;
    rectangle( img, matchLoc, Point( matchLoc.x + mytemplate.cols, matchLoc.y + mytemplate.rows ), CV_RGB(255, 255, 255), 3 );
    std::cout << matchLoc << "\n";
}

/// MouseCallback function
void mouseHandler(int event, int x, int y, int flags, void *param)
{
    if (event == CV_EVENT_LBUTTONDOWN && !drag)
    {
        /* left button clicked. ROI selection begins */
        point1 = Point(x, y);
        drag = 1;
    }
    if (event == CV_EVENT_MOUSEMOVE && drag)
    {
        /* mouse dragged. ROI being selected */
        Mat img1 = img.clone();
        point2 = Point(x, y);
        rectangle(img1, point1, point2, CV_RGB(255, 0, 0), 3, 8, 0);
        imshow("image", img1);
    }
    if (event == CV_EVENT_LBUTTONUP && drag)
    {
        point2 = Point(x, y);
        rect = Rect(point1.x, point1.y, x - point1.x, y - point1.y);
        drag = 0;
        roiImg = img(rect);
    }
    if (event == CV_EVENT_LBUTTONUP)
    {
        /* ROI selected */
        select_flag = 1;
        drag = 0;
    }
}

/// Main function
int main()
{
    int k;
    /*
    VideoCapture cap(0);
    if (!cap.isOpened())
        return 1;
    */
    VideoCapture cap;
    //cap.open("~/Downloads/opencv-2.4.4/samples/cpp/tutorial_code/HighGUI/video-input-psnr-ssim/video/Megamind.avi");
    cap.open("./Megamind.avi");
    if (!cap.isOpened())
    {
        printf("Unable to open video file\n");
        return -1;
    }
    /*
    // Set video to 320x240
    cap.set(CV_CAP_PROP_FRAME_WIDTH, 320);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
    */
    cap >> img;
    imshow("image", img);
    while (1)
    {
        cap >> img;
        if (img.empty())
            break;
        if (rect.width == 0 && rect.height == 0)
            cvSetMouseCallback("image", mouseHandler, NULL);
        else
            track(img, roiImg, rect);
        if (select_flag == 1)
            imshow("Template", roiImg);
        imshow("image", img);
        k = waitKey(go_fast ? 30 : 10000);
        if (k == 27)
            break;
    }
    return 0;
}
You can also find a general introduction to the subject starting from this Wikipedia page: http://en.wikipedia.org/wiki/Video_tracking