This is hand keypoint detection using a Caffe deep-learning model in OpenCV. The problem is that when I run the code, the video performs very slowly, even freezing for some time, but when I try a static image it performs normally. I have declared the Net class outside of the while loop too, but the video still does not run smoothly. How can I make the video run smoothly?
#include <opencv2/opencv.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/dnn.hpp>
#include <iostream>
using namespace std;
using namespace cv;
using namespace cv::dnn;
const int POSE_PAIRS[20][2] =
{
{0,1}, {1,2}, {2,3}, {3,4}, // thumb
{0,5}, {5,6}, {6,7}, {7,8}, // index
{0,9}, {9,10}, {10,11}, {11,12}, // middle
{0,13}, {13,14}, {14,15}, {15,16}, // ring
{0,17}, {17,18}, {18,19}, {19,20} // small
};
string protoFile = "/home/hanish/Test_Opencv/HandKeyPoint/Models/pose_deploy.prototxt";
string weightsFile = "/home/hanish/Test_Opencv/HandKeyPoint/Models/pose_iter_102000.caffemodel";
int nPoints = 22;
int main(int argc, char **argv)
{
float thresh = 0.01;
VideoCapture cap(0);
Mat frame, frameCopy;
int frameWidth = cap.get(CAP_PROP_FRAME_WIDTH);
int frameHeight = cap.get(CAP_PROP_FRAME_HEIGHT);
float aspect_ratio = frameWidth/(float)frameHeight;
int inHeight = 368;
int inWidth = (int(aspect_ratio*inHeight) / 8) * 8; // round width down to a multiple of 8
cout << "inWidth = " << inWidth << " ; inHeight = " << inHeight << endl;
Net net = readNetFromCaffe(protoFile, weightsFile);
net.setPreferableBackend(DNN_BACKEND_OPENCV);
net.setPreferableTarget(DNN_TARGET_CPU);
while(1)
{
cap >> frame;
frameCopy = frame.clone();
Mat inpBlob = blobFromImage(frame, 1.0 / 255, Size(inWidth, inHeight), Scalar(0, 0, 0), false, false);
net.setInput(inpBlob);
Mat output = net.forward();
int H = output.size[2];
int W = output.size[3];
// find the position of the body parts
vector<Point> points(nPoints);
for (int n=0; n < nPoints; n++)
{
// Probability map of corresponding body's part.
Mat probMap(H, W, CV_32F, output.ptr(0,n));
resize(probMap, probMap, Size(frameWidth, frameHeight));
Point maxLoc;
double prob;
minMaxLoc(probMap, 0, &prob, 0, &maxLoc);
if (prob > thresh)
{
circle(frameCopy, cv::Point((int)maxLoc.x, (int)maxLoc.y), 8, Scalar(0,255,255), -1);
putText(frameCopy, cv::format("%d", n), cv::Point((int)maxLoc.x, (int)maxLoc.y), cv::FONT_HERSHEY_COMPLEX, 1, cv::Scalar(0, 0, 255), 2);
}
points[n] = maxLoc;
}
int nPairs = sizeof(POSE_PAIRS)/sizeof(POSE_PAIRS[0]);
for (int n = 0; n < nPairs; n++)
{
// lookup 2 connected body/hand parts
Point2f partA = points[POSE_PAIRS[n][0]];
Point2f partB = points[POSE_PAIRS[n][1]];
if (partA.x<=0 || partA.y<=0 || partB.x<=0 || partB.y<=0)
continue;
line(frame, partA, partB, Scalar(0,255,255), 8);
circle(frame, partA, 8, Scalar(0,0,255), -1);
circle(frame, partB, 8, Scalar(0,0,255), -1);
}
// imshow("Output-Keypoints", frameCopy);
imshow("Output-Skeleton", frame);
waitKey(1);
}
return 0;
}
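One common mitigation (a suggestion, not something from the post): on a CPU the net.forward() call dominates each frame, so shrinking the input blob and running the network only every few frames usually makes the display smooth again. A minimal sketch assuming the same model files as above (paths shortened here to hypothetical names), with the heatmap decoding kept equivalent to the post:

#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <vector>
using namespace cv;
using namespace cv::dnn;
using std::vector;

int main()
{
    // Hypothetical short paths: substitute your own model locations.
    Net net = readNetFromCaffe("pose_deploy.prototxt", "pose_iter_102000.caffemodel");
    net.setPreferableBackend(DNN_BACKEND_OPENCV);
    net.setPreferableTarget(DNN_TARGET_CPU);
    VideoCapture cap(0);
    const int nPoints = 22;
    const float thresh = 0.01f;
    const int skip = 3;        // run the network on every 3rd frame only (tunable)
    const int inHeight = 184;  // half of the tutorial's 368: roughly 4x less work (tunable)
    vector<Point> points(nPoints, Point(-1, -1));
    Mat frame;
    for (int idx = 0; cap.read(frame); ++idx)
    {
        if (idx % skip == 0)   // the forward pass is the expensive step
        {
            int inWidth = int(frame.cols * inHeight / (float)frame.rows);
            Mat blob = blobFromImage(frame, 1.0 / 255, Size(inWidth, inHeight), Scalar(), false, false);
            net.setInput(blob);
            Mat out = net.forward();
            int H = out.size[2], W = out.size[3];
            for (int n = 0; n < nPoints; n++)
            {
                Mat probMap(H, W, CV_32F, out.ptr(0, n));
                Point maxLoc;
                double prob;
                minMaxLoc(probMap, 0, &prob, 0, &maxLoc);
                // map heatmap coordinates back to frame coordinates
                // instead of resizing 22 full-size probability maps
                points[n] = (prob > thresh)
                    ? Point(maxLoc.x * frame.cols / W, maxLoc.y * frame.rows / H)
                    : Point(-1, -1);
            }
        }
        for (int n = 0; n < nPoints; n++)   // redraw the last detections on every frame
            if (points[n].x >= 0)
                circle(frame, points[n], 8, Scalar(0, 255, 255), -1);
        imshow("Output-Skeleton", frame);
        if (waitKey(1) == 27)
            break;
    }
    return 0;
}

Mapping the heatmap maxima back to frame coordinates also avoids the per-keypoint resize of 22 full-size probability maps, which is itself a noticeable per-frame cost.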
I am a newbie to C++ and the IDE I am using is Visual Studio 2022. I have written code to detect a face (eyes and mouth too) and save the ROI to a folder on the PC. What it does now can be thought of as an auto-capture of the ROI as soon as a face is detected.
I now want to create the function for "force capture", for which I will need a button wired to pretty much the same code I wrote for auto-capture.
How do I add the button and make it perform its task?
I found related answers, but they use Qt and I am not sure how to apply that here.
Thanks a ton! Really need help.
#include <opencv2/opencv.hpp>
#include <opencv2/dnn/dnn.hpp>
//(1) include face header
#include "opencv2/face.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
//(2) include face header
#include "opencv2/objdetect.hpp"
#include <iostream>
#include <opencv2/imgproc/types_c.h>
//file handling
#include <fstream>
#include <sstream>
using namespace cv;
using namespace std;
using namespace ml;
using namespace cv::face;
//(3) Global variables
Ptr<Facemark> facemark; //mark detection
CascadeClassifier faceDetector, mouth, eye, eye1; //face detection
string name, filename;
void process(Mat img, Mat imgcol) {
vector<Rect> faces;
faceDetector.detectMultiScale(img, faces);
Mat imFace;
if (faces.size() != 0) {
for (size_t i = 0; i < faces.size(); i++)
{
cv::rectangle(imgcol, faces[i], Scalar(255, 0, 0));
imFace = imgcol(faces[i]);
resize(imFace, imFace, Size(imFace.cols * 5, imFace.rows * 5));
faces[i] = Rect(0, 0, faces[i].width * 5, faces[i].height * 5);
}
vector< vector<Point2f> > shapes;
//vector < Rect > measures;
if (facemark->fit(imFace, faces, shapes)) // fit predefined shapes to the detected faces; imFace is the enlarged face image
{
for (unsigned long i = 0; i < faces.size(); i++) {
for (unsigned long k = 0; k < shapes[i].size(); k++) {
cv::circle(imFace, shapes[i][k], 5, cv::Scalar(0, 0, 255), FILLED);
}
}
}
namedWindow("Detected_shape");
imshow("Detected_shape", imFace);
waitKey(5);
}
else {
cout << "Faces not detected." << endl;
}
}
int main()
{
facemark = FacemarkLBF::create();
facemark->loadModel("C:/Dev/HeadPose/HeadPose/lbfmodel.yml");
faceDetector.load("D:/opencv/build/install/etc/haarcascades/haarcascade_frontalface_alt2.xml");
mouth.load("D:/opencv/build/install/etc/haarcascades/haarcascade_smile.xml");
eye.load("D:/opencv/build/install/etc/haarcascades/haarcascade_eye.xml");
cout << "Loaded model" << endl;
Mat frame, grayframe, testframe, faceROI;
int x_axis, y_axis;
namedWindow("Detecting");
VideoCapture cap(0); //1 for diff cam
while (1)
{
if (!cap.read(frame))
{
cout << "an error occurred while reading the frame from cap" << endl;
continue;
}
//face
vector<Rect> faces;
Mat frame_gray;
Mat crop;
Mat res;
Mat gray;
string text;
stringstream sstm;
cvtColor(frame, grayframe, CV_BGR2GRAY);
equalizeHist(grayframe, testframe);
faceDetector.detectMultiScale(testframe, faces, 1.1, 3, CASCADE_SCALE_IMAGE, Size(30, 30));
Rect roi_b;
Rect roi_c;
for (size_t ic = 0; ic < faces.size(); ic++)
{
roi_b = faces[ic];
crop = frame(roi_b);
resize(crop, res, Size(128, 128), 0, 0, INTER_LINEAR);
cvtColor(crop, gray, COLOR_BGR2GRAY);
stringstream ssfn;
filename = "C:\\Users\\Hp\\Desktop\\Faces\\";
ssfn << filename.c_str() << name <<"_"<< roi_b.width<<"_"<< roi_b.height << ".jpg";
filename = ssfn.str();
imwrite(filename, res);
rectangle(frame, faces[ic], Scalar(255, 0, 255), 2, 8, 0);
Mat face = frame(faces[ic]);
cvtColor(face, face, CV_BGR2GRAY);
//mouth
vector <Rect> mouthi;
mouth.detectMultiScale(face, mouthi);
for (size_t k = 0; k < mouthi.size(); k++)
{
Point pt1(mouthi[k].x + faces[ic].x, mouthi[k].y + faces[ic].y);
Point pt2(pt1.x + mouthi[k].width, pt1.y + mouthi[k].height);
rectangle(frame, pt1, pt2, Scalar(255, 0, 0), 1, 8, 0);
}
//eyes
faceROI = frame(faces[ic]);//Taking area of the face as Region of Interest for eyes//
vector<Rect>eyes;//declaring a vector named eyes//
eye.detectMultiScale(faceROI, eyes, 1.1, 3, 0 | CASCADE_SCALE_IMAGE, Size(5, 5)); //detect eyes in every face//
/*eye1.detectMultiScale(faceROI, eyes, 1.1, 3, 0 | CASCADE_SCALE_IMAGE, Size(5, 5));*/
for (size_t j = 0; j < eyes.size(); j++)
{ //for locating eyes//
Point center(faces[ic].x + eyes[j].x + eyes[j].width * 0.5, faces[ic].y + eyes[j].y + eyes[j].height * 0.5);//getting the centers of both eyes//
int radius = cvRound((eyes[j].width + eyes[j].height) * 0.25); //declaring radius of the eye enclosing circles//
// cout << "radius" << radius << endl;
circle(frame, center, radius, Scalar(255, 0, 0), 1, 8, 0);//drawing circle around both eyes//
x_axis = eyes[j].x;//storing x axis location of eyes in x_axis//
y_axis = eyes[j].y;//storing y axis location of eyes in y_axis//
cout << "Position of the eyes is:" << "(" << x_axis << "," << y_axis << ")" << endl;//showing co-ordinate values//
}
}
sstm << "Crop area size: " << roi_b.width << "x" << roi_b.height << " Filename: " << filename;
text = sstm.str();
if (!crop.empty()) {
imshow("detected", crop);
}
else destroyWindow("detected");
cout << "Name\n";
cin >> name;
Mat img; //image containers
Mat imgbw;
cap >> img; //image from webcam
resize(img, img, Size(460, 460), 0, 0, INTER_LINEAR_EXACT);
cvtColor(img, imgbw, COLOR_BGR2GRAY);
process(imgbw, img);
imshow("Detecting", frame);
if (waitKey(30) == 27) {
break;
}
}
return 0;
}
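A note on the button question (a sketch under assumptions, not a tested solution): plain highgui has no real buttons unless OpenCV was built with the Qt backend (cv::createButton), but you can draw a button rectangle on the frame and use setMouseCallback to react to clicks inside it. The window name and the imwrite call below are placeholders standing in for your existing ROI-saving code:

#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;

const Rect kButton(10, 10, 140, 40);     // "button" area, top-left of the window
bool forceCapture = false;               // set by the mouse callback

static void onMouse(int event, int x, int y, int, void*)
{
    if (event == EVENT_LBUTTONDOWN && kButton.contains(Point(x, y)))
        forceCapture = true;             // request a capture on the next frame
}

int main()
{
    VideoCapture cap(0);
    namedWindow("Detecting");
    setMouseCallback("Detecting", onMouse);
    Mat frame;
    while (cap.read(frame))
    {
        // draw the "button" and its label on every frame
        rectangle(frame, kButton, Scalar(200, 200, 200), FILLED);
        putText(frame, "Capture", kButton.tl() + Point(15, 27),
                FONT_HERSHEY_SIMPLEX, 0.6, Scalar(0, 0, 0), 2);
        if (forceCapture)
        {
            forceCapture = false;
            // placeholder: call the same ROI-saving code used for auto-capture here
            imwrite("forced_capture.jpg", frame);
            std::cout << "Forced capture saved.\n";
        }
        imshow("Detecting", frame);
        if (waitKey(30) == 27)
            break;
    }
    return 0;
}

The callback only sets a flag; the actual capture happens on the next pass through the main loop, so it can reuse exactly the code path of the auto-capture.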
I have a program to draw and detect ArUco markers and write the marker id on them. I need a rectangle to appear on each marker instead of the marker id. I can draw a rectangle, but only in a fixed position, not on the marker. Here is the code:
#include <opencv2/highgui.hpp>
#include <opencv2/aruco.hpp>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <sstream>
#include <fstream>
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char *argv[]) {
cv::VideoCapture inputVideo;
inputVideo.open(0);
Mat outputMarker;
auto markerDict = aruco::getPredefinedDictionary(aruco::PREDEFINED_DICTIONARY_NAME::DICT_4X4_50);
for (int i = 0; i < 50; i++) {
aruco::drawMarker(markerDict, i, 500, outputMarker, 1);
ostringstream convert;
String imageName = "4x4marker_";
convert << imageName << i << ".jpg";
imwrite(convert.str(), outputMarker);
while (inputVideo.grab()) {
cv::Mat image, imageCopy;
inputVideo.retrieve(image);
image.copyTo(imageCopy);
std::vector<int> ids;
std::vector<std::vector<cv::Point2f> > corners;
cv::aruco::detectMarkers(image, markerDict, corners, ids);
// if at least one marker detected
if (ids.size() > 0)
cv::aruco::drawDetectedMarkers(imageCopy, corners, ids);
int x = 0;
int y = 3;
rectangle(imageCopy, Point(imageCopy.cols/2, imageCopy.rows/2),
Point(x,y),Scalar::all(255), -1, 8, 0);
cv::imshow("out", imageCopy);
char key = (char)cv::waitKey(5);
if (key == 27)
break;
}
}
}
An example marker to ease testing of the code.
When you use the detectMarkers function, it returns the corners of each detection; in your case, you are putting them into the std::vector<std::vector<cv::Point2f> > corners. To draw the rectangles you ask for, you can do something similar to the following (this code is an example and has not been tested):
for (size_t i = 0; i < corners.size(); ++i)
{
cv::Point2f p0(image.cols, image.rows);
cv::Point2f p1(0, 0);
for (auto p: corners[i])
{
if (p.x < p0.x)
p0.x = p.x;
if (p.y < p0.y)
p0.y = p.y;
if (p.x > p1.x)
p1.x = p.x;
if (p.y > p1.y)
p1.y = p.y;
}
rectangle(imageCopy, p0, p1,Scalar::all(255), -1, 8, 0);
}
However, you probably want to draw a polygon that fits the marker better: the marker's projection on the image will not be a rectangle unless the marker is exactly perpendicular to the camera's optical axis and there is no distortion. For that, you can use fillPoly, or line if you don't want it filled.
Here is a fully functional example tested with OpenCV 4.3.0-pre and 3.2.0 in Ubuntu and 3.4.9 in Windows:
#include <iostream>
#include <opencv2/aruco.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
int main(int argc, char** argv)
{
// Check cv version
std::cout << "Using OpenCV version: " << CV_VERSION << std::endl;
// Create video input
cv::VideoCapture inputVideo;
int input_source(0);
if (argc > 1)
input_source = std::atoi(argv[1]);
if (!inputVideo.open(input_source))
{
std::cerr << "Error opening input video soruce: " << input_source << std::endl;
return EXIT_FAILURE;
}
// Create marker dictionary
auto marker_dict = cv::aruco::getPredefinedDictionary(cv::aruco::PREDEFINED_DICTIONARY_NAME::DICT_4X4_50);
// Get imshow ready
cv::namedWindow("Display window", cv::WINDOW_KEEPRATIO | cv::WINDOW_NORMAL);
cv::resizeWindow("Display window", 800, 600);
// Grab images until escape is pressed
int key = 0;
while (key != 27 && inputVideo.grab())
{
// Retrieve image
cv::Mat image;
inputVideo.retrieve(image);
// Get image output ready
cv::Size image_size = image.size();
cv::Mat out_image(image_size.height, 3 * image_size.width, CV_8UC3);
cv::Mat left(out_image, cv::Rect(0, 0, image_size.width, image_size.height));
image.copyTo(left);
cv::Mat mid(out_image, cv::Rect(image_size.width, 0, image_size.width, image_size.height));
image.copyTo(mid);
cv::Mat right(out_image, cv::Rect(2 * image_size.width, 0, image_size.width, image_size.height));
image.copyTo(right);
// Add names to images
int corner_offset = 50;
cv::putText(left, "Original image", cv::Point(corner_offset, corner_offset), cv::FONT_HERSHEY_DUPLEX, 1.0,
CV_RGB(0, 0, 0), 2);
cv::putText(mid, "Image with OpenCV drawing", cv::Point(corner_offset, corner_offset), cv::FONT_HERSHEY_DUPLEX, 1.0,
CV_RGB(0, 0, 0), 2);
cv::putText(right, "Image with custom drawing", cv::Point(corner_offset, corner_offset), cv::FONT_HERSHEY_DUPLEX,
1.0, CV_RGB(0, 0, 0), 2);
// Detect markers
std::vector<int> ids;
std::vector<std::vector<cv::Point2f> > corners;
cv::aruco::detectMarkers(image, marker_dict, corners, ids);
// Draw markers using opencv tool
cv::aruco::drawDetectedMarkers(mid, corners, ids);
// Draw markers custom
for (size_t i = 0; i < corners.size(); ++i)
{
// Convert to integer points
int num = static_cast<int>(corners[i].size());
std::vector<cv::Point> points;
for (size_t j = 0; j < corners[i].size(); ++j)
points.push_back(cv::Point(static_cast<int>(corners[i][j].x), static_cast<int>(corners[i][j].y)));
const cv::Point* pts = &(points[0]);
// Draw
cv::fillPoly(right, &pts, &num, 1, cv::Scalar(255, 0, 0));
// Draw contour
for (size_t j = 0; j < corners[i].size(); ++j)
{
size_t next = (j + 1) % corners[i].size();
cv::line(right, corners[i][j], corners[i][next], cv::Scalar(0, 255, 0), 5);
}
}
// Display
cv::imshow("Display window", out_image);
key = cv::waitKey(5);
}
return EXIT_SUCCESS;
}
This is the output you will get with this code:
Here is a marker to test.
I am trying to take out the ground plane, draw grids on it for path mapping, and insert it back into the image. Here I am using the findHomography and warpPerspective functions to do so. But when I switch the points to insert the modified plane back, everything except the plane becomes black in the image.
I have tried to do it using an intermediate image, but the result is the same.
#include "pch.h"
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
struct userdata {
Mat im;
vector<Point2f> points;
};
void mouseHandler(int event, int x, int y, int flags, void* data_ptr)
{
if (event == EVENT_LBUTTONDOWN) {
userdata* data = ((userdata*)data_ptr);
circle(data->im, Point(x, y), 3, Scalar(0, 0, 255), 5, LINE_AA);
imshow("Image", data->im);
if (data->points.size() < 4) {
data->points.push_back(Point2f(x, y));
}
}
}
int main(int argc, char** argv)
{
// Read source image.
Mat im_src = imread("imagesindoor.jpg");
// Destination image. The aspect ratio of the book is 3/4
Size size(400, 300);
Size size2(im_src.cols, im_src.rows);
Mat im_dst = Mat::zeros(size, CV_8UC3);
// Create a vector of destination points.
vector<Point2f> pts_dst;
pts_dst.push_back(Point2f(0, 0));
pts_dst.push_back(Point2f(size.width - 1, 0));
pts_dst.push_back(Point2f(size.width - 1, size.height - 1));
pts_dst.push_back(Point2f(0, size.height - 1));
// Set data for mouse event
Mat im_temp = im_src.clone();
userdata data;
data.im = im_temp;
cout << "Click on the four corners of the book -- top left first and" <<
endl
<< "bottom left last -- and then hit ENTER" << endl;
// Show image and wait for 4 clicks.
imshow("Image", im_temp);
// Set the callback function for any mouse event
setMouseCallback("Image", mouseHandler, &data);
waitKey(0);
// Calculate the homography
Mat h = getPerspectiveTransform(data.points, pts_dst);
// Warp source image to destination
warpPerspective(im_src, im_dst, h, size);
// changing color of im_dst
for (int i = 0; i < im_dst.rows; i++) {
for (int j = 0; j < im_dst.cols; j++) {
//apply condition here
im_dst.at<cv::Vec3b>(i, j) = 255;
}
}
Mat p = getPerspectiveTransform(pts_dst, data.points);
warpPerspective(im_dst, im_src, p, size2);
// Show image
//imshow("Image", im_dst);
imshow("Image2", im_src);
waitKey(0);
return 0;
}
addWeighted can be used to blend the current result with the source image to get the expected result.
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <opencv2/opencv.hpp>
#include <iostream>
#include <stdio.h>
using namespace cv;
using namespace std;
struct userdata {
Mat im;
vector<Point2f> points;
};
void mouseHandler(int event, int x, int y, int flags, void* data_ptr)
{
if (event == EVENT_LBUTTONDOWN) {
userdata* data = ((userdata*)data_ptr);
circle(data->im, Point(x, y), 3, Scalar(0, 0, 255), 5, LINE_AA);
imshow("Image", data->im);
if (data->points.size() < 4) {
data->points.push_back(Point2f(x, y));
}
}
}
int main(int argc, char** argv)
{
// Read source image.
Mat im_src = imread("test.png");
// Destination image. The aspect ratio of the book is 3/4
Size size(400, 300);
Size size2(im_src.cols, im_src.rows);
Mat im_dst = Mat::zeros(size, CV_8UC3);
// Create a vector of destination points.
vector<Point2f> pts_dst;
pts_dst.push_back(Point2f(0, 0));
pts_dst.push_back(Point2f(size.width - 1, 0));
pts_dst.push_back(Point2f(size.width - 1, size.height - 1));
pts_dst.push_back(Point2f(0, size.height - 1));
// Set data for mouse event
Mat im_temp = im_src.clone();
userdata data;
data.im = im_temp;
cout << "Click on the four corners of the book -- top left first and" <<
endl
<< "bottom left last -- and then hit ENTER" << endl;
// Show image and wait for 4 clicks.
imshow("Image", im_temp);
// Set the callback function for any mouse event
setMouseCallback("Image", mouseHandler, &data);
waitKey(0);
// Calculate the homography
Mat h = getPerspectiveTransform(data.points, pts_dst);
// Warp source image to destination
warpPerspective(im_src, im_dst, h, size);
// changing color of im_dst
for (int i = 0; i < im_dst.rows; i++) {
for (int j = 0; j < im_dst.cols; j++) {
//apply condition here
im_dst.at<cv::Vec3b>(i, j) = 255;
}
}
Mat t;
Mat p = getPerspectiveTransform(pts_dst, data.points);
warpPerspective(im_dst, t, p, size2);
// Show image
//imshow("Image", im_dst);
std::cout << "t :" <<t.cols << ", " <<t.rows <<std::endl;
Mat final;
addWeighted(im_src, 0.5, t, 0.5, 0, final);
imshow("Image2", final);
waitKey(0);
return 0;
}
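A possible refinement of this answer (an untested sketch): warpPerspective also takes a border mode, and BORDER_TRANSPARENT leaves destination pixels that the warped quad does not cover untouched, so the modified plane can be composited into the original image directly, without the 50/50 blend dimming everything:

#include <opencv2/opencv.hpp>
using namespace cv;

// Paste the warped patch back into the source image in place.
// Assumes im_dst (the modified plane), p (the inverse transform from
// getPerspectiveTransform(pts_dst, data.points)) and im_src are the
// same variables as in the answer above.
void pasteWarped(const Mat& im_dst, const Mat& p, Mat& im_src)
{
    // BORDER_TRANSPARENT keeps the existing im_src pixels wherever the
    // warp of im_dst does not land, so only the plane region is replaced.
    warpPerspective(im_dst, im_src, p, im_src.size(), INTER_LINEAR, BORDER_TRANSPARENT);
}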
First of all, sorry for my bad English.
I'm using dense optical flow, specifically Gunnar Farneback's algorithm. The code works fine.
Here's the code:
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/video/tracking.hpp"
#include <vector>
#include <iostream>
using namespace std;
using namespace cv;
// Display the results of the matches
//
int main(int argc, char** argv)
{
cv::Mat res, img1, img2, img2Original, img2OriginalC;
cv::VideoWriter writer;
cv::VideoCapture cap;
//cv::VideoCapture cap("prueba1.mp4");
// cap.open(std::string(argv[1]));
cap.open(0);
cv::namedWindow("cat", cv::WINDOW_AUTOSIZE);
cap >> img1;
cvtColor(img1, img1, COLOR_BGR2GRAY);
double fps = cap.get(cv::CAP_PROP_FPS);
//cv::Size tamano((int)cap.get(cv::CAP_PROP_FRAME_WIDTH), (int)cap.get(cv::CAP_PROP_FRAME_HEIGHT));
Size S = Size((int) cap.get(CV_CAP_PROP_FRAME_WIDTH),(int) cap.get(CV_CAP_PROP_FRAME_HEIGHT));
writer.open("mouse.avi", CV_FOURCC('M', 'J', 'P', 'G'), fps, S);
for (;;) {
cap >> img2;
if (img2.empty()) break;
img2.copyTo(img2OriginalC);
cvtColor(img2, img2, COLOR_BGR2GRAY);
img2.copyTo(img2Original);
cv::calcOpticalFlowFarneback(img1, img2, res, .4, 1, 12, 2, 8, 1.2, 0);
for (int y = 0; y < img2.rows; y += 5) {
for (int x = 0; x < img2.cols; x += 5)
{
// get the flow at position (y, x); multiply it (e.g. by 3) for better visibility
const Point2f flowatxy = res.at<Point2f>(y, x) * 1;
// draw line at flow direction
line(img2OriginalC, Point(x, y), Point(cvRound(x + flowatxy.x), cvRound(y + flowatxy.y)), Scalar(255, 0, 0));
// draw initial point
circle(img2OriginalC, Point(x, y), 1, Scalar(0, 0, 0), -1);
}
}
img2Original.copyTo(img1);
imshow("cat", img2OriginalC);
writer << img2OriginalC;
cout <<"Fps"<<endl;
cout <<fps<< endl;
for( int i=3; i>=0; i--)
{
//cout<< res << endl;
//return 0;
}
if (cv::waitKey(1) == 27) break;
}
cap.release();
return 0;
}
I think I know what the output matrix is, but I'm not sure at all. Also, I don't know what the values mean. Finally, does anyone know how to use that output data?
Thanks
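Regarding what the output is (standard OpenCV behavior, stated here as a note): calcOpticalFlowFarneback fills res with a CV_32FC2 matrix the same size as the input frames, and res.at<Point2f>(y, x) is the estimated displacement (dx, dy) of the pixel at (x, y) between the two frames; that is exactly what the flowatxy lines above draw. A common way to use the whole field is to convert it to polar coordinates and display magnitude and direction as colors, a sketch:

#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;

// Render a dense flow field (the CV_32FC2 matrix that
// calcOpticalFlowFarneback fills) as a color image:
// hue encodes direction, brightness encodes speed.
Mat flowToColor(const Mat& flow)
{
    std::vector<Mat> ch(2);
    split(flow, ch);                            // ch[0] = dx, ch[1] = dy
    Mat mag, ang;
    cartToPolar(ch[0], ch[1], mag, ang, true);  // angle in degrees, 0..360
    normalize(mag, mag, 0, 255, NORM_MINMAX);
    std::vector<Mat> hsvCh(3);
    ang.convertTo(hsvCh[0], CV_8U, 0.5);        // map 0..360 to OpenCV's 0..180 hue range
    hsvCh[1] = Mat(flow.size(), CV_8U, Scalar(255));
    mag.convertTo(hsvCh[2], CV_8U);
    Mat hsv, bgr;
    merge(hsvCh, hsv);
    cvtColor(hsv, bgr, COLOR_HSV2BGR);
    return bgr;
}

Inside the loop, imshow("flow", flowToColor(res)); then shows moving regions as colored blobs: the color tells the direction of motion and the brightness its speed.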
I'm using OpenCV 2.4.6 and the NetBeans IDE for C++.
I'm trying to write a program that detects a face with the CascadeClassifier and then passes the Rect of the face to the CamShift function to track that face.
To do that, I took the sample code "camshiftdemo.cpp" that comes in OpenCV's samples folder and modified it. In my code, instead of using the mouse to select the region over which you want to do tracking, the cascade classifier passes that information.
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include <iostream>
#include <ctype.h>
using namespace cv;
using namespace std;
Mat image;
int trackObject = 0;
Rect selection;
int main()
{
VideoCapture cap;
Rect trackWindow;
int hsize = 16;
float hranges[] = {0,180};
const float* phranges = hranges;
int matchesNum = 0;
CascadeClassifier cascade;
if (!cascade.load("C:/opencv/data/haarcascades/haarcascade_frontalface_default.xml")) {
cout << "Cannot load face xml!" << endl;
return -1;
}
cap.open("D:/Videos_Proy/ProgramacionII/CAMERA3_clase1.MP4");
if (!cap.isOpened()) {
cout << "***Could not initialize capturing...***\n";
return -1;
}
namedWindow( "Result", 1 );
Mat frame, hsv, hue, hist, mask, backproj;
for(;;)
{
cap >> frame;
if( frame.empty() )
break;
frame.copyTo(image);
if ( !trackObject )
{
Mat grayframe;
vector <Rect> facesBuf;
int detectionsNum = 0;
cvtColor(image, grayframe, CV_BGR2GRAY);
cascade.detectMultiScale(grayframe, facesBuf, 1.2, 4, CV_HAAR_FIND_BIGGEST_OBJECT |
CV_HAAR_SCALE_IMAGE, cvSize(0, 0));
detectionsNum = (int) facesBuf.size();
Rect *faceRects = &facesBuf[0];
// It must find faces in three consecutive frames before starting the tracking, to discard false positives
if (detectionsNum > 0)
matchesNum += 1;
else matchesNum = 0;
if ( matchesNum == 3 )
{
trackObject = -1;
selection = faceRects[0];
}
for (int i = 0; i < detectionsNum; i++)
{
Rect r = faceRects[i];
rectangle(image, Point(r.x, r.y), Point(r.x + r.width, r.y + r.height), CV_RGB(0, 255, 0));
}
}
if( trackObject )
{
cvtColor(image, hsv, CV_BGR2HSV);
inRange(hsv, Scalar(0, 69, 53),
Scalar(180, 256, 256), mask);
int ch[] = {0, 0};
hue.create(hsv.size(), hsv.depth());
mixChannels(&hsv, 1, &hue, 1, ch, 1);
if( trackObject < 0 )
{
Mat roi(hue, selection), maskroi(mask, selection);
calcHist(&roi, 1, 0, maskroi, hist, 1, &hsize, &phranges);
normalize(hist, hist, 0, 255, CV_MINMAX);
trackWindow = selection;
trackObject = 1;
}
calcBackProject(&hue, 1, 0, hist, backproj, &phranges);
backproj &= mask;
RotatedRect trackBox = CamShift(backproj, trackWindow,
TermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ));
if( trackWindow.area() <= 1 )
{
int cols = backproj.cols, rows = backproj.rows, r = (MIN(cols, rows) + 5)/6;
trackWindow = Rect(trackWindow.x - r, trackWindow.y - r,
trackWindow.x + r, trackWindow.y + r) &
Rect(0, 0, cols, rows);
}
ellipse( image, trackBox, Scalar(0,0,255), 3, CV_AA );
}
imshow( "Result", image );
if(waitKey(30) >= 0) break;
}
return 0;
}
This code makes sense to me, and when I build it in NetBeans I do not get any errors. The problem is that it doesn't run, and NetBeans doesn't give any clue; it only says: RUN FAILED (exit value -1.073.741.819, total time: 5s)
Could anyone help me and give me an idea about what is happening? Thanks!!
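A likely cause, offered as a hedged note since no answer is included here: exit value -1.073.741.819 is 0xC0000005, a Windows access violation. In the code above, Rect *faceRects = &facesBuf[0]; takes the address of the first element even on frames where detectMultiScale found no faces, and indexing an empty vector is undefined behavior. A sketch of a guarded version of that block, a drop-in replacement using the same variables as the code above:

// Corrected excerpt of the detection block: only touch facesBuf when
// detectMultiScale actually found something. Dereferencing
// &facesBuf[0] on an empty vector is undefined behavior and can crash.
detectionsNum = (int) facesBuf.size();
if (detectionsNum > 0)
    matchesNum += 1;
else
    matchesNum = 0;
if (matchesNum == 3)
{
    trackObject = -1;
    selection = facesBuf[0];   // safe: detectionsNum > 0 on this frame
}
for (int i = 0; i < detectionsNum; i++)
{
    Rect r = facesBuf[i];
    rectangle(image, Point(r.x, r.y), Point(r.x + r.width, r.y + r.height), CV_RGB(0, 255, 0));
}

Since matchesNum is reset to zero whenever a frame has no detections, matchesNum == 3 guarantees the current frame has at least one face, so facesBuf[0] is safe there.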