I have written this code to track a person's face, but I'm getting very low FPS. When I only display the video, my FPS is a lot higher.
I have tried changing the resolution, but that did not work.
#include "opencv2/core.hpp"
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <opencv2/objdetect.hpp>
#include <opencv2/imgproc.hpp>
using namespace cv;
using namespace std;
int main(int argv, char** argc)
{
CascadeClassifier faceCascade;
faceCascade.load("C:\\Users\\farha\\OneDrive\\Desktop\\OpenCV4\\OpenCv\\install\\etc\\haarcascades\\haarcascade_frontalface_alt_tree.xml");
Mat frame;
Mat frameGray;
VideoCapture cap(0);
while (1)
{
cap >> frame;
cvtColor(frame, frameGray, COLOR_RGB2GRAY);
vector<Rect> faces;
faceCascade.detectMultiScale(frame, faces);
for (int i = 0; i < faces.size(); i++)
{
//int x1 = faces[i].x;
//int y1 = faces[i].y;
Point pt1(faces[i].x, faces[i].y);
Point pt2(faces[i].x + faces[i].width, faces[i].height + faces[i].y);//, (faces[i].y, faces[i].width)
rectangle(frame, pt1, pt2, Scalar(0, 0, 255), 2, 8, 0);
}
waitKey(1);
imshow("Window", frame);
}
return 0;
}
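For what it's worth, the bottleneck in this loop is almost certainly detectMultiScale rather than the capture itself. A minimal sketch of one common mitigation, assuming the same cascade file and webcam as above: run the detector on a downscaled grayscale copy and map the rectangles back onto the full frame (the 0.5 scale factor is only an illustrative value).

#include <opencv2/objdetect.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <vector>

using namespace cv;
using namespace std;

int main()
{
    CascadeClassifier faceCascade;
    if (!faceCascade.load("haarcascade_frontalface_alt_tree.xml")) // substitute your full path
        return -1;

    VideoCapture cap(0);
    Mat frame, gray, small;
    const double scale = 0.5; // example value; tune for your camera resolution

    while (cap.read(frame) && !frame.empty())
    {
        cvtColor(frame, gray, COLOR_BGR2GRAY);
        resize(gray, small, Size(), scale, scale, INTER_LINEAR);

        vector<Rect> faces;
        faceCascade.detectMultiScale(small, faces, 1.1, 3, 0, Size(30, 30));

        for (const Rect& f : faces)
        {
            // Map the detection back to full-frame coordinates.
            Rect r(cvRound(f.x / scale), cvRound(f.y / scale),
                   cvRound(f.width / scale), cvRound(f.height / scale));
            rectangle(frame, r, Scalar(0, 0, 255), 2);
        }

        imshow("Window", frame);
        if (waitKey(1) == 27)
            break;
    }
    return 0;
}

Detecting only every second or third frame and reusing the last rectangles in between is another common trick.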
First of all, sorry for my bad English.
I'm using dense optical flow, specifically the Gunnar Farneback method. The code works fine.
Here's the code:
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/video/tracking.hpp"
#include <vector>
#include <iostream>
using namespace std;
using namespace cv;
int main(int argc, char** argv)
{
    cv::Mat res, img1, img2, img2Original, img2OriginalC;
    cv::VideoWriter writer;
    cv::VideoCapture cap;
    //cv::VideoCapture cap("prueba1.mp4");
    //cap.open(std::string(argv[1]));
    cap.open(0);

    cv::namedWindow("cat", cv::WINDOW_AUTOSIZE);

    // Grab the first frame and convert it to grayscale as the "previous" image.
    cap >> img1;
    cvtColor(img1, img1, COLOR_BGR2GRAY);

    double fps = cap.get(cv::CAP_PROP_FPS);
    Size S = Size((int)cap.get(cv::CAP_PROP_FRAME_WIDTH), (int)cap.get(cv::CAP_PROP_FRAME_HEIGHT));
    writer.open("mouse.avi", cv::VideoWriter::fourcc('M', 'J', 'P', 'G'), fps, S);

    for (;;) {
        cap >> img2;
        if (img2.empty()) break;

        img2.copyTo(img2OriginalC);            // colour copy used for drawing
        cvtColor(img2, img2, COLOR_BGR2GRAY);
        img2.copyTo(img2Original);             // grayscale copy kept for the next iteration

        cv::calcOpticalFlowFarneback(img1, img2, res, .4, 1, 12, 2, 8, 1.2, 0);

        for (int y = 0; y < img2.rows; y += 5) {
            for (int x = 0; x < img2.cols; x += 5)
            {
                // flow at (y, x); multiply it by a constant if the lines are hard to see
                const Point2f flowatxy = res.at<Point2f>(y, x) * 1;
                // draw a line along the flow direction
                line(img2OriginalC, Point(x, y), Point(cvRound(x + flowatxy.x), cvRound(y + flowatxy.y)), Scalar(255, 0, 0));
                // draw the initial point
                circle(img2OriginalC, Point(x, y), 1, Scalar(0, 0, 0), -1);
            }
        }

        img2Original.copyTo(img1);
        imshow("cat", img2OriginalC);
        writer << img2OriginalC;

        cout << "FPS: " << fps << endl;

        if (cv::waitKey(1) == 27) break;
    }
    cap.release();
    return 0;
}
I think I know what the output matrix is, but I'm not sure at all. Also, I don't know what the values mean, and finally, does anyone know how to use that output data?
Thanks
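In case a concrete example helps: calcOpticalFlowFarneback fills res with a two-channel float matrix (CV_32FC2), so res.at<Point2f>(y, x) is the estimated (dx, dy) displacement of the pixel at (x, y) between the previous and the current frame, in pixels. A minimal sketch of turning that into magnitude and direction, assuming res was just computed as in the loop above:

std::vector<cv::Mat> channels(2);
cv::split(res, channels);          // channels[0] = horizontal flow (dx), channels[1] = vertical flow (dy)

cv::Mat magnitude, angle;
cv::cartToPolar(channels[0], channels[1], magnitude, angle, true); // angle in degrees

// Example uses: threshold the magnitude to find moving regions, or build an
// HSV image (hue = angle, value = normalised magnitude) to visualise the field.
double maxMotion;
cv::minMaxLoc(magnitude, nullptr, &maxMotion);
std::cout << "largest displacement this frame: " << maxMotion << " px" << std::endl;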
I'm a novice programmer with very little experience. I have code to detect people, using an SVM classifier and a HOG descriptor. The video takes a very long time to load and process. Please help me with this problem.
#include <assert.h>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <conio.h>
#include "opencv2/opencv.hpp"
using namespace cv;
using namespace std;
int main()
{
    string filename = "Street.MP4";
    VideoCapture capture(filename);
    Mat frame;
    //namedWindow("w", 1);

    while (true)
    {
        capture >> frame;
        if (frame.empty())
            break;

        Mat img, res;
        Mat framecopy = frame.clone();
        resize(framecopy, img, Size(2 * framecopy.cols, 2 * framecopy.rows));

        int nbins = 9;
        Size cellSize(8, 8);
        Size blockSize(16, 16);
        Size blockStride(8, 8);
        Size winSize(64, 128);
        Size winStride(4, 4);

        HOGDescriptor hog(winSize, blockSize, blockStride, cellSize, nbins);
        hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
        assert(hog.checkDetectorSize());

        vector<Rect> locations;
        vector<double> weights;
        hog.detectMultiScale(img, locations, weights, 0.0, winStride, Size(), 1.05, 2., true);

        resize(img, res, Size(framecopy.cols / 2, framecopy.rows / 2));
        for (size_t i = 0; i < locations.size(); ++i)
        {
            Rect detection = locations[i];
            detection.x /= 2;
            detection.y /= 2;
            detection.width /= 2;
            detection.height /= 2;
            rectangle(res, detection, Scalar(0, 0, 255), 2);
        }

        imshow("w", res);
        waitKey(20); // waits to display frame
    }
    waitKey();
    return 0;
}
Don't create the HOG detector in each iteration.
Try:
#include <assert.h>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <conio.h>
#include "opencv2/opencv.hpp"
using namespace cv;
using namespace std;
int main()
{
    string filename = "Street.MP4";
    VideoCapture capture(filename);
    Mat frame;
    //namedWindow("w", 1);

    int nbins = 9;
    Size cellSize(8, 8);
    Size blockSize(16, 16);
    Size blockStride(8, 8);
    Size winSize(64, 128);
    Size winStride(4, 4);

    HOGDescriptor hog(winSize, blockSize, blockStride, cellSize, nbins);
    hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
    assert(hog.checkDetectorSize());

    while (true)
    {
        capture >> frame;
        if (frame.empty())
            break;

        Mat img, res;
        Mat framecopy = frame.clone();
        resize(framecopy, img, Size(2 * framecopy.cols, 2 * framecopy.rows));

        vector<Rect> locations;
        vector<double> weights;
        hog.detectMultiScale(img, locations, weights, 0.0, winStride, Size(), 1.05, 2., true);

        resize(img, res, Size(framecopy.cols / 2, framecopy.rows / 2));
        for (size_t i = 0; i < locations.size(); ++i)
        {
            Rect detection = locations[i];
            detection.x /= 2;
            detection.y /= 2;
            detection.width /= 2;
            detection.height /= 2;
            rectangle(res, detection, Scalar(0, 0, 255), 2);
        }

        imshow("w", res);
        waitKey(1); // waits to display frame
    }
    waitKey();
    return 0;
}
But keep in mind that HOG detection is quite an expensive operation. What's the resolution of your images?
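If it is still too slow after that change, note that most of the time in this code goes into running detectMultiScale on an image that has been upscaled to twice its original size. A rough sketch of a cheaper loop body, assuming the same hog object built above: detect directly on the original frame and use a larger winStride (whether that still finds your pedestrians depends on how large they appear in the video).

while (true)
{
    capture >> frame;
    if (frame.empty())
        break;

    vector<Rect> locations;
    vector<double> weights;
    // Detect on the original frame; a winStride of 8x8 trades some accuracy for speed.
    hog.detectMultiScale(frame, locations, weights, 0.0, Size(8, 8), Size(), 1.05, 2.0, true);

    for (size_t i = 0; i < locations.size(); ++i)
        rectangle(frame, locations[i], Scalar(0, 0, 255), 2);

    imshow("w", frame);
    if (waitKey(1) == 27)
        break;
}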
I am a beginner at OpenCV. What I am trying to do is crop the eye region from the main frame, but I get an assertion error like this:
"Vector subscript out of range, Line 1140."
Do you guys have any idea how to fix that?
Here is my code:
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
int main(int argc, const char** argv)
{
    CascadeClassifier eye;
    CascadeClassifier righteye;
    CascadeClassifier lefteye;
    eye.load("haarcascade_mcs_eyepair_big.xml");
    righteye.load("haarcascade_mcs_lefteye.xml");
    lefteye.load("haarcascade_mcs_righteye.xml");

    VideoCapture vid;
    vid.open(0);
    if (!vid.isOpened())
    {
        cout << "webcam cannot open" << endl;
        system("Pause");
        return -1;
    }

    Mat frame;
    Mat grires;
    Mat frame2;
    Mat frame3;
    namedWindow("algilanan", 1);
    namedWindow("algilanan2", 2);
    namedWindow("algilanan3", 3);

    while (true)
    {
        vid >> frame;
        cvtColor(frame, grires, CV_BGR2GRAY); // convert the image to grayscale
        //equalizeHist(grires, grires); // optionally equalize the histogram

        vector<Rect> eyepair;
        eye.detectMultiScale(grires, eyepair, 1.1, 3, 0, Size(30, 30));

        for (int i = 0; i < eyepair.size(); i++)
        {
            Point pt1(eyepair[i].x + eyepair[i].width, eyepair[i].y + eyepair[i].height);
            Point pt2(eyepair[i].x, eyepair[i].y);
            rectangle(frame, pt1, pt2, cvScalar(0, 255, 0, 0), 1, 8, 0);

            Mat ROI = grires(eyepair[i]);
            vector<Rect> reye;
            vector<Rect> leye;
            righteye.detectMultiScale(ROI, reye, 1.1, 3, 0, Size(30, 30));
            lefteye.detectMultiScale(ROI, leye, 1.1, 3, 0, Size(30, 30));

            for (int t = 0; t < reye.size(); t++)
            {
                Point pt1(eyepair[i].x + reye[t].x, eyepair[i].x + reye[t].y + reye[t].height);
                Point pt2(reye[t].x, reye[t].y);
                rectangle(ROI, pt1, pt2, cvScalar(0, 255, 0, 0), 1, 8, 0);
                Rect r1 = reye[t];
                frame3 = ROI(r1);
            }
            for (int z = 0; z < leye.size(); z++)
            {
                Point pt1(eyepair[i].x + leye[z].x, eyepair[i].x + leye[z].y + leye[z].height);
                Point pt2(leye[z].x, leye[z].y);
                rectangle(ROI, pt1, pt2, cvScalar(0, 255, 0, 0), 1, 8, 0);
                Rect r2 = reye[z];
                frame2 = ROI(r2);
            }
        }
        imshow("algilanan", frame);
        imshow("algilanan2", frame2);
        imshow("algilanan3", frame3);
        waitKey(33);
    }
    return 0;
}
I want to use the fitLine function to come up with a line to draw on my source image src_crop. I load the frame in my main() and call drawLine().
But the code aborts with the following error:
Code:
#include "stdafx.h"
#include <fstream>
#include <iostream>
#include <vector>
#include <stdlib.h>
#include <stdio.h>
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace std;
using namespace cv;
/// Global variables
Mat src_gray;
Mat src_crop;
Mat dst, detected_edges;
int edgeThresh = 1;
int lowThreshold = 27;
int const max_lowThreshold = 100;
int ratio = 3;
int kernel_size = 3;
char* window_name = "Edge Map";
int i,j;
void drawLine(int, void*)
{
    vector<Vec4f> outline;
    vector<Point2f> ssline;
    int flag2 = 0;

    /// Reduce noise with a kernel 3x3
    blur(src_gray, detected_edges, Size(3, 3));

    /// Canny detector
    Canny(detected_edges, detected_edges, lowThreshold, lowThreshold * ratio, kernel_size);

    /// Using Canny's output as a mask, we display our result
    dst.create(detected_edges.size(), detected_edges.type());
    dst = Scalar::all(0);
    src_crop.copyTo(dst, detected_edges);
    //namedWindow("Detected Edges", CV_WINDOW_AUTOSIZE);
    //imshow("Detected Edges", detected_edges);

    cvtColor(dst, dst, CV_BGR2GRAY);

    for (j = 0; j < dst.cols; j++)
    {
        for (i = 0; i < dst.rows; i++)
        {
            if (Scalar(dst.at<uchar>(i, j)).val[0] >= 90)
            {
                //cout << "Hi";
                flag2 = 1;
                break;
            }
        }
        if (flag2 == 1)
            break;
    }

    int k = j;
    int l = i;
    for (j = k; j < dst.cols; j++)
    {
        Point2f ss = Point2f(l, j);
        ssline.push_back(ss);
    }

    fitLine(ssline, outline, CV_DIST_L1, 0, 0.01, 0.01);
    //imshow("Result", src_crop);
}

int main(int argc, char** argv)
{
    /// Load an image
    src = imread(s);
    if (!src.data)
    {
        return -1;
    }

    /// Create a matrix of the same type and size as src (for dst)
    //dst.create(src.size(), src.type());
    src_crop = src;

    /// Convert the image to grayscale
    cvtColor(src_crop, src_gray, CV_BGR2GRAY);

    /// Create a window
    namedWindow(window_name, CV_WINDOW_AUTOSIZE);

    /// Create a Trackbar for user to enter threshold
    createTrackbar("Min Threshold:", window_name, &lowThreshold, max_lowThreshold, drawLine);

    /// Show the image
    drawLine(0, 0);
    if (waitKey(30) >= 0) break;
    return 0;
}
The code stops working at the point where fitLine() is called; I found this by testing the code with printf statements.
Can anyone kindly help me solve the issue?
Aside from the fact that your code won't compile, the issue is that you're passing the outline parameter to fitLine as a vector<Vec4f>, while it should be a Vec4f.
Change the declaration of outline to:
Vec4f outline;
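For reference, with 2D input points fitLine fills that Vec4f as (vx, vy, x0, y0): a normalised direction vector plus a point on the fitted line. A minimal sketch of drawing the result, assuming ssline and src_crop from the drawLine() above:

Vec4f outline;
fitLine(ssline, outline, CV_DIST_L1, 0, 0.01, 0.01);

// (vx, vy) is a unit direction vector, (x0, y0) a point on the line.
float vx = outline[0], vy = outline[1], x0 = outline[2], y0 = outline[3];

// Extend the line in both directions and draw it; 1000 is an arbitrary length.
Point p1(cvRound(x0 - 1000 * vx), cvRound(y0 - 1000 * vy));
Point p2(cvRound(x0 + 1000 * vx), cvRound(y0 + 1000 * vy));
line(src_crop, p1, p2, Scalar(0, 0, 255), 2);
imshow("Result", src_crop);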
I am trying to make sense of the hierarchy returned by the findContours function in OpenCV. I am using RETR_CCOMP, hoping to find the parent contours and whether or not they have 'children'. My contour hierarchy vector seems to suggest there are far more parent contours than are visible in the output, which makes it harder to use.
Is it possible to get around this problem?
Here's my code:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src;
Mat src_gray;
Mat src_edges;
int main(int argc, char** argv)
{
    src = imread("image.jpg", CV_LOAD_IMAGE_UNCHANGED);
    cvtColor(src, src_gray, CV_BGR2GRAY);
    blur(src_gray, src_gray, Size(3, 3));
    Canny(src_gray, src_edges, 200, 120);

    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    findContours(src_edges, contours, hierarchy, RETR_CCOMP, CV_CHAIN_APPROX_TC89_L1, Point(0, 0));

    for (auto vec : hierarchy)
        std::cout << vec << std::endl; // just to have a look at the hierarchy vector

    int count = 0;
    for (int i = 0; i < 5; i++){
        if (hierarchy[i][3] != 0){
            count++;
        }
    }
    cout << "number of labels = " << count << endl;

    Mat drawing = Mat::zeros(src_gray.size(), CV_8UC3);
    for (int i = 0; i < contours.size(); i++)
    {
        Scalar color = Scalar(rand() & 255, rand() & 255, rand() & 255);
        drawContours(drawing, contours, i, color, 2, 8, hierarchy, 0, Point());
    }

    namedWindow("Contours", CV_WINDOW_AUTOSIZE);
    imshow("Contours", drawing);
    namedWindow("Original Image", CV_WINDOW_AUTOSIZE);
    imshow("Original Image", src);
    waitKey(0);
    return(0);
}
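One detail that may clarify the hierarchy output: each hierarchy[i] is a Vec4i laid out as [next sibling, previous sibling, first child, parent], and with RETR_CCOMP a top-level (outer) contour has parent == -1, not 0, while a hole gets the index of its enclosing contour as parent. A minimal sketch of counting them, assuming contours and hierarchy as produced above:

int topLevel = 0, holes = 0;
for (size_t i = 0; i < hierarchy.size(); i++)
{
    // hierarchy[i] = [next sibling, previous sibling, first child, parent]
    if (hierarchy[i][3] == -1)
        topLevel++;   // outer boundary, no parent
    else
        holes++;      // with RETR_CCOMP, anything with a parent is a hole
}
cout << "top-level contours: " << topLevel << ", holes: " << holes << endl;

Checking parent == -1 rather than != 0, and looping over the whole vector instead of a fixed 5 entries, should bring the count closer to what you see in the drawing.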
Here's an example image: