Hello everyone, I have been having an issue with my OpenCV 3.1 program that I am writing in Visual Studio 2015 C++. It can use the camera and display images just fine, but when I try out a face detection example from the documentation, my computer freezes up. Here is the code:
Here are the two functions it gets hung up on — it mainly stops around the detectFace function; I will set a breakpoint to find out specifically where.
// Loads the Haar cascade files that detectFace() needs.  A load failure is
// reported on stdout but does not abort, matching the caller's expectations.
void CoreVision::init()
{
    face_cascade_name = "C:\\PAD\\opencv\\haarcascades\\haarcascade_frontalface_alt.xml";
    eyes_cascade_name = "C:\\PAD\\opencv\\haarcascades\\haarcascade_eye_tree_eyeglasses.xml";

    const bool face_loaded = face_cascade.load(face_cascade_name);
    if (!face_loaded)
    {
        printf("--(!)Error loading face cascade\n");
    }

    const bool eyes_loaded = eyes_cascade.load(eyes_cascade_name);
    if (!eyes_loaded)
    {
        printf("--(!)Error loading eyes cascade\n");
    }

    std::cout << "face_dir: " << face_cascade_name << std::endl;
    std::cout << "eye_dir: " << eyes_cascade_name << std::endl;
}
// Detects faces (and eyes within each face) in `main`, draws a magenta
// ellipse around every face and a blue circle on every eye, and returns the
// annotated image.  Returns an empty Mat when detection cannot run or an
// exception occurs.  Detected face rectangles are stored in the `faces`
// member (cleared on each call).
cv::Mat CoreVision::detectFace(cv::Mat main)
{
    cv::Mat nuller; // empty Mat returned on any failure path
    try
    {
        // Guard: an empty frame or an unloaded classifier would otherwise
        // throw deep inside OpenCV (caught below, but better to bail early).
        if (main.empty() || face_cascade.empty())
        {
            return nuller;
        }

        faces.clear();

        cv::Mat frame_gray;
        cv::cvtColor(main, frame_gray, cv::COLOR_BGR2GRAY);
        cv::equalizeHist(frame_gray, frame_gray);

        face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | cv::CASCADE_SCALE_IMAGE, cv::Size(30, 30));

        for (size_t i = 0; i < faces.size(); i++)
        {
            // Ellipse centered on the detected face rectangle.
            cv::Point center(faces[i].x + faces[i].width / 2, faces[i].y + faces[i].height / 2);
            cv::ellipse(main, center, cv::Size(faces[i].width / 2, faces[i].height / 2), 0, 0, 360, cv::Scalar(255, 0, 255), 4, 8, 0);

            // Search for eyes only inside the face region.
            cv::Mat faceROI = frame_gray(faces[i]);
            std::vector<cv::Rect> eyes;
            eyes_cascade.detectMultiScale(faceROI, eyes, 1.1, 2, 0 | cv::CASCADE_SCALE_IMAGE, cv::Size(30, 30));

            for (size_t j = 0; j < eyes.size(); j++)
            {
                // Eye coordinates are relative to the face ROI, so offset
                // them by the face origin to draw on the full frame.
                cv::Point eye_center(faces[i].x + eyes[j].x + eyes[j].width / 2, faces[i].y + eyes[j].y + eyes[j].height / 2);
                // BUG FIX: the original wrote (eyes[j].width = eyes[j].height)
                // — an assignment that corrupted the eye rectangle.  The
                // OpenCV tutorial formula averages width and height instead.
                int radius = cvRound((eyes[j].width + eyes[j].height) * 0.25);
                cv::circle(main, eye_center, radius, cv::Scalar(255, 0, 0), 4, 8, 0);
            }
        }
        return main;
    }
    catch (...)
    {
        std::cout << "Exception: have issue with processing face detection.";
        return nuller;
    }
}
Here is the main function:
int main()
{
CoreVision _core;
_core.init();
while (true)
{
cv::Mat img = _core.captureImage(-1);
img = _core.detectFace(img);
_core.displayImage(img, "parker");
}
}
And finally the .h file for the two functions:
#include <iostream>
#include <stdio.h>
#include <tchar.h>
#include <iostream>
#include <opencv2\opencv.hpp>
#include <opencv2\world.hpp>
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
// Thin wrapper around OpenCV capture, display and Haar-cascade face
// detection.  Call init() once (to load the cascade files) before calling
// detectFace().
class CoreVision
{
public:
CoreVision();
~CoreVision();
// Loads an image from disk; `type` is an implementation-defined mode flag.
cv::Mat loadImage(const char* image_name, int type);
// Writes `img` to disk at `filename`.
void saveImage(cv::Mat img, const char* filename);
// Grabs one frame from camera index `cam` (implementation not shown here).
cv::Mat captureImage(int cam);
// Detects faces/eyes in `main`, draws markers, returns the annotated frame.
cv::Mat detectFace(cv::Mat main);
// Converts `img`; meaning of `type` not visible from this declaration.
int convertImage(cv::Mat img, int type);
// Shows `img` in a window titled `window_name`.
void displayImage(cv::Mat, const char* window_name);
// Loads the face and eye cascade classifiers; must run before detectFace().
void init(void);
// Scratch buffer for the current working directory path.
char cCurrentPath[FILENAME_MAX];
// Paths to the cascade xml files, set by init().
cv::String face_cascade_name;
cv::String eyes_cascade_name;
// Classifiers loaded by init() and used by detectFace().
cv::CascadeClassifier face_cascade;
cv::CascadeClassifier eyes_cascade;
// Face rectangles from the most recent detectFace() call.
std::vector<cv::Rect> faces;
};
Thank you in advance for your help. I am not sure if it is a driver issue, but I have tried it on multiple computers and the same freezing happens.
you don't seem to be storing the faces vector. You have it in public
std::vector&lt;cv::Rect&gt; faces; — but where are you using it? eyes and faces are two different sets of data. You're also mixing two styles of OpenCV code, C style and C++ style. You should stick with C++ since you're using the latest OpenCV version.
faces.clear(); // under here
std::vector<Rect>faces; // see if this helps
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
void saveImage(cv::Mat img, const char* filename); //should be
void imwrite(img, const char* filename);// etc.
cv::Mat img = _core.captureImage(-1);
img = _core.detectFace(img);
_core.displayImage(img, "parker"); // should be
_core.displayImage(Mat frame);
this probably isn't going to work, assuming _core.captureImage(-1)
is your camera. Also, what does int convertImage(cv::Mat img, int type); do? Are you sure it takes an int value?
c++ convention is
Videocapture cap(0);
Mat frame;
cap >> frame;
namedWindow("video");
imshow(video, frame); //etc
I had exactly the same issue. I figured out that the line containing eyes_cascade.detectMultiScale was the one causing my driver to crash. I have the Intel HD graphics 4400.
The code started working perfectly after I completely uninstalled the Intel HD graphics drivers.
Related
I have been trying to develop a program - written with C++ and using OpenCV -which counts the overall value of coins shown in some image.
I should note that I am new to the opencv platform.
In order to achieve this goal, as far as I understand - there has to be a use of the Hough transform to detect the ratio of the coins. I found this code example on the OpenCV site, but I can't put a value for the coins.
Here is what I have done so far.
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace std;
using namespace cv;
namespace
{
const std::string windowName = "Coins detection";
const std::string cannyThresholdTrackbarName = "Canny threshold";
const std::string accumulatorThresholdTrackbarName = "Accumulator Threshold";
const int cannyThresholdInitialValue = 41;
const int accumulatorThresholdInitialValue = 87;
const int maxAccumulatorThreshold = 200;
const int maxCannyThreshold = 255;
void HoughDetection(const Mat& src_gray, const Mat& src_display, int cannyThreshold, int accumulatorThreshold)
{
std::vector<Vec3f> circles;
HoughCircles( src_gray, circles, HOUGH_GRADIENT, 1, src_gray.rows/8, cannyThreshold, accumulatorThreshold, 0, 0 );
Mat display = src_display.clone();
for( size_t i = 0; i < circles.size(); i++)
{
Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
int radius = cvRound(circles[i][2]);
circle( display, center, 3, Scalar(0,255,0), -1, 8, 0 );
circle( display, center, radius, Scalar(0,0,255), 3, 8, 0 );
}
imshow( windowName, display);
}
}
int main(int argc, char** argv)
{
Mat src, src_gray;
String imageName("c:\\moedas.jpg");
if (argc > 1)
{
imageName = argv[1];
}
src = imread( imageName, IMREAD_COLOR );
if( src.empty() )
{
std::cerr<<"Invalid input image\n";
return -1;
}
cvtColor( src, src_gray, COLOR_BGR2GRAY );
GaussianBlur( src_gray, src_gray, Size(9, 9), 2, 2 );
int cannyThreshold = cannyThresholdInitialValue;
int accumulatorThreshold = accumulatorThresholdInitialValue;
namedWindow( windowName, WINDOW_AUTOSIZE );
createTrackbar(cannyThresholdTrackbarName, windowName, &cannyThreshold,maxCannyThreshold);
createTrackbar(accumulatorThresholdTrackbarName, windowName, &accumulatorThreshold, maxAccumulatorThreshold);
char key = 0;
while(key != 'q' && key != 'Q')
{
cannyThreshold = std::max(cannyThreshold, 1);
accumulatorThreshold = std::max(accumulatorThreshold, 1);
HoughDetection(src_gray, src, cannyThreshold, accumulatorThreshold);
key = (char)waitKey(10);
}
return 0;
}
The code you have so far only segments circle shapes in an input image. This is just the first step to count the coins. There are many ways to perform that task, ranging from simple contour counting techniques to complicated deep learning, and the explanation of such techniques is too broad and large in scope to put efficiently and concisely in an SO answer. However, here are some coin detectors/counter implementations/tutorials that you can check:
Implementation 1 in Python. This is the best of the list, although the code file is larger than yours, not too hard to port to C++. It has the best detection/counting performance but deals with Neural Networks, specifically, a Multilayer Perceptron.
Implementation 2 in Python. This is a very small code file, nearly as large as yours and has no idiomatic Python code, porting it to C++ is a no brainer, you should start here. This implementation uses mere contour counting with the aid of the Canny edge detector.
Tutorial 1 in C++. A simple tutorial in C++, but only serves introductory purposes, the implementations listed above are the real thing.
I'm pretty new to OpenCV and cascade training. I did train a cascade to recognize "running" motion. I used Haar-like and LBP. For haar-like I used 1181 positive images and 3866 negative images and 12 stages. For LBP I used 2426 positive images and 1031 negative images and 12 staged.
Following, you could see a screenshot of each one.
After a while waiting for training to be finished (around 3-4 hours for each one) I would be able to create the xml files from AdaBoost chart classifiers.
The problem is that when I use them on my code to see how they are working, they don't recognize any of obvious running motions! The code is written with C++ and OpenCV 3.3.
Does anyone have any idea why my cascade xml files are not working properly after training?
C++ code is as below:
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/objdetect.hpp>
#include <opencv2/imgproc.hpp>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
//open from file
VideoCapture video("../data/videos/people_activity/run/run2.mp4");
Mat img;
Mat frmg;
// Load cascate classifier placed in sulution folder
CascadeClassifier bodydetector, rundetector;
string bodycascadeName = "haarcascade_fullbody.xml";
string runcascadeName = "lbpcascade_run.xml";
bool bodyloaded = bodydetector.load(bodycascadeName);
bool runloaded = rundetector.load(runcascadeName);
// Parameters of detectMultiscale Cascade Classifier
int groundThreshold = 1;
double scaleStep = 1.1;
Size minimalObjectSize(40, 40);
Size maximalObjectSize(200, 200);
// Vector of returned bodies and run
std::vector<Rect> bodyfound, runmotion;
while (video.read(img))
{
// Image from video read and store to Mat
video >> img;
if (img.rows == 0)//end of video
break;
// Convert input to greyscale
Mat image_grey;
cvtColor(img, image_grey, CV_BGR2GRAY);
bodyfound.clear();
// Detect bodies
bodydetector.detectMultiScale(image_grey, bodyfound, scaleStep, groundThreshold, 0 | 2, minimalObjectSize, maximalObjectSize);
for (size_t k = 0; k < bodyfound.size(); k++)
{
//draw a rectangle with red color around it
rectangle(img, bodyfound[k].br(), bodyfound[k].tl(), Scalar(0, 0, 0), 1, 8, 0);
Mat bodyROI = img(bodyfound[k]);
//Detect running for bodies if there is any!
rundetector.detectMultiScale(bodyROI, runmotion, scaleStep, groundThreshold, 0 | 2, minimalObjectSize, maximalObjectSize);
//if any were found!
if (runmotion.size() > 0) {
for (size_t i = 0; i < runmotion.size(); i++)
{
//draw a rectangle with red color around it
rectangle(img, runmotion[i].br(), runmotion[i].tl(), Scalar(80, 0, 255), 1, 8, 0);
}
}
}
imshow("result", img);
int c = waitKey(10);
if ((char)c == 27) { break; } // escape
}
return 0;
}
I am currently trying to write a face detection program and i seem to have a slight problem with it even though it is able to build successfully. Any ideas on how to solve this? Below indicates the error that appears when i debug and my code.
detect.cpp:
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/objdetect.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
// capture from web camera init
VideoCapture cap(0);
cap.open(0);
Mat img;
// Initialize the inbuilt Harr Cascade frontal face detection
// Below mention the path of where your haarcascade_frontalface_alt2.xml file is located
CascadeClassifier face_cascade;
face_cascade.load("C:\OpenCV\sources\data\haarcascades\haarcascade_frontalface_alt2.xml");
// i tried changing this line to match my folder in C Drive
for (;;)
{
// Image from camera to Mat
cap >> img;
// obtain input image from source
cap.retrieve(img, CV_CAP_OPENNI_BGR_IMAGE);
// Just resize input image if you want
resize(img, img, Size(1000, 640));
// Container of faces
vector<Rect> faces;
// Detect faces
face_cascade.detectMultiScale(img, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(140, 140));
// error message appears here
//Show the results
// Draw circles on the detected faces
for (int i = 0; i < faces.size(); i++)
{
Point center(faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5);
ellipse(img, center, Size(faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar(255, 0, 255), 4, 8, 0);
}
// To draw rectangles around detected faces
/* for (unsigned i = 0; i<faces.size(); i++)
rectangle(img,faces[i], Scalar(255, 0, 0), 2, 1);*/
imshow("wooohooo", img);
int key2 = waitKey(20);
}
return 0;
}
error message:
Unhandled exception at 0x000007FEFD5CA06D in opencvTry.exe: Microsoft C++ exception: cv::Exception at memory location 0x000000000029EE10. occurred
After I looked at my code line by line, I realized that it was just the backslashes that caused the error. The code I used above was this:
face_cascade.load("C:\OpenCV\sources\data\haarcascades\haarcascade_frontalface_alt2.xml");
But in actual fact, it should have been this:
face_cascade.load("C:/OpenCV/sources/data/haarcascades/haarcascade_frontalface_alt2.xml");
I am very new to OpenCV and I am trying to detect just the penny image, but I am getting a bunch of smaller circles. Can someone tell me what Im doing wrong?
Code from here: https://github.com/opencv/opencv/blob/master/samples/cpp/houghcircles.cpp
Only things I changed were to make min circle radius 400, and max of circle 0. Because I know the image will be 600x480 so the penny circle must be at least 400.
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
static void help()
{
cout << "\nThis program demonstrates circle finding with the Hough transform.\n"
"Usage:\n"
"./houghcircles <image_name>, Default is ../data/board.jpg\n" << endl;
}
int main(int argc, char** argv)
{
cv::CommandLineParser parser(argc, argv,
"{help h ||}{#image|../data/board.jpg|}"
);
if (parser.has("help"))
{
help();
return 0;
}
//![load]
string filename = parser.get<string>("#image");
Mat img = imread(filename, IMREAD_COLOR);
if(img.empty())
{
help();
cout << "can not open " << filename << endl;
return -1;
}
//![load]
//![convert_to_gray]
Mat gray;
cvtColor(img, gray, COLOR_BGR2GRAY);
//![convert_to_gray]
//![reduce_noise]
medianBlur(gray, gray, 5);
//![reduce_noise]
//![houghcircles]
vector<Vec3f> circles;
HoughCircles(gray, circles, HOUGH_GRADIENT, 1,
gray.rows/16, // change this value to detect circles with different distances to each other
100, 30, 400,0 // change the last two parameters
// (min_radius & max_radius) to detect larger circles
);
//![houghcircles]
//![draw]
for( size_t i = 0; i < circles.size(); i++ )
{
Vec3i c = circles[i];
circle( img, Point(c[0], c[1]), c[2], Scalar(0,0,255), 3, LINE_AA);
circle( img, Point(c[0], c[1]), 2, Scalar(0,255,0), 3, LINE_AA);
}
//![draw]
//![display]
imshow("detected circles", img);
waitKey();
//![display]
return 0;
}
You've got radius and diameter mixed up. Your minimum radius cannot be 400 if your image is only 600x480. Set your min_radius to 200.
HoughCircles(gray, circles, HOUGH_GRADIENT, 1,
max(gray.cols,gray.rows), // to find only the biggest perfect circle
100, 100, 0,0 // leave other params as default
);
I have a function that gets called from main in a for loop that searches for faces from a video feed. The code runs perfectly in the first run through, but on the second loop it outputs many "Camera dropped frame!" errors to the console and no longer updates the video feed.
I have found the line that causes the erros, it is the one that contains the detectMultiScale function in it. The full function is here:
// Detects faces in the frame using the global faceClassifier, draws an
// ellipse around each detection, and shows the annotated frame in the
// "frame" window.  NOTE(review): imshow() only renders when waitKey() is
// called somewhere — the caller must pump HighGUI events.
void findInFrame(Mat inputFrame)
{
    vector<Rect> faces;
    Mat grayFrame;
    cvtColor(inputFrame, grayFrame, COLOR_BGR2GRAY);
    faceClassifier.detectMultiScale(grayFrame, faces);
    // size_t index avoids the original's signed/unsigned comparison warning.
    for (size_t i = 0; i < faces.size(); i++)
    {
        Point center(faces[i].x + faces[i].width * 0.5, faces[i].y + faces[i].height * 0.5);
        ellipse(inputFrame, center, Size(faces[i].width * 0.5, faces[i].height * 0.5), 0, 0, 360, Scalar(255, 0, 255), 4, 8, 0);
        // Removed dead code: the original built `Mat faceROI = grayFrame(faces[i])`
        // here but never used it (no eye detection follows).
    }
    imshow("frame", inputFrame);
}
The line that throws the error is:
faceClassifier.detectMultiScale( grayFrame, faces);
Every frame after the first causes the errors. How can i fix this?
Main is here:
#include <iostream>
#include <unistd.h>
#include <opencv2/core.hpp>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
string faceHaar = "/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml";
string eyesHaar = "/usr/local/share/OpenCV/haarcascades/haarcascade_eye.xml";
CascadeClassifier faceClassifier;
void findInFrame(Mat inputFrame);
int main(int argc, const char * argv[])
{
VideoCapture cam(0);
Mat frame;
if(!faceClassifier.load(faceHaar))
{
cout << "Error loading face cascade" << endl;
return -1;
}
for(;;)
{
cam >> frame;
if(!frame.empty())
{
findInFrame(frame);
usleep(1000);
}
else
{
cout << "frame empty" << endl;
}
}
return 0;
}
Try specifying the function's parameters a bit more — I feel like it's just taking too long to process your matches.
faceClassifier.detectMultiScale(grayFrame, faces, 1.3, 3,0|CV_HAAR_SCALE_IMAGE, Size(20, 30));
Where size is the size you trained your detector, 1.3 is a scale threshold and 3 is how many nearest neighbours are needed for a match.
Aside from that, dropping frames isn't a huge issue, but you could well be doing some things wrong elsewhere in your code, like where you grab your new frame.
I also would consider changing the function to void findInFrame(Mat &inputFrame) and calling imshow in your main loop, not in the function. Note that the &inputFrame isn't really a conventional pointer and doesn't require you to change how you reference inputFrame in the function