Application hangs at detectMultiScale - C++

I am developing a face detection algorithm in C++ in Visual Studio, using the Haar cascade method.
The application hangs at the line below, while the same code works perfectly in a Linux environment.
Is there any OpenCV configuration that might cause this?
Any help would be greatly appreciated.
frontfaceCascade.detectMultiScale(grey_image, faces, 1.1, 0, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30));
Thanks.
#include "stdafx.h"
#include <opencv2/opencv.hpp>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include <iostream>
#include <thread>
#include <stdlib.h>
#include <corecrt_math_defines.h>
using namespace std;
using namespace cv;
vector<Mat> Frames;
void getFrames(char* fileName);
cv::CascadeClassifier frontfaceCascade;
cv::CascadeClassifier sidefaceCascade;
int main(int argc, char *argv[]) {
if (argc < 2) {
cout << " enter file name: \n";
exit(1);
}
char* fileName = argv[1];
// if( !frontfaceCascade.load("lbpcascade_frontalface.xml")){
if (!frontfaceCascade.load("haarcascade_frontalface_default.xml")) {
std::cout << "Error Loading frontface haarcascade" << std::endl;
}
if (!sidefaceCascade.load("haarcascade_profileface.xml"))
{
std::cout << "Error Loading sideface haarcascade" << std::endl;
}
namedWindow("Faces", WINDOW_NORMAL);
resizeWindow("Faces", 100, 400);
moveWindow("Faces", 30, 100);
getFrames(fileName);
return 0;
}
void getFrames(char* fileName) {
cv::VideoCapture vcap(fileName);
if (!vcap.isOpened()) {
cout << "Error opening video stream or file" << endl;
return;
}
cv::Mat grey_image;
Mat img;
namedWindow("Frames", CV_WINDOW_AUTOSIZE);
resizeWindow("Frames", 640, 360);
moveWindow("Frames", 130, 100);
int init = 0;
while (1)
{
vcap >> img;
// If the frame is empty, break immediately
if (img.empty())
break;
unsigned int prevSize = 0;
resize(img, img, cv::Size(640, 360));
cv::cvtColor(img, grey_image, CV_RGB2GRAY);
//cv::equalizeHist(grey_image, grey_image);
std::vector<cv::Rect> faces;
frontfaceCascade.detectMultiScale(grey_image, faces, 1.1, 0, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30)); // Minimum size of obj
//sidefaceCascade.detectMultiScale(grey_image, faces, 1.1, 4, 0 | 1, Size(40, 40), Size(100, 100)); // Minimum size of obj
/****/
}
destroyAllWindows();
vcap.release();
}
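One detail worth checking (an observation about the call above, not a confirmed fix): the detectMultiScale call passes minNeighbors = 0, which disables candidate grouping, so the detector returns every raw candidate window; on large frames this can take far longer and produce enormous result sets. A minimal sketch of the more conventional call, with the other parameters unchanged:

    // illustrative: minNeighbors = 3 enables grouping of raw detections
    frontfaceCascade.detectMultiScale(grey_image, faces, 1.1, 3,
                                      0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30));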

Related

OpenCV: Unable to run Canny edge detection in C++

I have written a simple program to perform Canny edge detection on a live stream. The code is shown below:
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int lowThreshold = 0;
int const max_lowThreshold = 100;
int kernel_size = 3;
int ratio = 3;
Mat img;
Mat display;

void CannyThreshold()
{
    cvtColor(img, display, COLOR_RGB2GRAY);
    // GaussianBlur(display, display, Size(7,7), 3, 3);
    GaussianBlur(display, display, Size(1, 1), 1, 1);
    printf("%d\n", lowThreshold);
    Canny(display, display, lowThreshold, 3);
    imshow("Canny", display);
}

int main()
{
    VideoCapture cap(0);
    namedWindow("Canny");
    createTrackbar("Min Threshold: ", "Canny", &lowThreshold, max_lowThreshold);
    while (1)
    {
        cap.read(img);
        int ret = waitKey(1);
        CannyThreshold();
        if (ret == 'q')
            break;
    }
    cap.release();
    return 0;
}
I get the following run-time error when I run the code. (I'm using OpenCV 4)
error: (-215:Assertion failed) ksize.width > 0 && ksize.width % 2 == 1 && ksize.height > 0 && ksize.height % 2 == 1 in function 'createGaussianKernels'
Any suggestions on how I can solve this error?
The issue is that GaussianBlur can't accept a kernel size of 1. Correct it to 3x3 or 5x5 in your code.
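A minimal sketch of that fix applied directly to the question's CannyThreshold function (everything else unchanged):

    // kernel dimensions must be valid for Gaussian smoothing; use 3x3 instead of 1x1
    GaussianBlur(display, display, Size(3, 3), 1, 1);

For reference, a fuller capture-and-Canny example (using blur for noise reduction) follows: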
#include <opencv2/core/utility.hpp>
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
#include <ctype.h>

using namespace cv;
using namespace std;

int main(int argc, const char** argv)
{
    VideoCapture cap;
    Mat frame;
    Mat image; // copy of the captured frame
    Mat src_gray;
    Mat dst;
    Mat detected_edges;

    const String window_name = "Canny Edge Detector - VideoCapture";
    int lowThreshold = 10;
    const int max_lowThreshold = 100;
    const int ratio = 3;
    const int kernel_size = 3;
    int FPS;
    int frame_width, frame_height;

    int camNum = 0;
    cap.open(camNum);
    if (!cap.isOpened())
    {
        cout << "***Could not initialize capturing...***\n";
        cout << "Current parameter's value: \n";
        return -1;
    }

    FPS = cap.get(CAP_PROP_FPS);
    if (FPS <= 0) FPS = 30; // some cameras report 0 FPS; avoid dividing by zero below
    frame_width = cap.get(CAP_PROP_FRAME_WIDTH);
    frame_height = cap.get(CAP_PROP_FRAME_HEIGHT);
    dst.create(frame_height, frame_width, CV_8UC3); // Mat::create takes (rows, cols)

    while (true)
    {
        cap >> frame;
        if (frame.empty())
            break;
        frame.copyTo(image);

        // Convert the image to grayscale
        cvtColor(image, src_gray, COLOR_BGR2GRAY);

        // Reduce noise with a 3x3 kernel
        blur(src_gray, detected_edges, Size(3, 3));

        // Canny detector
        Canny(detected_edges, detected_edges, lowThreshold, lowThreshold * ratio, kernel_size);

        // Using Canny's output as a mask, display the result
        dst = Scalar::all(0);
        image.copyTo(dst, detected_edges);
        imshow(window_name, dst);

        if (waitKey(1000 / FPS) >= 0)
            break;
    }
    return 0;
}

OpenCV Optical Flow Core Dumped

I am using the OpenCV LK optical flow example for a robot vision application with ROS.
It seems to work reasonably well; however, I am getting the error below:
OpenCV Error: Assertion failed ((npoints = prevPtsMat.checkVector(2, CV_32F, true)) >= 0) in calc, file /build/opencv-L2vuMj/opencv-3.2.0+dfsg/modules/video/src/lkpyramid.cpp, line 1231
terminate called after throwing an instance of 'cv::Exception'
what(): /build/opencv-L2vuMj/opencv-3.2.0+dfsg/modules/video/src/lkpyramid.cpp:1231: error: (-215) (npoints = prevPtsMat.checkVector(2, CV_32F, true)) >= 0 in function calc
Aborted (core dumped)
As far as I can understand from the error message and from the system's behavior, it happens when all of the initial tracking points are gone in the current frame.
Is there a way to restart this process (defining new points) instead of aborting?
My code, by the way:
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/video.hpp>
#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>
#include <vector>

using namespace cv;
using namespace std;

static const std::string OPENCV_WINDOW = "Image_window";

class ImageConverter
{
    ros::NodeHandle nh_;
    image_transport::ImageTransport it_;
    image_transport::Subscriber image_sub_;
    image_transport::Publisher image_pub_;

public:
    cv::Mat current_frame;

    ImageConverter()
        : it_(nh_)
    {
        image_sub_ = it_.subscribe("/realsense/color/image_raw", 1,
                                   &ImageConverter::imageCallback, this);
        cv::namedWindow(OPENCV_WINDOW);
    }

    ~ImageConverter()
    {
        cv::destroyWindow(OPENCV_WINDOW);
    }

    void imageCallback(const sensor_msgs::ImageConstPtr& msg)
    {
        cv_bridge::CvImagePtr cv_image;
        try
        {
            cv_image = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
            current_frame = cv_image->image;
        }
        catch (cv_bridge::Exception& e)
        {
            ROS_ERROR("cv_bridge exception: %s", e.what());
            return;
        }
    }
};

int main(int argc, char** argv)
{
    ros::init(argc, argv, "OpenCV_Node");
    ImageConverter MyNode;
    ros::Rate r(30);
    r.sleep();
    ros::spinOnce();
    r.sleep();

    // Tentative use of OpenCV tools with a ROS image
    // Lucas-Kanade Optical Flow from: https://docs.opencv.org/master/d4/dee/tutorial_optical_flow.html

    // Create some random colors
    vector<Scalar> colors;
    RNG rng;
    for (int i = 0; i < 100; i++)
    {
        int r = rng.uniform(0, 256);
        int g = rng.uniform(0, 256);
        int b = rng.uniform(0, 256);
        colors.push_back(Scalar(r, g, b));
    }

    Mat old_frame, old_gray;
    vector<Point2f> p0, p1;

    // Take first frame and find corners in it
    old_frame = MyNode.current_frame;
    cvtColor(old_frame, old_gray, COLOR_BGR2GRAY);
    goodFeaturesToTrack(old_gray, p0, 100, 0.3, 7, Mat(), 7, false, 0.04);

    // Create a mask image for drawing purposes
    Mat mask = Mat::zeros(old_frame.size(), old_frame.type());

    while (ros::ok())
    {
        r.sleep();
        Mat frame, frame_gray;
        frame = MyNode.current_frame;
        if (frame.empty())
            break;
        cvtColor(frame, frame_gray, COLOR_BGR2GRAY);

        // calculate optical flow
        vector<uchar> status;
        vector<float> err;
        TermCriteria criteria = TermCriteria((TermCriteria::COUNT) + (TermCriteria::EPS), 10, 0.03);
        calcOpticalFlowPyrLK(old_gray, frame_gray, p0, p1, status, err, Size(15, 15), 2, criteria);

        vector<Point2f> good_new;
        for (uint i = 0; i < p0.size(); i++)
        {
            // Select good points
            if (status[i] == 1)
            {
                good_new.push_back(p1[i]);
                // draw the tracks
                line(mask, p1[i], p0[i], colors[i], 2);
                circle(frame, p1[i], 5, colors[i], -1);
            }
        }

        Mat img;
        add(frame, mask, img);
        imshow(OPENCV_WINDOW, img);
        int keyboard = waitKey(30);
        if (keyboard == 'q' || keyboard == 27)
            break;

        // Now update the previous frame and previous points
        old_gray = frame_gray.clone();
        p0 = good_new;
        ros::spinOnce();
    }
    return 0;
}
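One possible way to get the restart behavior asked about above (a minimal sketch, assuming the loop variables from the code, not a tested fix): re-seed the tracker whenever the point list runs empty, so calcOpticalFlowPyrLK never receives zero points:

    // inside the while loop, just before calcOpticalFlowPyrLK:
    if (p0.empty())
    {
        // all tracks lost: detect a fresh set of corners and clear the drawing mask
        goodFeaturesToTrack(old_gray, p0, 100, 0.3, 7, Mat(), 7, false, 0.04);
        mask = Mat::zeros(frame.size(), frame.type());
        if (p0.empty())
        {
            // nothing trackable in this frame either; try again next iteration
            old_gray = frame_gray.clone();
            continue;
        }
    }
    calcOpticalFlowPyrLK(old_gray, frame_gray, p0, p1, status, err, Size(15, 15), 2, criteria);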

C++ RealSense OpenCV face detection - access violation

I am new to RealSense and C++, and I know this question might be an easy one.
However, I cannot solve it even after searching for half a day.
I am trying to use RealSense with OpenCV face detection (Haar cascade).
But when I call face_cascade.detectMultiScale, the project gets an access violation error.
My code is this:
// License: Apache 2.0. See LICENSE file in root directory.
// Copyright(c) 2017 Intel Corporation. All Rights Reserved.
#include <rs.hpp>             // Include RealSense Cross Platform API
#include <opencv2/opencv.hpp> // Include OpenCV API
#include "opencv2/objdetect.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"

using namespace std;
using namespace cv;

//String face_cascade_name;
CascadeClassifier face_cascade;
string window_name = "Face detection";

void detectAndDisplay(Mat frame)
{
    std::vector<Rect> faces;
    Mat frame_gray;
    cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
    equalizeHist(frame_gray, frame_gray);
    if (frame.empty()) {
        printf("error, no data");
    }
    else {
        printf("no problem");
    }

    //face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
    face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(500, 500));

    for (size_t i = 0; i < faces.size(); i++)
    {
        Point center(faces[i].x + faces[i].width / 2, faces[i].y + faces[i].height / 2);
        ellipse(frame, center, Size(faces[i].width / 2, faces[i].height / 2),
                0, 0, 360, Scalar(0, 0, 255), 4, 8, 0);
    }
    imshow(window_name, frame);
}

int main(int argc, char * argv[]) try
{
    // Declare depth colorizer for pretty visualization of depth data
    rs2::colorizer color_map;
    // Declare RealSense pipeline, encapsulating the actual device and sensors
    rs2::pipeline pipe;

    face_cascade.load("C:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt.xml");
    //if (!face_cascade.load(face_cascade_name)) { printf("--(!)Error loading face cascade\n"); };

    // Start streaming with default recommended configuration
    pipe.start();

    const auto window_name = "Display Image";
    namedWindow(window_name, WINDOW_AUTOSIZE);

    while (waitKey(1) < 0 && cvGetWindowHandle(window_name))
    {
        rs2::frameset data = pipe.wait_for_frames(); // Wait for next set of frames from the camera
        //rs2::frame depth = color_map(data.get_depth_frame());
        rs2::frame color = data.get_color_frame();

        // Query frame size (width and height)
        //const int w = depth.as<rs2::video_frame>().get_width();
        //const int h = depth.as<rs2::video_frame>().get_height();
        const int color_w = color.as<rs2::video_frame>().get_width();
        const int color_h = color.as<rs2::video_frame>().get_height();

        // Create OpenCV matrix of size (w,h) from the colorized depth data
        //Mat image(Size(w, h), CV_8UC3, (void*)depth.get_data(), Mat::AUTO_STEP);
        Mat image(Size(color_w, color_h), CV_8UC3, (void*)color.get_data(), Mat::AUTO_STEP);

        // Update the window with new data
        //imshow(window_name, image);
        detectAndDisplay(image);
    }
    return EXIT_SUCCESS;
}
catch (const rs2::error & e)
{
    std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n    " << e.what() << std::endl;
    return EXIT_FAILURE;
}
catch (const std::exception& e)
{
    std::cerr << e.what() << std::endl;
    return EXIT_FAILURE;
}
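One frequent cause of an access violation inside detectMultiScale (an assumption to verify, not a confirmed diagnosis for this post) is a cascade file that silently failed to load: the load call in main ignores its return value. A minimal guard, along the lines of the commented-out check already in the code:

    if (!face_cascade.load("C:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt.xml"))
    {
        printf("--(!)Error loading face cascade\n");
        return EXIT_FAILURE; // detectMultiScale must not be called on an empty classifier
    }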

How would I integrate the contourArea() function in my OpenCV program?

My code detects various shapes based on the number of vertices they have. I've finally accomplished that, and it works perfectly for simple images like the one shown here: http://i.imgur.com/f9qvBwF.png However, for this thresholded image: http://i.imgur.com/jhl0NUQ.png it creates hundreds of shapes because of all the little pixels. I know enough of OpenCV to know there's eroding and dilating, but it seems like the best approach would be to exclude contours whose area is below a certain pixel count. However, I don't know how to integrate the contourArea() function into my code so that it would work.
Here is my current code:
#include "opencv2/core/core.hpp"
#include "opencv2/flann/miniflann.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/photo/photo.hpp"
#include "opencv2/video/video.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/ml/ml.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/core/core_c.h"
#include "opencv2/highgui/highgui_c.h"
#include "opencv2/imgproc/imgproc_c.h"
using namespace cv;
using namespace std;
Mat img;
Mat img_gray;
void contour_finder(int, void*);
int main(int argc, char *argv[])
{
img = imread("C:/Users/wyndr_000/Documents/VisualStudio2013/Projects/OpenCV_Template/OpenCV_Template/testpic.png", CV_LOAD_IMAGE_UNCHANGED);
cvtColor(img, img_gray, CV_RGB2GRAY);
contour_finder(0, 0);
waitKey();
}
void contour_finder(int, void*)
{
Mat img_thresh;
Mat drawn_contours;
threshold(img_gray, img_thresh, 183, 255, 2);
imshow("Thresh Image", img_thresh);
vector<vector<Point> > contours;
vector<Point> result;
findContours(img_thresh, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
// Find a way to filter it out based on Area
for (size_t i = 0; i < contours.size(); i++){
approxPolyDP(Mat(contours[i]), result, arcLength(Mat(contours[i]), true)*0.02, true);
// if (contourArea(result) < 49)
//{
if (result.size() == 3)
{
cout << "TRIANGLE\n";
}
else if (result.size() == 4)
{
cout << "QUADRILATERAL\n";
}
else if (result.size() == 5)
{
cout << "PENTAGON\n";
}
else if (result.size() == 6)
{
cout << "HEXAGON\n";
}
else if (result.size() == 10)
{
cout << "STAR\n";
}
else if (result.size() == 12)
{
cout << "PLUS-SIGN\n";
}
// }
}
/* drawn_contours = Mat::zeros(img_thresh.size(), CV_8UC3);
for (int i = 0; i< contours.size(); i++)
{
Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
drawContours(drawn_contours, contours, i, color, 2, 8, hierarchy, 0, Point());
}
namedWindow("Contours", CV_WINDOW_AUTOSIZE);
imshow("Contours", drawn_contours); */
}
You'll notice that where I declare what the different vertex counts indicate, I have a commented-out contourArea() call, because that's where I imagined it would go.
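A sketch of one way to integrate it (using the same variables as above, and the 49-pixel threshold the commented-out line suggests): compute the area of each raw contour and skip the small ones before the polygon approximation:

    for (size_t i = 0; i < contours.size(); i++)
    {
        // ignore tiny noise contours before classifying the shape
        if (contourArea(contours[i]) < 49.0)
            continue;

        approxPolyDP(Mat(contours[i]), result, arcLength(Mat(contours[i]), true) * 0.02, true);
        if (result.size() == 3)
        {
            cout << "TRIANGLE\n";
        }
        // ... remaining vertex-count checks unchanged
    }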

Displaying video using vector<Mat> not working properly

I am trying to display a video in a separate function. For this I am using a vector<Mat>: I push_back each frame into it and then pass the vector to the function, but the function displays a single frame repeatedly. My code is below. Please tell me what I am doing wrong.
// newproject.cpp : Defines the entry point for the console application.
#include "stdafx.h"
#include "highgui.h"
#include <stdio.h>
#include <cv.h>
#include <conio.h>
#include <opencv2/imgproc/imgproc.hpp> // Gaussian Blur
#include <opencv2/core/core.hpp>       // Basic OpenCV structures (cv::Mat, Scalar)
#include <opencv2/highgui/highgui.hpp>
#include <iostream>

using namespace cv;
using namespace std;

class frameprocessing {
    Mat hsv_base;
    MatND hist_base;

public:
    void hsv_histogram(Mat Frame)
    {
        cvtColor(Frame, hsv_base, CV_BGR2HSV);
        int h_bins = 50;
        int s_bins = 32;
        int histSize[] = { h_bins, s_bins };
        float h_ranges[] = { 0, 256 }; // note: OpenCV hue values run 0-180; these two ranges appear swapped
        float s_ranges[] = { 0, 180 };
        const float* ranges[] = { h_ranges, s_ranges };
        int channels[] = { 0, 1 };
        calcHist(&hsv_base, 1, channels, Mat(), hist_base, 2, histSize, ranges, true, false);
    }
};

class video {
    Mat frame;
    string filename;
    double dWidth;
    double dHeight;

public:
    video()
    {
    }

    video(string videoname)
    {
        vector<Mat> videoframes;
        std::vector<Mat>::iterator it;
        it = videoframes.begin();
        filename = videoname;
        VideoCapture capture(filename);
        if (!capture.isOpened())
        {
            exit(0);
        }
        dWidth = capture.get(CV_CAP_PROP_FRAME_WIDTH);   // get the width of frames of the video
        dHeight = capture.get(CV_CAP_PROP_FRAME_HEIGHT); // get the height of frames of the video
        frameprocessing obj;
        for ( ; ; )
        {
            capture >> frame;
            if (frame.empty())
                break;
            obj.hsv_histogram(frame);
            videoframes.push_back(frame);
        }
        displayvideo(videoframes);
        // waitKey(0); // key press to close window
    }

    void writer()
    {
        Size frameSize(static_cast<int>(dWidth), static_cast<int>(dHeight));
        VideoWriter oVideoWriter("D:/MyVideo.avi", CV_FOURCC('P','I','M','1'), 20, frameSize, true); // initialize the VideoWriter object
        if (!oVideoWriter.isOpened()) // if the VideoWriter failed to initialize, exit the program
        {
            cout << "ERROR: Failed to write the video" << endl;
            exit(0);
        }
    }

    void displayvideo(vector<Mat> videoframe)
    {
        Mat tempframe;
        while (!videoframe.empty()) // Show the image captured in the window and repeat
        {
            tempframe = videoframe.back();
            imshow("video", tempframe);
            videoframe.pop_back();
            waitKey(20); // waits to display frame
        }
        // waitKey(0);
    }

    void displayframe(Mat frame)
    {
        imshow("video", frame);
        waitKey(20); // waits to display frame
    }
};

int _tmain(int argc, _TCHAR* argv[])
{
    video obj("video.avi");
    //obj.readvideo();
}
You need to copy or clone the frame to another Mat and then push that into your vector. Change your code like this:
for ( ; ; )
{
    capture >> frame;
    if (frame.empty())
        break;
    Mat tmp = frame.clone();
    obj.hsv_histogram(tmp);
    videoframes.push_back(tmp);
}
In your code, you are pushing the same buffer allocated for frame into your vector every time, so you end up with an array of Mat headers in the vector all pointing to a single (the same) memory location. To know more about OpenCV Mat and memory allocation, see the documentation.
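A minimal illustration of why this happens (a standalone sketch, not part of the original answer): assigning one Mat to another copies only the header, while clone() copies the pixel data:

    Mat a(2, 2, CV_8UC1, Scalar(0));
    Mat b = a;          // header copy: a and b share the same pixel buffer
    Mat c = a.clone();  // deep copy: c owns its own buffer
    a.setTo(255);
    // b now contains 255 everywhere (shared data); c still contains 0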