C++ RealSense OpenCV face detection - access violation

I am new to RealSense and C++, so this question might be an easy one.
However, I have not been able to solve it even after searching for half a day.
I am trying to use RealSense with OpenCV face detection (Haar cascade).
But when I call 'face_cascade.detectMultiScale', the project gets an access violation error
(just like the picture underneath).
My code is this:
// License: Apache 2.0.See LICENSE file in root directory.
// Copyright(c) 2017 Intel Corporation. All Rights Reserved.
#include <rs.hpp> // Include RealSense Cross Platform API
#include <opencv2/opencv.hpp> // Include OpenCV API
#include "opencv2/objdetect.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
using namespace std;
using namespace cv;
//String face_cascade_name;
CascadeClassifier face_cascade;
string window_name = "Face detection";
void detectAndDisplay(Mat frame)
{
std::vector<Rect> faces;
Mat frame_gray;
cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
equalizeHist(frame_gray, frame_gray);
if (frame.empty()) {
printf("error, no data");
}
else {
printf("no problem");
}
//face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
face_cascade.detectMultiScale(frame_gray, faces , 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(500, 500));
for (size_t i = 0; i < faces.size(); i++)
{
Point center(faces[i].x + faces[i].width / 2, faces[i].y + faces[i].height / 2);
ellipse(frame, center, Size(faces[i].width / 2, faces[i].height / 2),
0, 0, 360, Scalar(0, 0, 255), 4, 8, 0);
}
imshow(window_name, frame);
}
int main(int argc, char * argv[]) try
{
// Declare depth colorizer for pretty visualization of depth data
rs2::colorizer color_map;
// Declare RealSense pipeline, encapsulating the actual device and sensors
rs2::pipeline pipe;
face_cascade.load("C:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt.xml");
//if (!face_cascade.load(face_cascade_name)) { printf("--(!)Error loading face cascade\n"); };
// Start streaming with default recommended configuration
pipe.start();
const auto window_name = "Display Image";
namedWindow(window_name, WINDOW_AUTOSIZE);
while (waitKey(1) < 0 && cvGetWindowHandle(window_name))
{
rs2::frameset data = pipe.wait_for_frames(); // Wait for next set of frames from the camera
//rs2::frame depth = color_map(data.get_depth_frame());
rs2::frame color = data.get_color_frame();
// Query frame size (width and height)
//const int w = depth.as<rs2::video_frame>().get_width();
//const int h = depth.as<rs2::video_frame>().get_height();
const int color_w = color.as<rs2::video_frame>().get_width();
const int color_h = color.as<rs2::video_frame>().get_height();
// Create OpenCV matrix of size (w,h) from the colorized depth data
//Mat image(Size(w, h), CV_8UC3, (void*)depth.get_data(), Mat::AUTO_STEP);
Mat image(Size(color_w, color_h), CV_8UC3, (void*)color.get_data(), Mat::AUTO_STEP);
// Update the window with new data
//imshow(window_name, image);
detectAndDisplay(image);
}
return EXIT_SUCCESS;
}
catch (const rs2::error & e)
{
std::cerr << "RealSense error calling " << e.get_failed_function() << "(" << e.get_failed_args() << "):\n " << e.what() << std::endl;
return EXIT_FAILURE;
}
catch (const std::exception& e)
{
std::cerr << e.what() << std::endl;
return EXIT_FAILURE;
}
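One detail worth flagging in the code above: CascadeClassifier::load returns a bool, and the call on the haarcascade path ignores it (the check is commented out). If the XML file is not found, detectMultiScale ends up running on an empty classifier, which is a common source of exactly this kind of crash. A minimal guard, assuming the same path as in the question, might look like this:

// Sketch: bail out early if the cascade did not load.
// The path is the one from the question; adjust it to your OpenCV install.
if (!face_cascade.load("C:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt.xml")) {
    std::cerr << "Error: could not load face cascade" << std::endl;
    return EXIT_FAILURE;
}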

Related

[OpenCV][C++] Record only the detected motion

I'm working on a project to detect motion on a camera.
I need to start recording video when motion is detected, for example:
Record while motion is being detected
Continue recording for 10 seconds after the motion detection stops
I have a working example that only detects the motion and draws rectangles on the moving parts.
I searched for examples of how to record when motion is detected, but found no good results.
Here is my working code:
#include <iostream>
#include <sstream>
#include <opencv4/opencv2/imgproc.hpp>
#include <opencv4/opencv2/videoio.hpp>
#include <opencv4/opencv2/highgui.hpp>
#include <opencv4/opencv2/video.hpp>
#include <unistd.h>
using namespace cv;
using namespace std;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
int main(int argc, char* argv[])
{
//create Background Subtractor objects
Ptr<BackgroundSubtractor> pBackSub;
pBackSub = createBackgroundSubtractorMOG2();
VideoCapture capture(0);
if (!capture.isOpened()){
//error in opening the video input
cerr << "Unable to open: " << endl;
return 0;
}
Mat frame, fgMask;
sleep(3);
while (true) {
capture >> frame;
if (frame.empty())
break;
//update the background model
pBackSub->apply(frame, fgMask);
imshow("FG Mask", fgMask);
RNG rng(12345);
findContours(fgMask, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE,Point(0, 0));
vector<Rect>boundRect (contours.size());
vector<vector<Point> > contours_poly( contours.size() );
for (int i = 0; i < contours.size();i++) {
if( contourArea(contours[i])< 500)
{
continue;
}
putText(frame, "Motion Detected", Point(10,20), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(0,0,255),2);
approxPolyDP( contours[i], contours_poly[i], 3, true );
boundRect[i] = boundingRect( contours_poly[i] );
Scalar color = Scalar( rng.uniform(0, 256), rng.uniform(0,256), rng.uniform(0,256) );
rectangle( frame, boundRect[i].tl(), boundRect[i].br(), color, 2 );
}
imshow("Frame", frame);
int keyboard = waitKey(30);
if (keyboard == 'q' || keyboard == 27)
break;
}
return 0;
}
I tried adding this to the code:
int frameWidth = 320;
int frameHeight = 240;
cv::Size frameSize = cv::Size(frameWidth, frameHeight);
/* Output file */
int codec = cv::VideoWriter::fourcc('M', 'P', '4', 'V');
cv::VideoWriter outputVideo;
outputVideo.open("rr.mp4", codec, capture.get(cv::CAP_PROP_FPS), frameSize, true);
and after drawing the rectangle I write the frame to the video:
outputVideo.write(frame);
but with that, the saved video is empty and the program crashes.
I already took a look at Motion, but I didn't find an example there.
How can I achieve this?
Thanks,
Talel
I resolved the issue.
I was opening the output video with specific dimensions (320, 240) while saving captured frames that are bigger.
So the solution is to resize the captured frame to fit into the output video.
Here is the final solution if anyone is interested:
Turn the laptop camera into an IP camera with: cam2ip
Here is the source code:
#include <iostream>
#include <sstream>
#include <opencv4/opencv2/imgproc.hpp>
#include <opencv4/opencv2/videoio.hpp>
#include <opencv4/opencv2/highgui.hpp>
#include <opencv4/opencv2/video.hpp>
#include <unistd.h>
using namespace cv;
using namespace std;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
int main(int argc, char* argv[])
{
//create Background Subtractor objects
Ptr<BackgroundSubtractor> pBackSub;
pBackSub = createBackgroundSubtractorMOG2();
const std::string videoStreamAddress = "http://192.168.20.100:56000/mjpeg";
cv::VideoCapture vcap;
if(!vcap.open(videoStreamAddress)) {
std::cout << "Error opening video stream or file" << std::endl;
return -1;
}
Mat frame, fgMask;
int frameWidth = 320;
int frameHeight = 240;
cv::Size frameSize = cv::Size(frameWidth, frameHeight);
/* Output file */
int codec = cv::VideoWriter::fourcc('M', 'P', '4', 'V');
cv::VideoWriter outputVideo;
outputVideo.open("rr.mp4", codec, vcap.get(cv::CAP_PROP_FPS), frameSize, true);
sleep(3);
while (true) {
vcap >> frame;
if (frame.empty())
break;
//update the background model
pBackSub->apply(frame, fgMask);
imshow("FG Mask", fgMask);
RNG rng(12345);
findContours(fgMask, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE,Point(0, 0));
vector<Rect>boundRect (contours.size());
vector<vector<Point> > contours_poly( contours.size() );
bool motionDetected = false;
for (size_t i = 0; i < contours.size(); i++) {
if( contourArea(contours[i])< 500)
{
continue;
}
motionDetected = true;
putText(frame, "Motion Detected", Point(10,20), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(0,0,255),2);
approxPolyDP( contours[i], contours_poly[i], 3, true );
boundRect[i] = boundingRect( contours_poly[i] );
Scalar color = Scalar( rng.uniform(0, 256), rng.uniform(0,256), rng.uniform(0,256) );
rectangle( frame, boundRect[i].tl(), boundRect[i].br(), color, 2 );
}
// Resize a copy down to the writer's frame size and write once per frame with
// motion, instead of resizing the displayed frame inside the contour loop.
if (motionDetected) {
Mat resized;
resize(frame, resized, frameSize);
outputVideo.write(resized);
}
imshow("Frame", frame);
int keyboard = waitKey(30);
if (keyboard == 'q' || keyboard == 27)
break;
}
outputVideo.release();
return 0;
}
Further enhancement suggestions:
Make sure that lighting changes are not picked up as motion
Open the output video with the same dimensions as the capture (a sketch of this follows below)
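As a rough sketch of the second suggestion (assuming the same vcap capture object as in the code above), the writer can be opened with the capture's own frame size so no resize is needed:

// Sketch: open the VideoWriter with the capture's reported frame size.
cv::Size captureSize(
    (int) vcap.get(cv::CAP_PROP_FRAME_WIDTH),
    (int) vcap.get(cv::CAP_PROP_FRAME_HEIGHT));
// CAP_PROP_FPS may be 0 for some streams; fall back to a fixed rate if needed.
cv::VideoWriter outputVideo("rr.mp4",
    cv::VideoWriter::fourcc('M', 'P', '4', 'V'),
    vcap.get(cv::CAP_PROP_FPS),
    captureSize, true);
// Frames read from vcap can then be written directly, without resizing.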

Application hangs at detectMultiscale

I am developing a face detection algorithm in C++ in Visual Studio with the Haar cascade method.
At the line below, the application quits, while the same application works perfectly in a Linux environment.
Is there any OpenCV configuration that might cause this?
Any help would be really appreciated.
frontfaceCascade.detectMultiScale(grey_image, faces, 1.1, 0, 0 |
CV_HAAR_SCALE_IMAGE, cv::Size(30, 30));
Thanks.
#include "stdafx.h"
#include <opencv2/opencv.hpp>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include <iostream>
#include <thread>
#include <stdlib.h>
#include <corecrt_math_defines.h>
using namespace std;
using namespace cv;
vector<Mat> Frames;
void getFrames(char* fileName);
cv::CascadeClassifier frontfaceCascade;
cv::CascadeClassifier sidefaceCascade;
int main(int argc, char *argv[]) {
if (argc < 2) {
cout << " enter file name: \n";
exit(1);
}
char* fileName = argv[1];
// if( !frontfaceCascade.load("lbpcascade_frontalface.xml")){
if (!frontfaceCascade.load("haarcascade_frontalface_default.xml")) {
std::cout << "Error Loading frontface haarcascade" << std::endl;
}
if (!sidefaceCascade.load("haarcascade_profileface.xml"))
{
std::cout << "Error Loading sideface haarcascade" << std::endl;
}
namedWindow("Faces", WINDOW_NORMAL);
resizeWindow("Faces", 100, 400);
moveWindow("Faces", 30, 100);
getFrames(fileName);
return 0;
}
void getFrames(char* fileName) {
cv::VideoCapture vcap(fileName);
if (!vcap.isOpened()) {
cout << "Error opening video stream or file" << endl;
return;
}
cv::Mat grey_image;
Mat img;
namedWindow("Frames", CV_WINDOW_AUTOSIZE);
resizeWindow("Frames", 640, 360);
moveWindow("Frames", 130, 100);
int init = 0;
while (1)
{
vcap >> img;
// If the frame is empty, break immediately
if (img.empty())
break;
unsigned int prevSize = 0;
resize(img, img, cv::Size(640, 360));
cv::cvtColor(img, grey_image, CV_RGB2GRAY);
//cv::equalizeHist(grey_image, grey_image);
std::vector<cv::Rect> faces;
frontfaceCascade.detectMultiScale(grey_image, faces, 1.1, 0, 0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30)); // Minimum size of obj
//sidefaceCascade.detectMultiScale(grey_image, faces, 1.1, 4, 0 | 1, Size(40, 40), Size(100, 100)); // Minimum size of obj
/****/
}
destroyAllWindows();
vcap.release();
}
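Two things may be worth checking here, as a sketch rather than a definitive fix: the load() calls above only print on failure and then carry on, so detectMultiScale can run on an empty cascade; and minNeighbors is 0, which disables candidate grouping and returns every raw candidate rectangle, which can be a very large list. Inside the getFrames loop, the call could be guarded like this:

// Sketch: guard against an unloaded cascade and use a non-zero minNeighbors
// so raw candidates are grouped instead of all being returned.
if (frontfaceCascade.empty()) {
    std::cerr << "Front-face cascade not loaded, skipping detection" << std::endl;
    return;
}
frontfaceCascade.detectMultiScale(grey_image, faces, 1.1, 3,
    0 | CV_HAAR_SCALE_IMAGE, cv::Size(30, 30));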

opencv playing video slow speed

I have a problem with video speed: when I just play a video file in OpenCV, it plays at normal speed.
The problem starts when I apply face and eye detection to the video; it then plays very slowly. I have tried changing the waitKey() values, but the problem still exists.
Why is this happening?
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
// Function Headers
void detectAndDisplay(Mat frame);
// Global variables
// Copy this file from opencv/data/haarscascades to target folder
string face_cascade_name = "haarcascade_frontalface_alt.xml";
string eye_cascade_name = "haarcascade_mcs_eyepair_big.xml";
CascadeClassifier face_cascade;
CascadeClassifier eye_cascade;
string window_name = "Capture - Face detection";
int filenumber; // Number of file to be saved
string filename;
int main(void)
{
VideoCapture capture("m.mp4");
if (!capture.isOpened()) // check if we succeeded
return -1;
// Load the cascade
if (!face_cascade.load(face_cascade_name))
{
printf("--(!)Error loading\n");
return (-1);
}
if (!eye_cascade.load(eye_cascade_name))
{
printf("--(!)Error loading\n eye ");
return (-1);
}
// Read the video stream
Mat frame;
for (;;)
{
capture >> frame;
// Apply the classifier to the frame
if (!frame.empty())
{
detectAndDisplay(frame);
}
else
{
printf(" --(!) No captured frame -- Break!");
break;
}
waitKey(40);
}
return 0;
}
void detectAndDisplay(Mat frame)
{
std::vector<Rect> faces;
std::vector<Rect> eyes;
Mat frame_gray;
Mat crop;
Mat res;
Mat gray;
string text;
stringstream sstm;
cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
equalizeHist(frame_gray, frame_gray);
// Detect faces
face_cascade.detectMultiScale(frame_gray, faces, 1.1, 4, CASCADE_SCALE_IMAGE, Size(20, 20));
// Set Region of Interest
size_t i = 0; // ic is index of current element
for (i = 0; i < faces.size(); i++) // Iterate through all current elements (detected faces)
{
Point pt1(faces[i].x, faces[i].y); // Display detected faces on main window - live stream from camera
Point pt2((faces[i].x + faces[i].height), (faces[i].y + faces[i].width));
rectangle(frame, pt1, pt2, Scalar(0, 255, 0), 2, 8, 0);
// set ROI for the eyes
Rect Roi = faces[i];
Roi.height = Roi.height / 4;
Roi.y = Roi.y + Roi.height;
cv::Mat crop = frame(Roi);
cvtColor(crop, gray, CV_BGR2GRAY);
imshow("ROI", gray);
eye_cascade.detectMultiScale(gray, eyes, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(20, 20));
for (size_t j = 0; j < eyes.size(); j++)
{
Rect r(Roi.x + eyes[j].x, Roi.y + eyes[j].y, eyes[j].width, eyes[j].height);
rectangle(frame, r, Scalar(255, 0, 0), 3, 1, 0);
// convert roi to ychannel
Mat eye_region = frame(r).clone(); // r holds the eye rectangle in full-frame coordinates
Mat eye_region_YCbCr;
cvtColor(eye_region, eye_region_YCbCr, CV_BGR2YCrCb);
vector<Mat> channels;
split(eye_region_YCbCr, channels);
Mat eye_region_Ychannel = channels[0].clone();
imshow("Y", eye_region_Ychannel);
// end of convert
}
}
// Show image
imshow("original", frame);
if (!crop.empty())
{
imshow("detected", crop);
}
else
{
destroyWindow("detected");
}
}
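Haar detection cost grows quickly with image resolution, so one common way to keep playback closer to real time is to run the cascade on a downscaled copy and map the detections back up. A rough sketch, assuming a fixed 0.5 scale factor inside detectAndDisplay:

// Sketch: detect faces on a half-size grayscale image, then scale the
// resulting rectangles back to full-frame coordinates.
Mat small_gray;
resize(frame_gray, small_gray, Size(), 0.5, 0.5, INTER_LINEAR);
face_cascade.detectMultiScale(small_gray, faces, 1.1, 4, CASCADE_SCALE_IMAGE, Size(10, 10));
for (size_t k = 0; k < faces.size(); k++) {
    faces[k].x *= 2;
    faces[k].y *= 2;
    faces[k].width *= 2;
    faces[k].height *= 2;
}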

face tracking (eigen face) opencv

I want to do eigenface recognition with OpenCV, and this is my code.
#include "opencv2/core/core.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/objdetect/objdetect.hpp"
#include <iostream>
#include <fstream>
#include <sstream>
using namespace cv;
using namespace std;
static void read_csv(const string& filename, vector<Mat>& images,vector<int>& labels, char separator = ';') {
ifstream file(filename.c_str(), ifstream::in);
if (!file) {
string error_message = "No valid input file was given, please check the given filename.";
CV_Error(CV_StsBadArg, error_message);
}
string line, path, classlabel;
while (getline(file, line)) {
stringstream liness(line);
getline(liness, path, separator);
getline(liness, classlabel);
if(!path.empty() && !classlabel.empty()) {
images.push_back(imread(path, 0));
labels.push_back(atoi(classlabel.c_str()));
}
}
}
int main(int argc, const char *argv[]) {
string fn_haar = string("C:/opencv/sources/data/haarcascades/haarcascade_frontalface_alt_tree.xml");
string fn_csv = string("C:/Users/faho0odywbas/Desktop/csv.ext");
int deviceId = atoi("0");
vector<Mat> images;
vector<int> labels;
try {
read_csv(fn_csv, images, labels);
} catch (cv::Exception& e) {
cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
exit(1);
}
int im_width = images[0].cols;
int im_height = images[0].rows;
Ptr<FaceRecognizer> model = createEigenFaceRecognizer(23,2500.0);
CascadeClassifier haar_cascade;
haar_cascade.load(fn_haar);
VideoCapture cap(deviceId);
namedWindow("face_recognizer", CV_WINDOW_AUTOSIZE);
if(!cap.isOpened()) {
cerr << "Capture Device ID " << deviceId << "cannot be opened." << endl;
return -1;
}
vector< Rect_<int> > faces;
Mat frame;
waitKey(1500);
for(;;) {
cap >> frame;
Mat original = frame.clone();
Mat gray;
cvtColor(original, gray, CV_BGR2GRAY);
haar_cascade.detectMultiScale(gray, faces);
for(int i = 0; i < faces.size(); i++) {
Rect face_i = faces[i];
Mat face = gray(face_i);
Mat face_resized;
cv::resize(face, face_resized, Size(im_width, im_height), 1.0, 1.0, INTER_CUBIC);
int prediction = model->predict(face_resized);
rectangle(original, face_i, CV_RGB(255, 0,0), 1);
string box_text = format("Sujeto = %d", prediction);
int pos_x = std::max(face_i.tl().x - 10, 0);
int pos_y = std::max(face_i.tl().y - 10, 0);
putText(original, box_text, Point(pos_x, pos_y), FONT_HERSHEY_PLAIN, 1.0, CV_RGB(0,255,0), 2.0);
}
imshow("face_recognizer", original);
char key = (char) waitKey(1);
if(key == 27){
destroyAllWindows();
break;
}
}
return 0;
}
I'm sure the code is right because it worked on my friend's computer, so I think the problem is with my Visual Studio or with Windows.
The error that comes up is:
" First-chance exception at 0x74C44598 in Project3.exe: Microsoft C++ exception: cv::Exception at memory location 0x00D7ED2C.
If there is a handler for this exception, the program may be safely continued. "
and when I click Break, it also shows me this error:
wkernelbase.pdb not loaded
So, what do I have to do?
I have already done Tools->Options->Debugging->Symbols->Select “Microsoft Symbol Servers”,
but I still get the same error.
I have another question: if I want to hit "s" to start recording the face for every frame, and hit "d" to end the recording, how can I do that? (See the sketch below.)
Thanks a lot
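One way to do the recording toggle is to drive a flag from waitKey inside the existing for(;;) loop. This is only a sketch under that assumption: 'recording', 'savedCount', and the output filename pattern are illustrative and not part of the original code, while 'gray' and 'faces' are the variables already computed above.

// Sketch: press 's' to start saving a face crop per frame, 'd' to stop,
// Esc to quit; this sits at the end of the existing for(;;) loop.
static bool recording = false;
static int savedCount = 0;
char key = (char) waitKey(1);
if (key == 's') recording = true;
else if (key == 'd') recording = false;
else if (key == 27) { destroyAllWindows(); break; }
if (recording && !faces.empty()) {
    // Save the first detected face; the filename pattern is illustrative.
    imwrite(format("face_%06d.png", savedCount++), gray(faces[0]));
}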

Saving Images of Detected Faces in OpenCV

I have code that detects faces and saves multiple cropped images of them to a file path. My code doesn't stop saving images of detected faces until I physically close the program. For every second a face is detected on the webcam, my code saves 6 images of it.
Is it possible to have it save just one image per face detected? For example, if there is one face, only one image is saved; if there are two faces, an image of each face is saved, and so on. My code is below. Can anyone help me?
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
void detectAndDisplay(Mat frame);
string face_cascade_name = "C:\\opencv\\sources\\data\\haarcascades\\haarcascade_frontalface_alt2.xml";
CascadeClassifier face_cascade;
string window_name = "Window";
int filenumber;
string filename;
int main(void)
{
VideoCapture capture(0);
if (!capture.isOpened())
return -1;
if (!face_cascade.load(face_cascade_name))
{
cout << "error" << endl;
return (-1);
};
Mat frame;
for (;;)
{
capture >> frame;
if (!frame.empty())
{
detectAndDisplay(frame);
}
else
{
cout << "error2" << endl;
break;
}
int c = waitKey(10);
if (27 == char(c))
{
break;
}
}
return 0;
}
void detectAndDisplay(Mat frame)
{
std::vector<Rect> faces;
Mat frame_gray;
Mat crop;
Mat res;
Mat gray;
string text;
stringstream sstm;
cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
equalizeHist(frame_gray, frame_gray);
face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
cv::Rect roi_b;
cv::Rect roi_c;
size_t ic = 0;
int ac = 0;
size_t ib = 0;
int ab = 0;
for (ic = 0; ic < faces.size(); ic++)
{
roi_c.x = faces[ic].x;
roi_c.y = faces[ic].y;
roi_c.width = (faces[ic].width);
roi_c.height = (faces[ic].height);
ac = roi_c.width * roi_c.height;
roi_b.x = faces[ib].x;
roi_b.y = faces[ib].y;
roi_b.width = (faces[ib].width);
roi_b.height = (faces[ib].height);
crop = frame(roi_b);
resize(crop, res, Size(128, 128), 0, 0, INTER_LINEAR);
cvtColor(crop, gray, CV_BGR2GRAY);
filename = "C:\\Users\\Desktop\\Faces\\face";
stringstream ssfn;
ssfn << filename.c_str() << filenumber << ".jpg";
filename = ssfn.str();
cv::imwrite(filename, res);
filenumber++;
Point pt1(faces[ic].x, faces[ic].y);
Point pt2((faces[ic].x + faces[ic].height), (faces[ic].y + faces[ic].width));
rectangle(frame, pt1, pt2, Scalar(0, 255, 0), 2, 8, 0);
}
sstm << "Crop area size: " << roi_b.width << "x" << roi_b.height << " Filename: " << filename;
text = sstm.str();
if (!crop.empty())
{
imshow("detected", crop);
}
else
destroyWindow("detected");
}
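detectAndDisplay runs for every captured frame, and the loop above writes a file on each call, which is why images keep accumulating. One possible approach, sketched under the assumption that "one image per face detected" means writing only when the number of detected faces changes, is to remember the previous count; prevFaceCount is a new illustrative variable, not part of the original code:

// Sketch: only write crops when the number of detected faces changes,
// instead of on every frame. Placed inside detectAndDisplay after detectMultiScale.
static size_t prevFaceCount = 0;
if (faces.size() != prevFaceCount)
{
    for (size_t ic = 0; ic < faces.size(); ic++)
    {
        Mat faceCrop = frame(faces[ic]);
        Mat faceResized;
        resize(faceCrop, faceResized, Size(128, 128), 0, 0, INTER_LINEAR);
        stringstream ssfn;
        ssfn << "C:\\Users\\Desktop\\Faces\\face" << filenumber << ".jpg";  // path from the question
        imwrite(ssfn.str(), faceResized);
        filenumber++;
    }
    prevFaceCount = faces.size();
}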