Every time I run this code my Raspberry Pi 3B+ reboots - C++

So I'm working on a project to make a robotic car recognize a stop sign and some objects and take actions based on that.
Now, whenever I run this code, it barely opens the camera before I lose the connection to VNC Viewer.
I use a power bank with a 5 V, 3 A output.
There is no problem when I run a program that only opens the camera; it happens only with the code below.
#include <opencv2/opencv.hpp>
#include <raspicam_cv.h>
#include <iostream>
#include <chrono>
#include <ctime>
#include <wiringPi.h>
using namespace std;
using namespace cv;
using namespace raspicam;
// Image Processing variables
Mat frame, Matrix, framePers, frameGray, frameThresh, frameEdge, frameFinal, frameFinalDuplicate, frameFinalDuplicate1;
Mat ROILane, ROILaneEnd;
int LeftLanePos, RightLanePos, frameCenter, laneCenter, Result, laneEnd;
RaspiCam_Cv Camera;
stringstream ss;
vector<int> histrogramLane;
vector<int> histrogramLaneEnd;
Point2f Source[] = {Point2f(40,135),Point2f(310,135),Point2f(10,185), Point2f(330,185)};
Point2f Destination[] = {Point2f(100,0),Point2f(280,0),Point2f(100,240), Point2f(280,240)};
//Machine Learning variables
CascadeClassifier Stop_Cascade;
Mat frame_stop;
Mat frame_Stop, RoI_Stop, gray_Stop;
vector<Rect> Stop;
int dist_Stop;
void Setup ( int argc,char **argv, RaspiCam_Cv &Camera )
{
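// Note: each argument like ( "-w",argc,argv,400 ) is a comma-operator expression, so only its last value (e.g. 400) is actually passed to Camera.set.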
Camera.set ( CAP_PROP_FRAME_WIDTH, ( "-w",argc,argv,400 ) );
Camera.set ( CAP_PROP_FRAME_HEIGHT, ( "-h",argc,argv,240 ) );
Camera.set ( CAP_PROP_BRIGHTNESS, ( "-br",argc,argv,60) );
Camera.set ( CAP_PROP_CONTRAST ,( "-co",argc,argv,70 ) );
Camera.set ( CAP_PROP_SATURATION, ( "-sa",argc,argv,50 ) );
Camera.set ( CAP_PROP_GAIN, ( "-g",argc,argv ,50) );
Camera.set ( CAP_PROP_FPS, ( "-fps",argc,argv,0));
}
void Capture()
{
Camera.grab();
Camera.retrieve( frame);
cvtColor(frame, frame_Stop, COLOR_BGR2RGB); // convert the copy first, from the original BGR frame
cvtColor(frame, frame, COLOR_BGR2RGB);
}
void Perspective()
{
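// Source/Destination never change, so Matrix could be computed once in main() instead of every frame.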
line(frame,Source[0], Source[1], Scalar(0,0,255), 2);
line(frame,Source[1], Source[3], Scalar(0,0,255), 2);
line(frame,Source[3], Source[2], Scalar(0,0,255), 2);
line(frame,Source[2], Source[0], Scalar(0,0,255), 2);
Matrix = getPerspectiveTransform(Source, Destination);
warpPerspective(frame, framePers, Matrix, Size(400,240));
}
void Threshold()
{
cvtColor(framePers, frameGray, COLOR_RGB2GRAY);
inRange(frameGray, 240, 255, frameThresh);
Canny(frameGray,frameEdge, 900, 900, 3, false);
add(frameThresh, frameEdge, frameFinal);
cvtColor(frameFinal, frameFinal, COLOR_GRAY2RGB);
cvtColor(frameFinal, frameFinalDuplicate, COLOR_RGB2BGR); //used in histrogram function only
cvtColor(frameFinal, frameFinalDuplicate1, COLOR_RGB2BGR); //used in histrogram function only
}
void Histrogram()
{
histrogramLane.reserve(400); // reserve (not resize): values are appended with push_back below
histrogramLane.clear();
for(int i=0; i<400; i++) //frame.size().width = 400
{
ROILane = frameFinalDuplicate(Rect(i,140,1,100));
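// divide(255, src, dst) maps 255 -> 1 and 0 -> 0 (OpenCV defines division by zero as 0), so the sum below counts the white pixels in this column.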
divide(255, ROILane, ROILane);
histrogramLane.push_back((int)(sum(ROILane)[0]));
}
histrogramLaneEnd.reserve(400);
histrogramLaneEnd.clear();
for (int i = 0; i < 400; i++)
{
ROILaneEnd = frameFinalDuplicate1(Rect(i, 0, 1, 240));
divide(255, ROILaneEnd, ROILaneEnd);
histrogramLaneEnd.push_back((int)(sum(ROILaneEnd)[0]));
}
laneEnd = sum(histrogramLaneEnd)[0];
cout<<"Lane END = "<<laneEnd<<endl;
}
void LaneFinder()
{
vector<int>:: iterator LeftPtr;
LeftPtr = max_element(histrogramLane.begin(), histrogramLane.begin() + 150);
LeftLanePos = distance(histrogramLane.begin(), LeftPtr);
vector<int>:: iterator RightPtr;
RightPtr = max_element(histrogramLane.begin() +250, histrogramLane.end());
RightLanePos = distance(histrogramLane.begin(), RightPtr);
line(frameFinal, Point2f(LeftLanePos, 0), Point2f(LeftLanePos, 240), Scalar(0, 255,0), 2);
line(frameFinal, Point2f(RightLanePos, 0), Point2f(RightLanePos, 240), Scalar(0,255,0), 2);
}
void LaneCenter()
{
laneCenter = (RightLanePos-LeftLanePos)/2 +LeftLanePos;
frameCenter = 188;
line(frameFinal, Point2f(laneCenter,0), Point2f(laneCenter,240), Scalar(0,255,0), 3);
line(frameFinal, Point2f(frameCenter,0), Point2f(frameCenter,240), Scalar(255,0,0), 3);
Result = laneCenter-frameCenter;
}
void Stop_detection()
{
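// Note: the cascade is reloaded from disk on every frame here; loading it once in main() would be much faster.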
if(!Stop_Cascade.load("//home//pi/Desktop//MACHINE LEARNING//Stop_cascade.xml"))
{
printf("Unable to open stop cascade file");
}
RoI_Stop = frame_Stop(Rect(0,0,400,240));
cvtColor(RoI_Stop, gray_Stop, COLOR_RGB2GRAY);
equalizeHist(gray_Stop, gray_Stop);
Stop_Cascade.detectMultiScale(gray_Stop, Stop);
for(int i=0; i<Stop.size(); i++)
{
Point P1(Stop[i].x, Stop[i].y);
Point P2(Stop[i].x + Stop[i].width, Stop[i].y + Stop[i].height); // y, not x, for the bottom-right corner
rectangle(RoI_Stop, P1, P2, Scalar(0, 0, 255), 2);
putText(RoI_Stop, "Stop Sign", P1, FONT_HERSHEY_PLAIN, 1, Scalar(0, 0, 255, 255), 2);
dist_Stop = (-0.91)*(P2.x-P1.x) + 102.597;
ss.str(" ");
ss.clear();
ss<<"D = "<<dist_Stop<<"cm";
putText(RoI_Stop, ss.str(), Point2f(1,130), 0,1, Scalar(0,0,255), 2);
}
}
int main(int argc,char **argv)
{
wiringPiSetup();
pinMode(21, OUTPUT);
pinMode(22, OUTPUT);
pinMode(23, OUTPUT);
pinMode(24, OUTPUT);
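// Pins 21-24 output a 4-bit command code (see the "decimal = ..." comments below), presumably read by a separate motor controller.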
Setup(argc, argv, Camera);
cout<<"Connecting to camera"<<endl;
if (!Camera.open())
{
cout<<"Failed to Connect"<<endl;
return -1; // don't continue with an unopened camera
}
cout<<"Camera Id = "<<Camera.getId()<<endl;
while(1)
{
auto start = std::chrono::system_clock::now();
Capture();
Perspective();
Threshold();
Histrogram();
LaneFinder();
LaneCenter();
Stop_detection();
if (laneEnd > 6000)
{
digitalWrite(21, 1);
digitalWrite(22, 1); //decimal = 7
digitalWrite(23, 1);
digitalWrite(24, 0);
cout<<"Lane End"<<endl;
}
if (Result == 0)
{
digitalWrite(21, 0);
digitalWrite(22, 0); //decimal = 0
digitalWrite(23, 0);
digitalWrite(24, 0);
cout<<"Forward"<<endl;
}
else if (Result >0 && Result <10)
{
digitalWrite(21, 1);
digitalWrite(22, 0); //decimal = 1
digitalWrite(23, 0);
digitalWrite(24, 0);
cout<<"Right1"<<endl;
}
else if (Result >=10 && Result <20)
{
digitalWrite(21, 0);
digitalWrite(22, 1); //decimal = 2
digitalWrite(23, 0);
digitalWrite(24, 0);
cout<<"Right2"<<endl;
}
else if (Result >20)
{
digitalWrite(21, 1);
digitalWrite(22, 1); //decimal = 3
digitalWrite(23, 0);
digitalWrite(24, 0);
cout<<"Right3"<<endl;
}
else if (Result <0 && Result >-10)
{
digitalWrite(21, 0);
digitalWrite(22, 0); //decimal = 4
digitalWrite(23, 1);
digitalWrite(24, 0);
cout<<"Left1"<<endl;
}
else if (Result <=-10 && Result >-20)
{
digitalWrite(21, 1);
digitalWrite(22, 0); //decimal = 5
digitalWrite(23, 1);
digitalWrite(24, 0);
cout<<"Left2"<<endl;
}
else if (Result <-20)
{
digitalWrite(21, 0);
digitalWrite(22, 1); //decimal = 6
digitalWrite(23, 1);
digitalWrite(24, 0);
cout<<"Left3"<<endl;
}
if (laneEnd > 6000)
{
ss.str(" ");
ss.clear();
ss<<" Lane End";
putText(frame, ss.str(), Point2f(1,50), 0,1, Scalar(255,0,0), 2);
}
else if (Result == 0)
{
ss.str(" ");
ss.clear();
ss<<"Result = "<<Result<<" Move Forward";
putText(frame, ss.str(), Point2f(1,50), 0,1, Scalar(0,0,255), 2);
}
else if (Result > 0)
{
ss.str(" ");
ss.clear();
ss<<"Result = "<<Result<<" Move Right";
putText(frame, ss.str(), Point2f(1,50), 0,1, Scalar(0,0,255), 2);
}
else if (Result < 0)
{
ss.str(" ");
ss.clear();
ss<<"Result = "<<Result<<" Move Left";
putText(frame, ss.str(), Point2f(1,50), 0,1, Scalar(0,0,255), 2);
}
namedWindow("orignal", WINDOW_KEEPRATIO);
moveWindow("orignal", 0, 100);
resizeWindow("orignal", 640, 480);
imshow("orignal", frame);
namedWindow("Perspective", WINDOW_KEEPRATIO);
moveWindow("Perspective", 640, 100);
resizeWindow("Perspective", 640, 480);
imshow("Perspective", framePers);
namedWindow("Final", WINDOW_KEEPRATIO);
moveWindow("Final", 1280, 100);
resizeWindow("Final", 640, 480);
imshow("Final", frameFinal);
namedWindow("Stop Sign", WINDOW_KEEPRATIO);
moveWindow("Stop Sign", 1280, 580);
resizeWindow("Stop Sign", 640, 480);
imshow("Stop Sign", RoI_Stop);
waitKey(1);
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> elapsed_seconds = end-start;
// float t = elapsed_seconds.count();
// int FPS = 1/t;
// cout<<"FPS = "<<FPS<<endl;
}
return 0;
}

Related

Is there a way to adjust the brightness and contrast automatically depending on the surroundings in OpenCV C++?

The Pi camera detects the objects, but it does not recognize them if the brightness or the contrast differs because of the location. It is a Pi car, so the location changes a lot, and so does the intensity of light in the room. Is there code that can adjust the brightness/contrast of the Pi camera while it's moving (not as a still picture)?
#include <opencv2/opencv.hpp>
#include <raspicam_cv.h>
#include <iostream>
#include <chrono>
#include <ctime>
#include <wiringPi.h>
using namespace std;
using namespace cv;
using namespace raspicam;
// Image Processing variables
Mat frame, Matrix, framePers, frameGray, frameThresh, frameEdge, frameFinal, frameFinalDuplicate, frameFinalDuplicate1;
Mat ROILane, ROILaneEnd;
int LeftLanePos, RightLanePos, frameCenter, laneCenter, Result, laneEnd;
RaspiCam_Cv Camera;
stringstream ss;
vector<int> histrogramLane;
vector<int> histrogramLaneEnd;
Point2f Source[] = {Point2f(75,170),Point2f(335,170),Point2f(45,220), Point2f(355,220)};
Point2f Destination[] = {Point2f(100,0),Point2f(280,0),Point2f(100,240), Point2f(280,240)};
//Machine Learning variables
CascadeClassifier Stop_Cascade, Arrow_Cascade;
Mat frame_Stop, RoI_Stop, gray_Stop, frame_Arrow, RoI_Arrow, gray_Arrow;
vector<Rect> Stop, Arrow;
int dist_Stop, dist_Arrow;
void Setup ( int argc,char **argv, RaspiCam_Cv &Camera )
{
Camera.set ( CAP_PROP_FRAME_WIDTH, ( "-w",argc,argv,400 ) );
Camera.set ( CAP_PROP_FRAME_HEIGHT, ( "-h",argc,argv,240 ) );
Camera.set ( CAP_PROP_BRIGHTNESS, ( "-br",argc,argv,55) );
Camera.set ( CAP_PROP_CONTRAST ,( "-co",argc,argv,80 ) );
Camera.set ( CAP_PROP_SATURATION, ( "-sa",argc,argv,50 ) );
Camera.set ( CAP_PROP_GAIN, ( "-g",argc,argv ,50) );
Camera.set ( CAP_PROP_FPS, ( "-fps",argc,argv,0));
}
void Capture()
{
Camera.grab();
Camera.retrieve( frame);
cvtColor(frame, frame_Stop, COLOR_BGR2RGB);
cvtColor(frame, frame_Arrow, COLOR_BGR2RGB);
cvtColor(frame, frame, COLOR_BGR2RGB);
}
void Perspective()
{
line(frame,Source[0], Source[1], Scalar(0,0,255), 2);
line(frame,Source[1], Source[3], Scalar(0,0,255), 2);
line(frame,Source[3], Source[2], Scalar(0,0,255), 2);
line(frame,Source[2], Source[0], Scalar(0,0,255), 2);
Matrix = getPerspectiveTransform(Source, Destination);
warpPerspective(frame, framePers, Matrix, Size(400,240));
}
void Threshold()
{
cvtColor(framePers, frameGray, COLOR_RGB2GRAY);
inRange(frameGray, 240, 255, frameThresh);
Canny(frameGray,frameEdge, 900, 900, 3, false);
add(frameThresh, frameEdge, frameFinal);
cvtColor(frameFinal, frameFinal, COLOR_GRAY2RGB);
cvtColor(frameFinal, frameFinalDuplicate, COLOR_RGB2BGR); //used in histrogram function only
cvtColor(frameFinal, frameFinalDuplicate1, COLOR_RGB2BGR); //used in histrogram function only
}
void Histrogram()
{
histrogramLane.reserve(400);
histrogramLane.clear();
for(int i=0; i<400; i++) //frame.size().width = 400
{
ROILane = frameFinalDuplicate(Rect(i,140,1,100));
divide(255, ROILane, ROILane);
histrogramLane.push_back((int)(sum(ROILane)[0]));
}
histrogramLaneEnd.reserve(400);
histrogramLaneEnd.clear();
for (int i = 0; i < 400; i++)
{
ROILaneEnd = frameFinalDuplicate1(Rect(i, 0, 1, 240));
divide(255, ROILaneEnd, ROILaneEnd);
histrogramLaneEnd.push_back((int)(sum(ROILaneEnd)[0]));
}
laneEnd = sum(histrogramLaneEnd)[0];
cout<<"Lane END = "<<laneEnd<<endl;
}
void LaneFinder()
{
vector<int>:: iterator LeftPtr;
LeftPtr = max_element(histrogramLane.begin(), histrogramLane.begin() + 150);
LeftLanePos = distance(histrogramLane.begin(), LeftPtr);
vector<int>:: iterator RightPtr;
RightPtr = max_element(histrogramLane.begin() +250, histrogramLane.end());
RightLanePos = distance(histrogramLane.begin(), RightPtr);
line(frameFinal, Point2f(LeftLanePos, 0), Point2f(LeftLanePos, 240), Scalar(0, 255,0), 2);
line(frameFinal, Point2f(RightLanePos, 0), Point2f(RightLanePos, 240), Scalar(0,255,0), 2);
}
void LaneCenter()
{
laneCenter = (RightLanePos-LeftLanePos)/2 +LeftLanePos;
frameCenter = 188;
line(frameFinal, Point2f(laneCenter,0), Point2f(laneCenter,240), Scalar(0,255,0), 3);
line(frameFinal, Point2f(frameCenter,0), Point2f(frameCenter,240), Scalar(255,0,0), 3);
Result = laneCenter-frameCenter;
}
void Stop_detection()
{
if(!Stop_Cascade.load("//home//pi/Desktop//MACHINE LEARNING//Stop_cascade.xml"))
{
printf("Unable to open stop cascade file");
}
RoI_Stop = frame_Stop(Rect(0,0,400,240));
cvtColor(RoI_Stop, gray_Stop, COLOR_RGB2GRAY);
equalizeHist(gray_Stop, gray_Stop);
Stop_Cascade.detectMultiScale(gray_Stop, Stop);
for(int i=0; i<Stop.size(); i++)
{
Point P1(Stop[i].x, Stop[i].y);
Point P2(Stop[i].x + Stop[i].width, Stop[i].y + Stop[i].height); // y, not x, for the bottom-right corner
rectangle(RoI_Stop, P1, P2, Scalar(0, 0, 255), 2);
putText(RoI_Stop, "Stop Sign", P1, FONT_HERSHEY_PLAIN, 1, Scalar(0, 0, 255, 255), 2);
dist_Stop = (-0.91)*(P2.x-P1.x) + 71.86;
ss.str(" ");
ss.clear();
ss<<"D = "<<dist_Stop<<"cm";
putText(RoI_Stop, ss.str(), Point2f(1,130), 0,1, Scalar(0,0,255), 2);
}
}
void Arrow_detection()
{
if(!Arrow_Cascade.load("//home//pi/Desktop//MACHINE LEARNING//Arrow_cascade.xml"))
{
printf("Unable to open Arrow cascade file");
}
RoI_Arrow = frame_Arrow(Rect(0,0,400,240));
cvtColor(RoI_Arrow, gray_Arrow, COLOR_RGB2GRAY);
equalizeHist(gray_Arrow, gray_Arrow);
Arrow_Cascade.detectMultiScale(gray_Arrow, Arrow);
for(int i=0; i<Arrow.size(); i++)
{
Point P1(Arrow[i].x, Arrow[i].y);
Point P2(Arrow[i].x + Arrow[i].width, Arrow[i].y + Arrow[i].height); // y, not x, for the bottom-right corner
rectangle(RoI_Arrow, P1, P2, Scalar(0, 0, 255), 2);
putText(RoI_Arrow, "Arrow", P1, FONT_HERSHEY_PLAIN, 1, Scalar(0, 0, 255, 255), 2);
dist_Arrow = (-1)*(P2.x-P1.x) + 82;
ss.str(" ");
ss.clear();
ss<<"D = "<<dist_Arrow <<"cm";
putText(RoI_Arrow, ss.str(), Point2f(1,130), 0,1, Scalar(0,0,255), 2);
}
}
int main(int argc,char **argv)
{
wiringPiSetup();
pinMode(21, OUTPUT);
pinMode(22, OUTPUT);
pinMode(23, OUTPUT);
pinMode(24, OUTPUT);
Setup(argc, argv, Camera);
cout<<"Connecting to camera"<<endl;
if (!Camera.open())
{
cout<<"Failed to Connect"<<endl;
return -1; // don't continue with an unopened camera
}
cout<<"Camera Id = "<<Camera.getId()<<endl;
while(1)
{
auto start = std::chrono::system_clock::now();
Capture();
Perspective();
Threshold();
Histrogram();
LaneFinder();
LaneCenter();
Stop_detection();
Arrow_detection();
if (dist_Stop > 5 && dist_Stop < 30)
{
digitalWrite(21, 0);
digitalWrite(22, 0); //decimal = 8
digitalWrite(23, 0);
digitalWrite(24, 1);
cout<<"Stop Sign"<<endl;
dist_Stop = 0;
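// Skip the lane-steering writes below so they don't overwrite the stop command.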
goto Stop_Sign;
}
if (laneEnd > 6000)
{
digitalWrite(21, 1);
digitalWrite(22, 1); //decimal = 7
digitalWrite(23, 1);
digitalWrite(24, 0);
cout<<"Lane End"<<endl;
}
if (Result == 0)
{
digitalWrite(21, 0);
digitalWrite(22, 0); //decimal = 0
digitalWrite(23, 0);
digitalWrite(24, 0);
cout<<"Forward"<<endl;
}
else if (Result >0 && Result <10)
{
digitalWrite(21, 1);
digitalWrite(22, 0); //decimal = 1
digitalWrite(23, 0);
digitalWrite(24, 0);
cout<<"Right1"<<endl;
}
else if (Result >=10 && Result <20)
{
digitalWrite(21, 0);
digitalWrite(22, 1); //decimal = 2
digitalWrite(23, 0);
digitalWrite(24, 0);
cout<<"Right2"<<endl;
}
else if (Result >20)
{
digitalWrite(21, 1);
digitalWrite(22, 1); //decimal = 3
digitalWrite(23, 0);
digitalWrite(24, 0);
cout<<"Right3"<<endl;
}
else if (Result <0 && Result >-10)
{
digitalWrite(21, 0);
digitalWrite(22, 0); //decimal = 4
digitalWrite(23, 1);
digitalWrite(24, 0);
cout<<"Left1"<<endl;
}
else if (Result <=-10 && Result >-20)
{
digitalWrite(21, 1);
digitalWrite(22, 0); //decimal = 5
digitalWrite(23, 1);
digitalWrite(24, 0);
cout<<"Left2"<<endl;
}
else if (Result <-20)
{
digitalWrite(21, 0);
digitalWrite(22, 1); //decimal = 6
digitalWrite(23, 1);
digitalWrite(24, 0);
cout<<"Left3"<<endl;
}
Stop_Sign:
if (laneEnd > 7000)
{
ss.str(" ");
ss.clear();
ss<<" Lane End";
putText(frame, ss.str(), Point2f(1,50), 0,1, Scalar(255,0,0), 2);
}
else if (Result == 0)
{
ss.str(" ");
ss.clear();
ss<<"Result = "<<Result<<" Move Forward";
putText(frame, ss.str(), Point2f(1,50), 0,1, Scalar(0,0,255), 2);
}
else if (Result > 0)
{
ss.str(" ");
ss.clear();
ss<<"Result = "<<Result<<" Move Right";
putText(frame, ss.str(), Point2f(1,50), 0,1, Scalar(0,0,255), 2);
}
else if (Result < 0)
{
ss.str(" ");
ss.clear();
ss<<"Result = "<<Result<<" Move Left";
putText(frame, ss.str(), Point2f(1,50), 0,1, Scalar(0,0,255), 2);
}
namedWindow("orignal", WINDOW_KEEPRATIO);
moveWindow("orignal", 0, 100);
resizeWindow("orignal", 420, 300);
imshow("orignal", frame);
namedWindow("Perspective", WINDOW_KEEPRATIO);
moveWindow("Perspective", 400, 100);
resizeWindow("Perspective", 420, 300);
imshow("Perspective", framePers);
namedWindow("Final", WINDOW_KEEPRATIO);
moveWindow("Final", 800, 100);
resizeWindow("Final", 420, 300);
imshow("Final", frameFinal);
namedWindow("Stop Sign", WINDOW_KEEPRATIO);
moveWindow("Stop Sign", 1200, 100);
resizeWindow("Stop Sign", 420, 300);
imshow("Stop Sign", RoI_Stop);
namedWindow("Arrow", WINDOW_KEEPRATIO);
moveWindow("Arrow", 1200, 450);
resizeWindow("Arrow", 420, 300);
imshow("Arrow", RoI_Arrow);
waitKey(1);
auto end = std::chrono::system_clock::now();
std::chrono::duration<double> elapsed_seconds = end-start;
float t = elapsed_seconds.count();
int FPS = 1/t;
// cout<<"FPS = "<<FPS<<endl;
}
return 0;
}
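Not from the original post, but one common way to reduce sensitivity to room lighting is to normalize each captured frame in software before running the cascades, for example with CLAHE (contrast-limited adaptive histogram equalization). A minimal sketch, assuming a BGR input frame and OpenCV 3:
// Hypothetical per-frame lighting normalization with CLAHE on the lightness channel.
// This does not change the camera's exposure; it equalizes each frame after capture,
// which often makes cascade detection more robust to changing room light.
cv::Mat normalizeLighting(const cv::Mat& bgr)
{
cv::Mat lab;
cv::cvtColor(bgr, lab, cv::COLOR_BGR2Lab);
std::vector<cv::Mat> planes;
cv::split(lab, planes);
cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(2.0, cv::Size(8, 8));
clahe->apply(planes[0], planes[0]); // equalize L (lightness) only, leaving color untouched
cv::merge(planes, lab);
cv::Mat out;
cv::cvtColor(lab, out, cv::COLOR_Lab2BGR);
return out;
}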

Face Landmarks and stabilization using optical flow

I wrote a program that shows a window with the face and some special points (68 of them).
I use a Haar cascade and FacemarkLBF. I have a problem in my program: when the face is held in a stable position, the face points jitter (shake). How can I fix that? Thanks.
#include <iostream>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/face.hpp>
using cv::Scalar;
using cv::Point;
int main(int argc, char** argv)
{
cv::CascadeClassifier faceDetector("haarcascade_frontalface_alt2.xml");
cv::Ptr<cv::face::Facemark>facemark = cv::face::FacemarkLBF::create();
facemark->loadModel("lbfmodel.yaml");
cv::VideoCapture vc(0);
while (true)
{
cv::Mat frame, gray;
vc.read(frame);
cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
//
std::vector<cv::Rect> faces;
faceDetector.detectMultiScale(gray, faces);
std::vector< std::vector<cv::Point2f> > landmarks;
bool success = facemark->fit(frame, faces, landmarks);
for (size_t i = 0; i < landmarks.size(); i++)
{
for (size_t j = 0; j < landmarks[i].size(); j++)
{
cv::circle(frame, cv::Point(landmarks[i][j].x, landmarks[i][j].y), 2, Scalar(255, 0, 0), 2);
}
}
cv::imshow("1", frame);
if ((char)cv::waitKey(20) == 27)
break;
}
return 0;
}
I saw @Nuzhny's link: lkdemo.cpp. Not everything is clear to me.
I rewrote my code, but nothing changed:
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include "opencv2/video/tracking.hpp"
#include <opencv2/face.hpp>
int main(int argc, char** argv)
{
cv::CascadeClassifier faceDetector("haarcascade_frontalface_alt2.xml");
cv::Ptr<cv::face::Facemark>facemark = cv::face::FacemarkLBF::create();
facemark->loadModel("lbfmodel.yaml");
cv::VideoCapture vc(0);
cv::Mat gray, prevGray, image, frame;
cv::Size subPixWinSize(10, 10), winSize(64, 64);
cv::TermCriteria termcrit(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 20, 0.03);
std::vector<uchar> status;
std::vector<float> err;
std::vector<cv::Point2f> oldLandmarks;
std::vector< std::vector<cv::Point2f> > landmarks;
bool b = true;
while (true)
{
vc.read(frame);
cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
std::vector<cv::Rect> faces;
faceDetector.detectMultiScale(gray, faces);
bool success = facemark->fit(frame, faces, landmarks);
if (!success)
{
cv::imshow("1", frame);
continue;
}
if (oldLandmarks.empty())
oldLandmarks = landmarks.front();
if (prevGray.empty())
gray.copyTo(prevGray);
calcOpticalFlowPyrLK(prevGray, gray, landmarks.front(), oldLandmarks, status, err, winSize, 3, termcrit, cv::OPTFLOW_LK_GET_MIN_EIGENVALS, 0.001);
for (size_t i = 0; i < oldLandmarks.size(); i++)
{
cv::circle(frame, cv::Point(oldLandmarks[i].x, oldLandmarks[i].y), 2, cv::Scalar(255, 0, 0), 2);
}
cv::imshow("1", frame);
std::swap(oldLandmarks, landmarks.front());
cv::swap(prevGray, gray);
if ((char)cv::waitKey(20) == 27)
break;
}
return 0;
}
LK tracking alone may not be enough. I'm writing a simple application that corrects the landmarks after LK with a linear Kalman filter (EDIT 2 - removed prev landmarks):
#include <opencv2/opencv.hpp>
#include <opencv2/face.hpp>
///
class PointState
{
public:
PointState(cv::Point2f point)
:
m_point(point),
m_kalman(4, 2, 0, CV_64F)
{
Init();
}
void Update(cv::Point2f point)
{
cv::Mat measurement(2, 1, CV_64FC1);
if (point.x < 0 || point.y < 0)
{
Predict();
measurement.at<double>(0) = m_point.x; //update using prediction
measurement.at<double>(1) = m_point.y;
m_isPredicted = true;
}
else
{
measurement.at<double>(0) = point.x; //update using measurements
measurement.at<double>(1) = point.y;
m_isPredicted = false;
}
// Correction
cv::Mat estimated = m_kalman.correct(measurement);
m_point.x = static_cast<float>(estimated.at<double>(0)); //update using measurements
m_point.y = static_cast<float>(estimated.at<double>(1));
Predict();
}
cv::Point2f GetPoint() const
{
return m_point;
}
bool IsPredicted() const
{
return m_isPredicted;
}
private:
cv::Point2f m_point;
cv::KalmanFilter m_kalman;
double m_deltaTime = 0.2;
double m_accelNoiseMag = 0.3;
bool m_isPredicted = false;
void Init()
{
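// Constant-velocity motion model: the state vector is [x, y, vx, vy].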
m_kalman.transitionMatrix = (cv::Mat_<double>(4, 4) <<
1, 0, m_deltaTime, 0,
0, 1, 0, m_deltaTime,
0, 0, 1, 0,
0, 0, 0, 1);
m_kalman.statePre.at<double>(0) = m_point.x; // x
m_kalman.statePre.at<double>(1) = m_point.y; // y
m_kalman.statePre.at<double>(2) = 1; // init velocity x
m_kalman.statePre.at<double>(3) = 1; // init velocity y
m_kalman.statePost.at<double>(0) = m_point.x;
m_kalman.statePost.at<double>(1) = m_point.y;
cv::setIdentity(m_kalman.measurementMatrix);
m_kalman.processNoiseCov = (cv::Mat_<double>(4, 4) <<
pow(m_deltaTime, 4.0) / 4.0, 0, pow(m_deltaTime, 3.0) / 2.0, 0,
0, pow(m_deltaTime, 4.0) / 4.0, 0, pow(m_deltaTime, 3.0) / 2.0,
pow(m_deltaTime, 3.0) / 2.0, 0, pow(m_deltaTime, 2.0), 0,
0, pow(m_deltaTime, 3.0) / 2.0, 0, pow(m_deltaTime, 2.0));
m_kalman.processNoiseCov *= m_accelNoiseMag;
cv::setIdentity(m_kalman.measurementNoiseCov, cv::Scalar::all(0.1));
cv::setIdentity(m_kalman.errorCovPost, cv::Scalar::all(.1));
}
cv::Point2f Predict()
{
cv::Mat prediction = m_kalman.predict();
m_point.x = static_cast<float>(prediction.at<double>(0));
m_point.y = static_cast<float>(prediction.at<double>(1));
return m_point;
}
};
///
void TrackPoints(cv::Mat prevFrame, cv::Mat currFrame,
const std::vector<cv::Point2f>& currLandmarks,
std::vector<PointState>& trackPoints)
{
// Lucas-Kanade
cv::TermCriteria termcrit(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 30, 0.01);
cv::Size winSize(7, 7);
std::vector<uchar> status(trackPoints.size(), 0);
std::vector<float> err;
std::vector<cv::Point2f> newLandmarks;
std::vector<cv::Point2f> prevLandmarks;
std::for_each(trackPoints.begin(), trackPoints.end(), [&](const PointState& pts) { prevLandmarks.push_back(pts.GetPoint()); });
cv::calcOpticalFlowPyrLK(prevFrame, currFrame, prevLandmarks, newLandmarks, status, err, winSize, 3, termcrit, 0, 0.001);
for (size_t i = 0; i < status.size(); ++i)
{
if (status[i])
{
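// Blend the LK-tracked position with the fresh detection to damp jitter.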
trackPoints[i].Update((newLandmarks[i] + currLandmarks[i]) / 2);
}
else
{
trackPoints[i].Update(currLandmarks[i]);
}
}
}
///
int main(int argc, char** argv)
{
cv::CascadeClassifier faceDetector("haarcascade_frontalface_alt2.xml");
cv::Ptr<cv::face::Facemark> facemark = cv::face::FacemarkLBF::create();
facemark->loadModel("lbfmodel.yaml");
cv::VideoCapture cam(0, cv::CAP_DSHOW);
cv::namedWindow("Facial Landmark Detection", cv::WINDOW_NORMAL);
cv::Mat frame;
cv::Mat currGray;
cv::Mat prevGray;
std::vector<PointState> trackPoints;
trackPoints.reserve(68);
while (cam.read(frame))
{
std::vector<cv::Rect> faces;
cv::cvtColor(frame, currGray, cv::COLOR_BGR2GRAY);
faceDetector.detectMultiScale(currGray, faces, 1.1, 3, cv::CASCADE_FIND_BIGGEST_OBJECT);
std::vector<std::vector<cv::Point2f>> landmarks;
bool success = facemark->fit(frame, faces, landmarks);
if (success)
{
if (prevGray.empty())
{
trackPoints.clear();
for (cv::Point2f lp : landmarks[0])
{
trackPoints.emplace_back(lp);
}
}
else
{
if (trackPoints.empty())
{
for (cv::Point2f lp : landmarks[0])
{
trackPoints.emplace_back(lp);
}
}
else
{
TrackPoints(prevGray, currGray, landmarks[0], trackPoints);
}
}
for (const PointState& tp : trackPoints)
{
cv::circle(frame, tp.GetPoint(), 3, tp.IsPredicted() ? cv::Scalar(0, 0, 255) : cv::Scalar(0, 255, 0), cv::FILLED);
}
for (cv::Point2f lp : landmarks[0])
{
cv::circle(frame, lp, 2, cv::Scalar(255, 0, 255), cv::FILLED);
}
}
cv::imshow("Facial Landmark Detection", frame);
if (cv::waitKey(1) == 27)
break;
prevGray = currGray.clone(); // deep copy: cvtColor() reuses currGray's buffer each frame, so a plain assignment would make prevGray alias it
}
return 0;
}
So, the magenta points are the raw landmarks and the green points are the ones corrected by LK + Kalman: result video.
You can change Kalman options with 2 constants:
double m_deltaTime = 0.2;
double m_accelNoiseMag = 0.3;
They trade off latency against noise: a larger m_accelNoiseMag trusts the measurements more (less lag, more jitter), while a smaller one smooths more but lags behind fast motion.

write string continuously using fprintf

My project tracks an object using OpenCV and sends the coordinates to an Arduino (tx); another Arduino (rx) reads the data with SoftwareSerial. There is no problem with the object tracking, getting the coordinates, or the communication between the two Arduinos. The problem is that I cannot send the coordinates while the tracker is running; the data only starts appearing on the serial COM when I close the tracker.
opencv code
using namespace cv;
using namespace std;
//default capture width and height
int FRAME_WIDTH = 320; //640 320
int FRAME_HEIGHT = 240; //480 240
int MIN_OBJECT_AREA = 10*10;
int iLowH = 16;
int iHighH = 104;
int iLowS = 110;
int iHighS = 164;
int iLowV = 63;
int iHighV = 255;
int centerX, centerY;
int Xg,Yg;
int Modefilter = 1;
FILE *fp;
bool kirim=false;
string intToString(int number){
std::stringstream ss;
ss << number;
return ss.str();
}
void detect(){
}
int main( int argc, char** argv ){
//open serial
FILE* serial = fopen("\\\\.\\COM3", "w+");
if (serial == 0) {
printf("Failed to open serial port\n");
}
//capture the video from web cam
VideoCapture cap(0);
// if not success, exit program
if ( !cap.isOpened() ){
cout << "Cannot open the web cam" << endl;
return -1;
}
//set height and width of capture frame
cap.set(CV_CAP_PROP_FRAME_WIDTH,FRAME_WIDTH);
cap.set(CV_CAP_PROP_FRAME_HEIGHT,FRAME_HEIGHT);
//create a window called "Control"
namedWindow("Control", CV_WINDOW_AUTOSIZE);
//Create trackbars in "Control" window
cvCreateTrackbar("LowH", "Control", &iLowH, 179); //Hue (0 - 179)
cvCreateTrackbar("HighH", "Control", &iHighH, 179);
cvCreateTrackbar("LowS", "Control", &iLowS, 255); //Saturation (0 - 255)
cvCreateTrackbar("HighS", "Control", &iHighS, 255);
cvCreateTrackbar("LowV", "Control", &iLowV, 255); //Value (0 - 255)
cvCreateTrackbar("HighV", "Control", &iHighV, 255);
string XX,YY,parser1,parser2,result;
while (serial!=0){
Mat imgOriginal;
bool bSuccess = cap.read(imgOriginal); // read a new frame from video
if (!bSuccess){ //if not success, break loop
cout << "Cannot read a frame from video stream" << endl;
break;
}
//Convert the captured frame from BGR to HSV
Mat imgHSV;
cvtColor(imgOriginal, imgHSV, COLOR_BGR2HSV);
//find center point
centerX = FRAME_WIDTH/2;
centerY = FRAME_HEIGHT/2;
putText(imgOriginal, "Tekan", Point(5,10), FONT_HERSHEY_COMPLEX, 0.35, Scalar(0, 255, 0), 0.25, 8);
putText(imgOriginal, "a : Mulai Mengikuti Objek", Point(5,20), FONT_HERSHEY_COMPLEX, 0.35, Scalar(0, 255, 0), 0.25, 8);
putText(imgOriginal, "b : Berhenti Mengikuti Objek", Point(5,30), FONT_HERSHEY_COMPLEX, 0.35, Scalar(0, 255, 0), 0.25, 8);
//create cross line
line(imgOriginal,Point(centerX, centerY-20), Point(centerX, centerY+20), Scalar(0,255,0), 1.5);
line(imgOriginal,Point(centerX-20, centerY), Point(centerX+20, centerY), Scalar(0,255,0), 1.5);
//Threshold the image
Mat imgThresholded;
inRange(imgHSV, Scalar(iLowH, iLowS, iLowV), Scalar(iHighH, iHighS, iHighV), imgThresholded);
//morphological opening (remove small objects from the foreground)
erode(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );
dilate( imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );
//morphological closing (fill small holes in the foreground)
dilate( imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );
erode(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );
//these two vectors needed for output of findContours
vector< vector<Point> > contours;
vector<Vec4i> hierarchy;
Mat imgContour;
imgThresholded.copyTo(imgContour);
//find contours of filtered image using openCV findContours function
findContours(imgContour,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE );
//use moments method to find our filtered object
double refArea = 0;
if (hierarchy.size() > 0) {
int numObjects = hierarchy.size();
for (int index = 0; index >= 0; index = hierarchy[index][0]) {
Moments moment = moments((cv::Mat)contours[index]);
double area = moment.m00;
if(area>MIN_OBJECT_AREA){ // if the contour area is larger than the minimum object area, draw a circle and write the coordinates
kirim=true;
double x = moment.m10/area;
double y = moment.m01/area;
double r = sqrt(area/3.14);
Xg=(int) x;
Yg=(int) y;
circle(imgOriginal, Point(x,y), r, Scalar(0,0,255), 1.5, 8);
line(imgOriginal, Point(x,y-r-5), Point(x,y+r+5), Scalar(0,0,255), 1.5, 8);
line(imgOriginal, Point(x-r-5,y), Point(x+r+5,y), Scalar(0,0,255), 1.5, 8);
putText(imgOriginal, intToString(x) + "," + intToString(y), Point(x,y+10), FONT_HERSHEY_COMPLEX, 0.25, Scalar(0, 255, 0), 0.3, 8);
// send x,y coordinate to arduino
parser1="*"; parser2="!";
ostringstream xxx,yyy ;
xxx << Xg;
yyy << Yg;
XX=xxx.str(); YY=yyy.str();
result=parser1+XX+parser2+YY;
cout << result << endl;
fprintf(serial, "%s\n", result.c_str());
fflush(serial);
}//end if
}//end for
}//end if
//show the thresholded image
Mat dstimgThresholded;
resize(imgThresholded, dstimgThresholded, Size(), 2, 2, INTER_CUBIC);
imshow("Thresholded Image", dstimgThresholded);
//show the original image
Mat dstimgOriginal;
resize(imgOriginal, dstimgOriginal, Size(), 2, 2, INTER_CUBIC);
imshow("Original", dstimgOriginal);
///send data
if (waitKey(5) == 27) {//wait 5 ms for the 'esc' key. If 'esc' is pressed, break loop
cout << "esc key is pressed by user" << endl;
break;
}
} //end while
return 0;
}
tx arduino
#include <SoftwareSerial.h>
SoftwareSerial SWsend(2, 3); // (rx,tx)
String data;
void setup() {
// put your setup code here, to run once:
Serial.begin(115200);
SWsend.begin(4800);
//pinMode(12,INPUT_PULLUP);
}
void Send_SSIDandPWD_ToESP01() {
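// Forwards whatever arrives on the hardware serial port (presumably the tracker's coordinates from the PC) to the SoftwareSerial link; the name is left over from an ESP example.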
while (!SWsend) {
; // wait for serial port to connect.
Serial.println(F("wait for serial port to connect."));
}
while (Serial.available()) {
data = Serial.readString();
//data = Serial.read();
Serial.print(F("Send:"));
Serial.println(data);
SWsend.print(data);
}
}
void loop() {
// put your main code here, to run repeatedly:
Send_SSIDandPWD_ToESP01();
delay(25);
}
rx arduino
#include <SoftwareSerial.h>
SoftwareSerial SWrecv(2, 3); //(rx,tx)
String strSSID = ""; // a string to hold incoming data
String strPWD = "";
bool keepSSID = false;
bool keepPWD = false;
boolean stringComplete = false; // whether the string is complete
void setup() {
// initialize serial:
Serial.begin(115200);
SWrecv.begin(4800);
// Print a hello message (the "backlight" comment was left over from an LCD example).
Serial.println(F("Hello, world!"));
}
void loop() {
// print the string when a newline arrives:
SWrecvEvent();
if (stringComplete) {
Serial.println("X:" + strSSID + ", Y:" + strPWD);
// clear the string:
// inputString = "";
if (strSSID == "")
{
Serial.println("SSID:not config");
}
strSSID = ""; // a string to hold incoming data
strPWD = "";
stringComplete = false;
}
}
void SWrecvEvent() {
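// Parses frames of the form "*<X>!<Y>\n": '*' starts the X field and '!' starts the Y field; strSSID/strPWD hold X/Y (names left over from an ESP example).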
while (SWrecv.available()) {
// get the new byte:
char inChar = (char)SWrecv.read();
//Serial.print(inChar); ///////////////////string asli
// add it to the inputString:
switch (inChar ) {
case '*':
{
keepSSID = true;
keepPWD = false;
}
break;
case '!':
{
keepSSID = false;
keepPWD = true;
}
break;
default:
{
if (inChar == '\n') {
stringComplete = true;
keepSSID = false;
keepPWD = false;
return;
}
if (keepSSID == true )
{
strSSID += inChar;
}
else if ( keepPWD == true )
{
strPWD += inChar;
}
}
break;
}
}
}
What should I do to write the coordinates continuously while the tracker is running?
It could be that the I/O is being buffered and only flushed when you exit your tx program. Try flushing to force the data to send.
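A minimal sketch (assuming the same FILE* handle the OpenCV code opens with fopen): either flush after every write, or disable stdio buffering on the port entirely:
// Hypothetical tweak, placed right after opening the port in the OpenCV program:
FILE* serial = fopen("\\\\.\\COM3", "w+");
if (serial != 0) {
setvbuf(serial, NULL, _IONBF, 0); // _IONBF = unbuffered: every fprintf() goes straight to the port
}
// ...or keep the buffering and call fflush(serial) after each fprintf(), as the code above already does.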

How to display the final decision for face detection using opencv

I am a beginner with OpenCV. I am using OpenCV 2.4 with VS2013. I developed face detection code successfully, but the decision is made frame by frame. How can I make a final decision by combining all the frames, or by frame averaging? For example, when faces are detected in 90% of the frames, the final decision is 'FACE DETECTED', and vice versa.
Here is my code:
int main(int argc, char** argv)
{
CvCapture* capture;
capture = cvCaptureFromFile("C:/Users/user/Desktop/Face and Motion/45.avi");
//assert(capture != NULL); //terminate if capture is NULL
IplImage* frame;
while (true)
{
frame = cvQueryFrame(capture);
if (!frame)
break;
cvShowImage("original", frame); //show
CvMemStorage* storage = cvCreateMemStorage(0);
CvHaarClassifierCascade* cascade = (CvHaarClassifierCascade*)cvLoad("C:/opencv2410/sources/data/haarcascades/haarcascade_frontalface_alt.xml");
cvClearMemStorage(storage);
CvSeq* faces = cvHaarDetectObjects(frame, cascade, storage, 1.1, 4, 0, cvSize(40, 50));
CvRect* r;
if (faces) //change from (!faces) to (faces)
{
for (int i = 0; i < (faces ? faces->total : 0); i++)
{
r = (CvRect*)cvGetSeqElem(faces, i);
//cvRectangle(frame, cvPoint(100, 50), cvPoint(200, 200), CV_RGB(255, 0, 0), 5, 8);
cvRectangle(frame, cvPoint(r->x, r->y), cvPoint(r->x + r->width, r->y + r->height), CV_RGB(255, 0, 0));
cvShowImage("RESULTS", frame);
char c = cvWaitKey(1000);
}
}
else
{
cvShowImage("RESULT", frame);
}
printf("%d Face Found !\n", faces ? faces->total : 0);
if ((faces ? faces->total : 0) == 0)
{
printf("FACE NOT DETECTED !\n\n");
}
else
{
printf("FACE DETECTED !\n\n");
}
}
return (0);
}
You need two variables, like:
int frame_count = 0;
int detected_face_count = 0;
You should increase frame_count for every frame and increase detected_face_count when a face is detected. Finally, detected_face_count / frame_count (computed in floating point) gives your desired value.
However, I made some corrections to your code (I could not test it).
The changes are explained by comments. I hope it will be useful.
int main(int argc, char** argv)
{
CvCapture* capture;
capture = cvCaptureFromFile("C:/Users/user/Desktop/Face and Motion/45.avi");
//assert(capture != NULL); //terminate if capture is NULL
IplImage* frame;
//you need two variables
int frame_count = 0;
int detected_face_count = 0;
// these lines should not be in the loop
CvMemStorage* storage = cvCreateMemStorage(0);
CvHaarClassifierCascade* cascade = (CvHaarClassifierCascade*)cvLoad("C:/opencv2410/sources/data/haarcascades/haarcascade_frontalface_alt.xml");
while (true)
{
frame = cvQueryFrame(capture);
if (!frame)
break;
frame_count++; // increase frame_count
cvShowImage("original", frame); //show
cvClearMemStorage(storage);
CvSeq* faces = cvHaarDetectObjects(frame, cascade, storage, 1.1, 4, 0, cvSize(40, 50));
CvRect* r;
for (int i = 0; i < (faces ? faces->total : 0); i++)
{
r = (CvRect*)cvGetSeqElem(faces, i);
//cvRectangle(frame, cvPoint(100, 50), cvPoint(200, 200), CV_RGB(255, 0, 0), 5, 8);
cvRectangle(frame, cvPoint(r->x, r->y), cvPoint(r->x + r->width, r->y + r->height), CV_RGB(255, 0, 0));
}
cvShowImage("RESULT", frame);
char c = cvWaitKey(1000);
printf("%d Face Found !\n", faces ? faces->total : 0);
if ((faces ? faces->total : 0) == 0)
{
printf("FACE NOT DETECTED !\n\n");
}
else
{
detected_face_count ++; // increase detected_face_count
printf("FACE DETECTED !\n\n");
}
}
if( (float)detected_face_count / frame_count > 0.89 ) // cast before dividing: integer division would always give 0 here
{
printf("FACES DETECTED over %90 of frames!\n\n");
}
return (0);
}
As @Miki stated, you should not use the deprecated C API.
Take a look at the code below using the C++ API (tested with OpenCV 3.1):
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
VideoCapture capture("C:/Users/user/Desktop/Face and Motion/45.avi");
if(!capture.isOpened())
{
printf("Video file could not be opened !\n");
return -1;
}
Mat frame;
//you need two variables
int frame_count = 0;
int detected_face_count = 0;
// these lines should not be in the loop
CascadeClassifier cascade;
if( !cascade.load( "C:/opencv2410/sources/data/haarcascades/haarcascade_frontalface_alt.xml" ) )
{
cerr << "ERROR: Could not load classifier cascade" << endl;
return -1;
}
while (true)
{
capture >>frame;
if (frame.empty())
break;
frame_count++; // increase frame_count
imshow("original", frame); //show
vector<Rect> faces;
cascade.detectMultiScale(frame, faces,1.1, 4, 0, Size(40, 50));
for (int i = 0; i < faces.size(); i++)
{
rectangle(frame, faces[i], Scalar(0, 0, 255));
}
imshow("RESULT", frame);
char c = waitKey(10);
printf("%d Face Found !\n", faces.size());
if ( faces.size() == 0 )
{
printf("FACE NOT DETECTED !\n\n");
}
else
{
detected_face_count ++; // increase detected_face_count
printf("FACE DETECTED !\n\n");
}
}
printf("count of frames : %d \n", frame_count);
printf("count of frames has detected face : %d \n", detected_face_count);
printf("Face Found %d percent of frames!\n", (int)(100 * detected_face_count / frame_count));
if( (float)detected_face_count / frame_count > 0.89 ) // cast before dividing: integer division would always give 0 here
{
printf("FACES DETECTED over %90 of frames!\n\n");
}
return (0);
}

face detection (how to detect the circle around the face that we've detected)

My name is Budi.
I am a newbie in image processing; recently I've been trying to learn about OpenCV and Visual Studio. I have already succeeded in detecting the face and drawing a circle around it, thanks to an example on a DIY website. My question is: how do I detect that a circle is encircling the detected face, so I can use it in an "if" condition? For example:
if ( condition )//there is at least one circle in the frame
{
bla bla bla
}
Here is the code I use:
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/ocl/ocl.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/opencv_modules.hpp>
#include <opencv2/videostab/deblurring.hpp>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <Windows.h>
#include <sstream>
#include <string>
using namespace std;
using namespace cv;
const static Scalar colors[] = { CV_RGB(0,0,255),
CV_RGB(0,128,255),
CV_RGB(0,255,255),
CV_RGB(0,255,0),
CV_RGB(255,128,0),
CV_RGB(255,255,0),
CV_RGB(255,0,0),
CV_RGB(255,0,255)
} ;
void Draw(Mat& img, vector<Rect>& faces, double scale);
int main(int argc, const char** argv)
{
// Setup serial port connection and needed variables.
HANDLE hSerial = CreateFile(L"COM8", GENERIC_READ | GENERIC_WRITE, 0, 0, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, 0);
if (hSerial !=INVALID_HANDLE_VALUE)
{
printf("Port opened! \n");
DCB dcbSerialParams;
GetCommState(hSerial,&dcbSerialParams);
dcbSerialParams.BaudRate = CBR_9600;
dcbSerialParams.ByteSize = 8;
dcbSerialParams.Parity = NOPARITY;
dcbSerialParams.StopBits = ONESTOPBIT;
//CvMemStorage* p_strStorage;
char incomingData[256] = ""; // don't forget to pre-allocate memory
//printf("%s\n",incomingData);
int dataLength = 256;
int readResult = 0;
SetCommState(hSerial, &dcbSerialParams);
}
else
{
if (GetLastError() == ERROR_FILE_NOT_FOUND)
{
printf("Serial port doesn't exist! \n");
}
printf("Error while setting up serial port! \n");
}
char outputChars[] ="c" ;
DWORD btsIO;
//void Draw(Mat& img, vector<Rect>& faces, double scale);
Mat frame, frameCopy, image;
//int i; // loop counter
//char charCheckForEscKey; // char for checking key press (Esc exits program)
//create the cascade classifier object used for the face detection
CascadeClassifier face_cascade;
//use the haarcascade_frontalface_alt.xml library
face_cascade.load("haarcascade_frontalface_alt.xml");
//setup video capture device and link it to the first capture device
VideoCapture captureDevice;
captureDevice.open(0);
if (!captureDevice.isOpened())
{ // if capture was not successful . . .
printf("error: capture error \n"); // error message to standard out . . .
getchar(); // getchar() to pause for user see message . . .
return(-1);
}
//setup image files used in the capture process
Mat captureFrame;
Mat grayscaleFrame;
//create a window to present the results
namedWindow("FaceDetection", 1);
//int servoPosition = 90;
//int servoOrientation = 0;
//int servoPosition1=90;
//int servoOrientation1=0;
//create a loop to capture and find faces
while(true)
{
//p_imgOriginal = captureFrame;
//capture a new image frame
captureDevice>>captureFrame;
//convert captured image to gray scale and equalize
cvtColor(captureFrame, grayscaleFrame, CV_BGR2GRAY);
imshow("Grayscale", grayscaleFrame);
equalizeHist(grayscaleFrame, grayscaleFrame);
//p_strStorage = cvCreateMemStorage(0);
//create a vector array to store the face found
std::vector<Rect> faces;
//find faces and store them in the vector array
face_cascade.detectMultiScale(grayscaleFrame, faces, 1.1, 3, CV_HAAR_FIND_BIGGEST_OBJECT|CV_HAAR_SCALE_IMAGE, Size(30,30));
//draw a circle for all found faces in the vector array on the original image
//for(int i = 0; i < faces.size(); i++)
int i = 0;
//for( int i = 0; i < faces.size(); i++ )
for( vector<Rect>::const_iterator r = faces.begin(); r != faces.end(); r++, i++ )
{
Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
Scalar color = colors[i%8];
center.x = cvRound((r->x + r->width*0.5));
center.y = cvRound((r->y + r->height*0.5));
Point pt1(faces[i].x + faces[i].width, faces[i].y + faces[i].height);
Point pt2(faces[i].x, faces[i].y);
int radius;
int X = faces[i].x;
int Y = faces[i].y;
radius = cvRound((faces[i].width + faces[i].height)*0.25);
//ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 255 ), 2, 8, 0 );
//rectangle(captureFrame, pt1, pt2, cvScalar(0, 255, 0, 0), 1, 8, 0);
circle(captureFrame,center,radius,cvScalar(0, 255, 0, 0), 1, 8, 0);
cout << "X:" << faces[i].x << " Y:" << faces[i].y << endl;
if (radius >= 85)
{
outputChars[0] = 'a';
WriteFile(hSerial, outputChars, strlen(outputChars), &btsIO, NULL);
cout << "radius >= 85, advertising begin" << endl;
//FlushFileBuffers(hSerial);
}
else if (radius <= 84)
{
outputChars[0] = 'b';
WriteFile(hSerial, outputChars, strlen(outputChars), &btsIO, NULL);
//cout << "radius >= 85, advertising begin" << endl;
}
/*else if (radius >= 85 | X<=164 && X>=276)
{
outputChars[0] = 'z';
WriteFile(hSerial, outputChars, strlen(outputChars), &btsIO, NULL);
cout << "radius >= 85, advertising begin" << endl;
//FlushFileBuffers(hSerial);
}
else if (radius < 85)
{
outputChars[0] = 'b';
WriteFile(hSerial, outputChars, strlen(outputChars), &btsIO, NULL);
cout << "radius: " << radius << "radius < 85, advertising end!" << endl;
//FlushFileBuffers(hSerial);
}
/*if (X>=165 | X<=275)
{
outputChars[0]='u';
WriteFile(hSerial, outputChars, strlen(outputChars), &btsIO, NULL);
cout <<"Face in the middle of the frame" << endl;
FlushFileBuffers(hSerial);
}
/*if (X<=164 | X>=276)
{
outputChars[0]='y';
WriteFile(hSerial, outputChars, strlen(outputChars), &btsIO, NULL);
cout <<"no face in the middle of the frame" << endl;
//FlushFileBuffers(hSerial);
}*/
}
//print the output
imshow("FaceDetection", captureFrame);
//pause for 60ms
waitKey(60);
}
cvDestroyWindow("FaceDetection");
cvDestroyWindow("Grayscale");
FlushFileBuffers(hSerial);
// This closes the Serial Port
CloseHandle(hSerial);
return 0;
}
Please help me, and thank you all for your attention.
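One note, not from the original post: since the loop above draws exactly one circle per entry in the faces vector returned by detectMultiScale, checking whether a circle is in the frame is the same as checking whether the vector is non-empty. A minimal sketch:
// Hypothetical condition using the `faces` vector filled by detectMultiScale above:
// one circle is drawn per detected face, so "at least one circle in the frame"
// is equivalent to "at least one detected face".
if (!faces.empty())
{
// bla bla bla - e.g. write a command byte to the serial port
}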