write string continously using fprintf - c++

My project is tracking object using opencv and send the coordinate to arduino as tx and read the data using another arduino (rx) with 'SoftwareSerial'. There are no problem with object tracking, knowing the coordinate, and communication between 2 arduino. The problem is i can not send the coordinate while 'tracker' running, but when I close the 'tracker', the data start appearing in the serial com.
opencv code
using namespace cv;
using namespace std;
//default capture width and height
int FRAME_WIDTH = 320; //640 320
int FRAME_HEIGHT = 240; //480 240
// Minimum contour area (in px^2) for a blob to be treated as the object.
int MIN_OBJECT_AREA = 10*10;
// HSV threshold bounds consumed by inRange(); adjustable at runtime via
// the "Control" window trackbars created in main().
int iLowH = 16;
int iHighH = 104;
int iLowS = 110;
int iHighS = 164;
int iLowV = 63;
int iHighV = 255;
// Centre of the capture frame (set each loop iteration in main()).
int centerX, centerY;
// Last detected object centroid, truncated to int for serial transmission.
int Xg,Yg;
int Modefilter = 1;
// NOTE(review): fp is never used anywhere in this file — apparently vestigial.
FILE *fp;
// Set to true when an object is detected; never read afterwards —
// NOTE(review): looks vestigial, confirm before removing.
bool kirim=false;
// Render an integer as its decimal string representation.
string intToString(int number){
    std::ostringstream buffer;
    buffer << number;
    return buffer.str();
}
// Placeholder for future detection logic; intentionally empty and never called.
void detect(){
}
int main( int argc, char** argv ){
//open serial
FILE* serial = fopen("\\\\.\\COM3", "w+");
if (serial == 0) {
printf("Failed to open serial port\n");
}
//capture the video from web cam
VideoCapture cap(0);
// if not success, exit program
if ( !cap.isOpened() ){
cout << "Cannot open the web cam" << endl;
return -1;
}
//set height and width of capture frame
cap.set(CV_CAP_PROP_FRAME_WIDTH,FRAME_WIDTH);
cap.set(CV_CAP_PROP_FRAME_HEIGHT,FRAME_HEIGHT);
//create a window called "Control"
namedWindow("Control", CV_WINDOW_AUTOSIZE);
//Create trackbars in "Control" window
cvCreateTrackbar("LowH", "Control", &iLowH, 179); //Hue (0 - 179)
cvCreateTrackbar("HighH", "Control", &iHighH, 179);
cvCreateTrackbar("LowS", "Control", &iLowS, 255); //Saturation (0 - 255)
cvCreateTrackbar("HighS", "Control", &iHighS, 255);
cvCreateTrackbar("LowV", "Control", &iLowV, 255); //Value (0 - 255)
cvCreateTrackbar("HighV", "Control", &iHighV, 255);
string XX,YY,parser1,parser2,result;
while (serial!=0){
Mat imgOriginal;
bool bSuccess = cap.read(imgOriginal); // read a new frame from video
if (!bSuccess){ //if not success, break loop
cout << "Cannot read a frame from video stream" << endl;
break;
}
//Convert the captured frame from BGR to HSV
Mat imgHSV;
cvtColor(imgOriginal, imgHSV, COLOR_BGR2HSV);
//find center point
centerX = FRAME_WIDTH/2;
centerY = FRAME_HEIGHT/2;
putText(imgOriginal, "Tekan", Point(5,10), FONT_HERSHEY_COMPLEX, 0.35, Scalar(0, 255, 0), 0.25, 8);
putText(imgOriginal, "a : Mulai Mengikuti Objek", Point(5,20), FONT_HERSHEY_COMPLEX, 0.35, Scalar(0, 255, 0), 0.25, 8);
putText(imgOriginal, "b : Berhenti Mengikuti Objek", Point(5,30), FONT_HERSHEY_COMPLEX, 0.35, Scalar(0, 255, 0), 0.25, 8);
//create cross line
line(imgOriginal,Point(centerX, centerY-20), Point(centerX, centerY+20), Scalar(0,255,0), 1.5);
line(imgOriginal,Point(centerX-20, centerY), Point(centerX+20, centerY), Scalar(0,255,0), 1.5);
//Threshold the image
Mat imgThresholded;
inRange(imgHSV, Scalar(iLowH, iLowS, iLowV), Scalar(iHighH, iHighS, iHighV), imgThresholded);
//morphological opening (remove small objects from the foreground)
erode(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );
dilate( imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );
//morphological closing (fill small holes in the foreground)
dilate( imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );
erode(imgThresholded, imgThresholded, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)) );
//these two vectors needed for output of findContours
vector< vector<Point> > contours;
vector<Vec4i> hierarchy;
Mat imgContour;
imgThresholded.copyTo(imgContour);
//find contours of filtered image using openCV findContours function
findContours(imgContour,contours,hierarchy,CV_RETR_CCOMP,CV_CHAIN_APPROX_SIMPLE );
//use moments method to find our filtered object
double refArea = 0;
if (hierarchy.size() > 0) {
int numObjects = hierarchy.size();
for (int index = 0; index >= 0; index = hierarchy[index][0]) {
Moments moment = moments((cv::Mat)contours[index]);
double area = moment.m00;
if(area>MIN_OBJECT_AREA){ //jika area kontur lebih besar dari minimum area object maka gambar lingkaran dan tulis koordinat
kirim=true;
double x = moment.m10/area;
double y = moment.m01/area;
double r = sqrt(area/3.14);
Xg=(int) x;
Yg=(int) y;
circle(imgOriginal, Point(x,y), r, Scalar(0,0,255), 1.5, 8);
line(imgOriginal, Point(x,y-r-5), Point(x,y+r+5), Scalar(0,0,255), 1.5, 8);
line(imgOriginal, Point(x-r-5,y), Point(x+r+5,y), Scalar(0,0,255), 1.5, 8);
putText(imgOriginal, intToString(x) + "," + intToString(y), Point(x,y+10), FONT_HERSHEY_COMPLEX, 0.25, Scalar(0, 255, 0), 0.3, 8);
// send x,y coordinate to arduino
parser1="*"; parser2="!";
ostringstream xxx,yyy ;
xxx << Xg;
yyy << Yg;
XX=xxx.str(); YY=yyy.str();
result=parser1+XX+parser2+YY;
cout << result << endl;
fprintf(serial, "%s\n", result.c_str());
fflush(serial);
}//end if
}//end for
}//end if
//show the thresholded image
Mat dstimgThresholded;
resize(imgThresholded, dstimgThresholded, Size(), 2, 2, INTER_CUBIC);
imshow("Thresholded Image", dstimgThresholded);
//show the original image
Mat dstimgOriginal;
resize(imgOriginal, dstimgOriginal, Size(), 2, 2, INTER_CUBIC);
imshow("Original", dstimgOriginal);
///send data
if (waitKey(5) == 27) {//wait for 'esc' key press for 30ms. If 'esc' key is pressed, break loop
cout << "esc key is pressed by user" << endl;
break;
}
} //end while
return 0;
}
tx arduino
#include <SoftwareSerial.h>
// Transmitter sketch: forwards whatever arrives on the hardware serial
// (from the PC) out through a SoftwareSerial link on pins 2/3.
SoftwareSerial SWsend(2, 3); // (rx,tx)
String data; // most recent chunk read from the PC
void setup() {
// put your setup code here, to run once:
Serial.begin(115200);
SWsend.begin(4800);
//pinMode(12,INPUT_PULLUP);
}
// Forwards pending hardware-serial bytes to the SoftwareSerial link.
// NOTE(review): the name is inherited from an ESP01 example; this sketch
// actually relays tracker coordinates, and SoftwareSerial's boolean test
// below may always be true once begin() was called — confirm.
void Send_SSIDandPWD_ToESP01() {
while (!SWsend) {
; // wait for serial port to connect.
Serial.println(F("wait for serial port to connect."));
}
while (Serial.available()) {
// readString() blocks until a timeout, collecting the whole packet.
data = Serial.readString();
//data = Serial.read();
Serial.print(F("Send:"));
Serial.println(data);
SWsend.print(data); // relay unchanged to the receiver Arduino
}
}
void loop() {
// put your main code here, to run repeatedly:
Send_SSIDandPWD_ToESP01();
delay(25);
}
rx arduino
#include <SoftwareSerial.h>
// Receiver sketch: parses "*<x>!<y>\n" packets arriving on a SoftwareSerial
// link. '*' starts the X field, '!' starts the Y field, '\n' terminates the
// packet. Field names (SSID/PWD) are inherited from the example this was
// adapted from: strSSID holds X and strPWD holds Y.
SoftwareSerial SWrecv(2, 3); //(rx,tx)
String strSSID = ""; // a string to hold incoming data
String strPWD = "";
bool keepSSID = false; // currently accumulating the X field
bool keepPWD = false; // currently accumulating the Y field
boolean stringComplete = false; // whether the string is complete
void setup() {
// initialize serial:
Serial.begin(115200);
SWrecv.begin(4800);
// Turn on the blacklight and print a message.
Serial.println(F("Hello, world!"));
}
void loop() {
// print the string when a newline arrives:
SWrecvEvent();
if (stringComplete) {
Serial.println("X:" + strSSID + ", Y:" + strPWD);
// clear the string:
// inputString = "";
if (strSSID == "")
{
Serial.println("SSID:not config");
}
// Reset parser state for the next packet.
strSSID = ""; // a string to hold incoming data
strPWD = "";
stringComplete = false;
}
}
// Drains the SoftwareSerial buffer, advancing the small parser state
// machine one character at a time.
void SWrecvEvent() {
while (SWrecv.available()) {
// get the new byte:
char inChar = (char)SWrecv.read();
//Serial.print(inChar); ///////////////////string asli ("original string")
// add it to the inputString:
switch (inChar ) {
case '*': // start-of-X marker
{
keepSSID = true;
keepPWD = false;
}
break;
case '!': // start-of-Y marker
{
keepSSID = false;
keepPWD = true;
}
break;
default:
{
// Newline ends the packet; hand it to loop() for printing.
if (inChar == '\n') {
stringComplete = true;
keepSSID = false;
keepPWD = false;
return;
}
if (keepSSID == true )
{
strSSID += inChar;
}
else if ( keepPWD == true )
{
strPWD += inChar;
}
}
break;
}
}
}
What should I do to write the coordinates continuously while the tracker is running?

It could be that the IO is being buffered, and only flushed when you exit your tx program. Try calling Flush to force the data to send.

Related

How reduce false detection in my code and how to improve tracking accuracy with opencv in c++? [closed]

Closed. This question needs to be more focused. It is not currently accepting answers.
Want to improve this question? Update the question so it focuses on one problem only by editing this post.
Closed 10 months ago.
Improve this question
**Requirements:**
(1) Build OpenCV with Cuda and compile in C++
(2) Version = OpenCV Latest Version
(3) Build and Compile OpenCV Link: https://techawarey.com/programming/install-opencv-c-c-in-ubuntu-18-04-lts-step-by-step-guide/#Summary
(4) Library samples_utility is here: https://github.com/opencv/opencv_contrib/blob/master/modules/tracking/samples/samples_utility.hpp
(4) Compile Program Command: g++ test.cpp -o testoutput -std=c++11 'pkg-config --cflags --libs opencv'
(5) Run Program Command: ./testoutput
Code is working fine but not accurate
Step: 1
Read Frame from Camera
Select ROI(Region of Interest)
After that start KCF tracker with Sobal Features Extractor
Tracking the selected object.
Step: 2
Failure detect
After that call template matching function called MatchingMethod()
Run template matching
Get x, y value from template matching
After that reinitialize KCF tracker with Sobal Features Extractor.
This code is fine for still object when the object is moving the tracker false detection. I want to improve accuracy and reduce false detection.
#include <opencv2/core/utility.hpp>
#include <opencv2/tracking.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/core/ocl.hpp>
#include <iostream>
#include <cstring>
#include <unistd.h>
#include "sample_utility.hpp"
#include <thread>
#include <opencv2/cudaimgproc.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/cudaarithm.hpp>
#include <iomanip>
#include <stdlib.h>
#include <unistd.h>
//////////////////////////////
using namespace cv;
using namespace std;
////////////////////////////
// Convert any streamable value to a std::string via a temporary ostringstream.
#define SSTR( x ) static_cast< std::ostringstream & >( \
( std::ostringstream() << std::dec << x ) ).str()
/// Global Variables
// Two-element carrier for the (x, y) best-match location returned by
// MatchingMethod().
struct Array {
int arr[2];
};
Mat img;    // current frame handed to MatchingMethod()
Mat templ;  // template image ("1.png") used for re-acquisition
Mat result_h;
bool flag = true;
int match_method = 5;
int i=0;
int max_Trackbar = 5;
float fps;
int seconds = 0;
// Function Headers
void delay();
// prototype of the function for the KCF custom feature extractor
void sobelExtractor(const Mat img, const Rect roi, Mat& feat);
struct Array MatchingMethod( int, void* );
/**
 * KCF tracking with template-matching re-acquisition on failure.
 *
 * Fixes vs. the original:
 *  - `display_height`/`display_width` were undeclared; the frame's own
 *    rows/cols are used for the screen centre;
 *  - the failure branch used undeclared `w`/`h`; the last known bbox size
 *    is captured before update() and reused;
 *  - the re-detected `Rect bbox` shadowed the outer bbox, so the reinit
 *    result was discarded on the next iteration; the outer bbox is now
 *    assigned directly.
 */
int main(int argc, char **argv)
{
    TrackerKCF::Params param;
    param.compress_feature = true;
    param.compressed_size = 2;
    param.desc_npca = 0;
    param.desc_pca = TrackerKCF::GRAY | TrackerKCF::CN;
    param.detect_thresh = 0.8;
    // Create the tracker with the Sobel feature extractor.
    Ptr<TrackerKCF> tracker = TrackerKCF::create(param);
    tracker->setFeatureExtractor(sobelExtractor);
    VideoCapture cap(0);
    // Exit if video is not opened.
    if(!cap.isOpened())
    {
        //cout << "Could not read video file" << endl;
        return 1;
    }
    // Read first frame and let the user select the target ROI.
    Mat frame;
    bool okRead = cap.read(frame);
    Rect bbox = selectROI(frame, false);
    // Display bounding box.
    rectangle(frame, bbox, Scalar( 255, 0, 0 ), 2, 1 );
    // Centre of the camera frame (the original referenced the undeclared
    // display_height/display_width here).
    const int H = frame.rows;
    const int W = frame.cols;
    const int cW = W / 2;
    const int cH = H / 2;
    Point p1(cW, cH);
    // Save the selected patch as the re-acquisition template.
    Mat imCrop = frame(bbox);
    imwrite("1.png", imCrop);
    // Quit if ROI was not selected.
    if(bbox.width==0 || bbox.height==0)
        return 0;
    tracker->init(frame, bbox);
    while(true)
    {
        cap >> frame;
        circle(frame, p1, 3, Scalar(0,255,0), -1);
        if(bbox.width!=0 || bbox.height!=0){
            double timer = (double)getTickCount();
            // Remember the last known size in case update() fails below.
            const int lastW = bbox.width;
            const int lastH = bbox.height;
            bool ok = tracker->update(frame, bbox);
            // Calculate frames per second (FPS).
            fps = getTickFrequency() / ((double)getTickCount() - timer);
            if (ok)
            {
                // Tracking success: draw the tracked object and its offset
                // from the screen centre.
                rectangle(frame, bbox, Scalar( 255, 0, 0 ), 2, 1 );
                int cxROI = (bbox.x + (bbox.x + bbox.width)) / 2;
                int cyROI = (bbox.y + (bbox.y + bbox.height)) / 2;
                int diffX = cxROI - cW;
                int diffY = cH - cyROI;
                Point p(cxROI, cyROI);
                circle(frame, p, 3, Scalar(128,0,0), -1);
                putText(frame, "FPS : " + SSTR(int(fps)), Point(100,20), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(50,170,50), 2);
                putText(frame, "Difference From X-Axis: "+SSTR(int(diffX)), Point(100, 50), FONT_HERSHEY_SIMPLEX, 0.6, Scalar(100, 200, 200), 2);
                putText(frame, "Difference From Y-Axis: "+SSTR(int(diffY)), Point(100, 80), FONT_HERSHEY_SIMPLEX, 0.6, Scalar(100, 200, 200), 2);
            }
            else
            {
                // Tracking failure: fall back to template matching, then
                // reinitialise the tracker at the matched location.
                putText(frame, "Tracking failure detected", Point(100,110), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(0,0,255),2);
                templ = imread( "1.png", 1 );
                img=frame.clone();
                struct Array a = MatchingMethod( 0, 0 );
                cout<<"X: "<<a.arr[0]<<"\n";
                cout<<"Y: "<<a.arr[1]<<"\n";
                cout<<"Width: "<<lastW<<"\n";
                cout<<"Height: "<<lastH<<"\n";
                // Assign the OUTER bbox (the original declared a shadowing
                // local Rect here, losing the re-detection next frame).
                bbox = Rect(a.arr[0], a.arr[1], lastW, lastH);
                tracker.release();
                tracker = TrackerKCF::create(param);
                tracker->setFeatureExtractor(sobelExtractor);
                tracker->init(frame, bbox);
                rectangle(frame, bbox, Scalar( 255, 0, 0 ), 2, 1 );
                int cxROI = (bbox.x + (bbox.x + bbox.width)) / 2;
                int cyROI = (bbox.y + (bbox.y + bbox.height)) / 2;
                Point p(cxROI, cyROI);
                circle(frame, p, 3, Scalar(128,0,0), -1);
            }
        }
        // Display frame.
        imshow("Tracking", frame);
        // Exit if ESC pressed.
        int k = waitKey(1);
        if(k == 27)
        {
            break;
        }
    }
    return 0;
}
///////////////////////
// Custom KCF feature extractor: clips `roi` to the image, pads the patch
// back to the requested ROI size with replicated borders, and returns a
// two-channel Sobel (dx, dy) response normalised to [-0.5, 0.5] in `feat`.
void sobelExtractor(const Mat img, const Rect roi, Mat& feat){
Mat sobel[2];
Mat patch;
Rect region=roi;
// extract patch inside the image
if(roi.x<0){region.x=0;region.width+=roi.x;}
if(roi.y<0){region.y=0;region.height+=roi.y;}
if(roi.x+roi.width>img.cols)region.width=img.cols-roi.x;
if(roi.y+roi.height>img.rows)region.height=img.rows-roi.y;
if(region.width>img.cols)region.width=img.cols;
if(region.height>img.rows)region.height=img.rows;
patch=img(region).clone();
cvtColor(patch,patch, COLOR_BGR2GRAY);
// add some padding to compensate when the patch is outside image border
int addTop,addBottom, addLeft, addRight;
addTop=region.y-roi.y;
addBottom=(roi.height+roi.y>img.rows?roi.height+roi.y-img.rows:0);
addLeft=region.x-roi.x;
addRight=(roi.width+roi.x>img.cols?roi.width+roi.x-img.cols:0);
copyMakeBorder(patch,patch,addTop,addBottom,addLeft,addRight,BORDER_REPLICATE);
// kernel size 1 gives the plain 1x3 / 3x1 derivative filters
Sobel(patch, sobel[0], CV_32F,1,0,1);
Sobel(patch, sobel[1], CV_32F,0,1,1);
merge(sobel,2,feat);
feat=feat/255.0-0.5; // normalize to range -0.5 .. 0.5
}
////////////////////////////////////////////////////
struct Array MatchingMethod( int, void* )
{
/// Source image to display
Mat frame;
struct Array a;
/////////
for(int i=1; i<=4; i++){
img.copyTo( frame );
// break;
//}
//////////////////////////
cv::cuda::setDevice(0); // initialize CUDA
// convert from mat to gpumat
cv::cuda::GpuMat image_d(img);
cv::cuda::GpuMat templ_d(templ);
cv::cuda::GpuMat result;
// GPU -> NG
cv::Ptr<cv::cuda::TemplateMatching> alg =
cv::cuda::createTemplateMatching(image_d.type(), cv::TM_CCOEFF_NORMED);
alg->match(image_d, templ_d, result); // no return.
//cv::cuda::normalize(result, result, 0, 1, cv::NORM_MINMAX, -1);
double max_value, min_value;
cv::Point location;
cv::cuda::minMaxLoc(result, &min_value, &max_value, 0, &location);
/////////////////////////
double THRESHOLD = 3e-09; //0.3;
if( min_value <= THRESHOLD) {
//struct Array a;
a.arr[0] = location.x;
a.arr[1] = location.y;
cout<<"Hi"<<endl;
}
}
if(flag==true){
return a;
flag = false;
}
//}
}
Okey here is my answer to your question.
First of all, you are making a mistake by applying template matching when the tracker misses. Because template matching matches the feature if and only if it is totally same with the reference feature. So in your case, there will be shadows, light issues etc. in the environment, and you will never be able to get success results.
Secondly, if you delete the template matching scope, the tracker will continue to search for the target in the image effectively. The changes I made to your code are listed below. With these changes, I got better results:
Delete the template matching scope
Decrease the detection threshold(param.detect_thresh) to 0.5
Create more tracker objects to catch the target: This change is the most important part. What I am suggesting is that create more and more tracker objects(in my case I did 4 tracker objects, but you can increase the number). Each tracker should get as input rectangle similar to ROI user chose but not the same coordinates. For example, if user chooses cv::Rect(200,200,400,400) then other tracker should get target as cv::Rect(180,190,400,400) , cv::Rect(220,180,400,400) ... and so on. Why you should do it because, tracker algorithm is feature based, so it will always try to get a similar features to the reference. By doing this, you will increase the feature references.
And here is my code to guide you:
#include <opencv2/core/utility.hpp>
#include <opencv2/tracking.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/core/ocl.hpp>
#include <iostream>
#include <cstring>
#include <unistd.h>
#include <thread>
#include <opencv2/cudaimgproc.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/cudaarithm.hpp>
#include <iomanip>
#include <stdlib.h>
#include <unistd.h>
//////////////////////////////
using namespace cv;
using namespace std;
////////////////////////////
// Convert any streamable value to a std::string via a temporary ostringstream.
#define SSTR( x ) static_cast< std::ostringstream & >( \
( std::ostringstream() << std::dec << x ) ).str()
/// Global Variables
// Two-element carrier for the (x, y) best-match location returned by
// MatchingMethod().
struct Array {
int arr[2];
};
Mat img;    // current frame handed to MatchingMethod()
Mat templ;  // template image ("1.png")
Mat result_h;
bool flag = true;
int match_method = 5;
int i=0;
int max_Trackbar = 5;
float fps;
int seconds = 0;
// Function Headers
void delay();
// prototype of the function for the KCF custom feature extractor
void sobelExtractor(const Mat img, const Rect roi, Mat& feat);
struct Array MatchingMethod( int, void* );
int main(int argc, char **argv)
{
TrackerKCF::Params param;
param.compress_feature = true;
param.compressed_size = 2;
param.desc_npca = 0;
param.desc_pca = TrackerKCF::GRAY | TrackerKCF::CN;
param.detect_thresh = 0.5;
// create a tracker object
Ptr<TrackerKCF> tracker = TrackerKCF::create(param);
tracker->setFeatureExtractor(sobelExtractor);
Ptr<TrackerKCF> tracker2 = TrackerKCF::create(param);
tracker2->setFeatureExtractor(sobelExtractor);
Ptr<TrackerKCF> tracker3 = TrackerKCF::create(param);
tracker3->setFeatureExtractor(sobelExtractor);
Ptr<TrackerKCF> tracker4 = TrackerKCF::create(param);
tracker4->setFeatureExtractor(sobelExtractor);
VideoCapture cap(0);
// Exit if video is not opened
if(!cap.isOpened())
{
//cout << "Could not read video file" << endl;
return 1;
}
cv::imshow("Tracking",0);
// Read first frame
Mat frame;
bool ok = cap.read(frame);
// Define initial bounding box
//Rect bbox(x, y, w, h);
// Uncomment the line below to select a different bounding box
Rect2d bbox = selectROI(frame, false);
// Display bounding box.
rectangle(frame, bbox, Scalar( 255, 0, 0 ), 2, 1 );
///////////////////////////
int H, W, cW, cH;
// print(f"hight {H} , Width {W}")
H = frame.rows;
W = frame.cols;
// Center point of the screen
cW = int(W / 2);
cH = int(H / 2);
Point p1(cW, cH);
//quit if ROI was not selected
if(bbox.width==0 || bbox.height==0)
return 0;
//////////////////////////
//imshow("Tracking", frame);
tracker->init(frame, bbox);
tracker2->init(frame, cv::Rect2d(bbox.x-10,bbox.y-10, bbox.width,bbox.height));
tracker3->init(frame, cv::Rect2d(bbox.x+10,bbox.y+10, bbox.width,bbox.height));
tracker4->init(frame, cv::Rect2d(bbox.x+20,bbox.y+20, bbox.width,bbox.height));
while(true)
{
Mat frame;
cap >> frame;
circle(frame, p1, 3, Scalar(0,255,0), -1);
// Start timer
if(bbox.width!=0 || bbox.height!=0){
double timer = (double)getTickCount();
// Update the tracking result
/////////////////////////////////////
bool ok = tracker->update(frame, bbox);
bool ok2 = tracker->update(frame, bbox);
bool ok3 = tracker->update(frame, bbox);
bool ok4 = tracker->update(frame, bbox);
//////////////////////////////////////
//ok, bbox = tracker->update(frame);
// Calculate Frames per second (FPS)
fps = getTickFrequency() / ((double)getTickCount() - timer);
if (ok || ok2 || ok3 || ok4)
{
// Tracking success : Draw the tracked object
rectangle(frame, bbox, Scalar( 255, 0, 0 ), 2, 1 );
///////////////////////////////////////////////////
int xxx, yyy, height, width;
xxx = bbox.x;
yyy = bbox.y;
height = bbox.height;
width = bbox.width;
int diffX, diffY;
float cxROI, cyROI;
cxROI = int((xxx + (xxx + width)) / 2);
cyROI = int((yyy + (yyy + height)) / 2);
diffX = cxROI - cW;
diffY = cH - cyROI;
//cout<<diffX<<"\n";
//cout<<diffY<<"\n";
Point p(cxROI, cyROI);
circle(frame, p, 3, Scalar(128,0,0), -1);
putText(frame, "FPS : " + SSTR(int(fps)), Point(100,20), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(50,170,50), 2);
putText(frame, "Difference From X-Axis: "+SSTR(int(diffX)), Point(100, 50), FONT_HERSHEY_SIMPLEX, 0.6, Scalar(100, 200, 200), 2);
putText(frame, "Difference From Y-Axis: "+SSTR(int(diffY)), Point(100, 80), FONT_HERSHEY_SIMPLEX, 0.6, Scalar(100, 200, 200), 2);
}
}
else{
}
// Display frame.
imshow("Tracking", frame);
// Exit if ESC pressed.
int k = waitKey(1);
if(k == 27)
{
break;
}
}
return 0;
}
///////////////////////
// Custom KCF feature extractor: clips `roi` to the image, pads the patch
// back to the requested ROI size with replicated borders, and returns a
// two-channel Sobel (dx, dy) response normalised to [-0.5, 0.5] in `feat`.
void sobelExtractor(const Mat img, const Rect roi, Mat& feat){
Mat sobel[2];
Mat patch;
Rect region=roi;
// extract patch inside the image
if(roi.x<0){region.x=0;region.width+=roi.x;}
if(roi.y<0){region.y=0;region.height+=roi.y;}
if(roi.x+roi.width>img.cols)region.width=img.cols-roi.x;
if(roi.y+roi.height>img.rows)region.height=img.rows-roi.y;
if(region.width>img.cols)region.width=img.cols;
if(region.height>img.rows)region.height=img.rows;
patch=img(region).clone();
cvtColor(patch,patch, COLOR_BGR2GRAY);
// add some padding to compensate when the patch is outside image border
int addTop,addBottom, addLeft, addRight;
addTop=region.y-roi.y;
addBottom=(roi.height+roi.y>img.rows?roi.height+roi.y-img.rows:0);
addLeft=region.x-roi.x;
addRight=(roi.width+roi.x>img.cols?roi.width+roi.x-img.cols:0);
copyMakeBorder(patch,patch,addTop,addBottom,addLeft,addRight,BORDER_REPLICATE);
// kernel size 1 gives the plain 1x3 / 3x1 derivative filters
Sobel(patch, sobel[0], CV_32F,1,0,1);
Sobel(patch, sobel[1], CV_32F,0,1,1);
merge(sobel,2,feat);
feat=feat/255.0-0.5; // normalize to range -0.5 .. 0.5
}
////////////////////////////////////////////////////
struct Array MatchingMethod( int, void* )
{
/// Source image to display
Mat frame;
struct Array a;
/////////
for(int i=1; i<=4; i++){
img.copyTo( frame );
// break;
//}
//////////////////////////
cv::cuda::setDevice(0); // initialize CUDA
// convert from mat to gpumat
cv::cuda::GpuMat image_d(img);
cv::cuda::GpuMat templ_d(templ);
cv::cuda::GpuMat result;
// GPU -> NG
cv::Ptr<cv::cuda::TemplateMatching> alg =
cv::cuda::createTemplateMatching(image_d.type(), cv::TM_CCOEFF_NORMED);
alg->match(image_d, templ_d, result); // no return.
//cv::cuda::normalize(result, result, 0, 1, cv::NORM_MINMAX, -1);
double max_value, min_value;
cv::Point location;
cv::cuda::minMaxLoc(result, &min_value, &max_value, 0, &location);
/////////////////////////
double THRESHOLD = 3e-09; //0.3;
if( min_value <= THRESHOLD) {
//struct Array a;
a.arr[0] = location.x;
a.arr[1] = location.y;
cout<<"Hi"<<endl;
}
}
if(flag==true){
return a;
flag = false;
}
//}
}

Speed up Serial Communication to Arduino c++

First time posting so I apologize.
I am trying to make a ball and beam project using a camera as the sensor and trying to get the serial communication to work.
The code currently works if I set the waitKey to (1000) but that is not fast enough for the project to work and was wanting some help speeding it up.
the output tends to only read the first string that I send then ignores anything else until the program is paused by holding the slider on the output screen for a couple of seconds.
I have tried to use a faster baud rate but the problem still persists.
This is the C++ code
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>
#include <vector>
#include "SerialPort.h"
#include <string.h>
#include <stdlib.h>
#include <string>
using namespace cv;
using namespace std;
// ---- image-processing globals ----
Mat imgGray, imgCanny, imgBlur, imgDil;
Mat imgHSV, mask;
//int hmin = 53, smin = 102, vmin = 35; //green
//int hmax = 93, smax = 253, vmax = 169;
/* //green ball
int hmin = 41, smin = 93, vmin = 49;
int hmax = 93, smax = 243, vmax = 192; //p
*/
//purple ball
int hmin = 117, smin = 26, vmin = 63;
int hmax = 179, smax = 113, vmax = 169;
// ---- serial-communication globals ----
char output[MAX_DATA_LENGTH];       // last reply read from the Arduino
char incomingData[MAX_DATA_LENGTH];
// Windows device path for COM3. Bug fix: the old value " ////.//COM3" had
// a leading space and forward slashes, so the port could never be opened;
// the Win32 form is "\\\\.\\COM3".
char port[] = "\\\\.\\COM3";
int PosX; // latest ball x-position, written by getContours()
// this will convert the output into the correct string because the "stoi" function doesn't work as intended
// Converts a servo command to its decimal string, clamping to the valid
// range [80, 100]: values below 80 yield "80", above 100 yield "100".
// Replaces the old 23-branch if/else chain with a clamp + std::to_string
// (behaviour is identical for every int input).
String intToString(int I)
{
    int clamped = I;
    if (clamped < 80) clamped = 80;    // below range -> lowest command
    if (clamped > 100) clamped = 100;  // above range -> highest command
    return std::to_string(clamped);
}
// calculation to make the output between outMin and outMax taken from arduino website and transformed into calculation
// Linear integer rescale of x from [in_min, in_max] onto [out_min, out_max]
// (same arithmetic as the Arduino map() helper; truncating division).
int map(int x,int in_min, int in_max, int out_min, int out_max)
{
    const int span_in = in_max - in_min;
    const int span_out = out_max - out_min;
    return (x - in_min) * span_out / span_in + out_min;
}
// just the P of the PID controller
// Proportional-only controller: converts the ball's x-position into a servo
// command in [80, 100], centred on 90 when the ball sits at x == 240 (the
// middle of the camera image).
int PCONTROL(int xPos)
{
    const int kGain = 1;
    const int offset = 240 - xPos;          // error from the image centre
    const int proportional = kGain * offset;
    // Same integer rescale as map(proportional, -240, 240, 80, 100).
    return (proportional + 240) * (100 - 80) / (240 + 240) + 80;
}
// this is the function made to be the PID contol loop
// PID control loop: maps the ball's x-position error onto a servo command
// in [80, 100] and prints the value. With the current gains (Kp=1, Ki=0,
// Kd=0) the output is identical to the original, but two defects are fixed
// so the I and D terms work when their gains are raised:
//  - error_prior/integral_prior are now `static`; the old locals reset to 0
//    every call, silently disabling the integral and derivative terms;
//  - Kp was applied twice (PID_total used Kp * proportional where
//    proportional was already Kp * error); each gain is applied once.
int PIDCONTROL(int xPos)
{
    static int error_prior = 0;     // persists across calls (was reset each call)
    static int integral_prior = 0;
    const int Kp = 1, Ki = 0, Kd = 0;
    const int period = 50;          // assumed loop period used by the D term
    //error = (img.rows/2) - xPos; // 640 is the desired Value
    const int error = (480/2) - xPos; // setpoint is the image midline
    const int proportional = Kp * error;
    const int integral = integral_prior + error;
    const int derivative = (error - error_prior) / period;
    const int PID_total = proportional + (Ki * integral) + (Kd * derivative);
    error_prior = error;
    integral_prior = integral;
    // Rescale from [-240, 240] to the servo range [80, 100]
    // (same integer arithmetic as map(PID_total, -240, 240, 80, 100)).
    const int outPut = (PID_total + 240) * (100 - 80) / (240 + 240) + 80;
    std::cout << "output = " << outPut << std::endl;
    return outPut;
}
// this code will get the x and y values of the ball, draw a box around it and lable it
// this is what i need for the project
// this code will get the x and y values of the ball, draw a box around it and lable it
// this is what i need for the project
// Finds contours in the dilated edge image `imgDil`, and for each contour
// whose area falls in (1000, 5000) draws its outline and bounding box on
// `img` and publishes the box's x into the global PosX (read by main()).
void getContours(Mat imgDil, Mat img)
{
vector<vector<Point>> contours;
vector<Vec4i> hierarchy;
string objectType;
string x, y;
findContours(imgDil, contours, hierarchy, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
//drawContours(img, contours, -1, Scalar(255, 0, 255), 2); //draws everything
for (int i = 0; i < contours.size(); i++)
{
int area = contourArea(contours[i]);
//cout << area << endl; //displays the area of the ball
// NOTE(review): these vectors are re-created on every iteration; only
// index i is ever used, so a single Rect/poly would do.
vector<vector<Point>>conPoly(contours.size());
vector<Rect>boundRect(contours.size());
if (area > 1000 && area < 5000) //filters out smaller and larger objects
{
float peri = arcLength(contours[i], true);
approxPolyDP(contours[i], conPoly[i], 0.02 * peri, true);
drawContours(img, contours, i, Scalar(255, 0, 255), 2); // with a filter applyed
boundRect[i] = boundingRect(conPoly[i]); //draw a box around the ball
rectangle(img, boundRect[i].tl(), boundRect[i].br(), Scalar(0, 255, 0), 5);
putText(img, "Ball", { boundRect[i].x,boundRect[i].y - 5 }, FONT_HERSHEY_PLAIN, 1, Scalar(0, 69, 255), 2); //display text above the box saying ball
//cout << "X = " << boundRect[i].x << endl;// << "Y = " << boundRect[i].y << endl; //print the x and y values
// Publish the last qualifying box's x; if several contours match,
// the final one wins.
PosX = boundRect[i].x;
}
}
}
void main()
{
VideoCapture cap(0);
Mat img;
string xposSrg;
//int TempData;
Mat Reimg, Reimg2;
string data;
//char TrackbarName[50];
const int alpha_slider_max = 100;
int xPos, preData = 0;
createTrackbar("Hue Min", "Trackbars", &hmin, 179);
SerialPort arduino(port);
if (arduino.isConnected()) {
cout << "Connection" << endl << endl;
}
else {
cout << "Error Connecting" << endl << endl;
}
while (arduino.isConnected()) // no point do it if not connected
{
cap.read(img);
//prepocessing
Mat imgGray, imgCanny, imgBlur,Reimg,Reimg2;
//cvtColor(img, imgGray, COLOR_BGR2GRAY);
//pyrDown(img, Reimg, Size(img.cols / 2, img.rows / 2)); // downsample for faster processing
cvtColor(img, imgHSV, COLOR_BGR2HSV); // get the big wierd colours
Scalar lower(hmin, smin, vmin);
Scalar upper(hmax, smax, vmax);
inRange(imgHSV, lower, upper, mask); // make only the green ball visible
GaussianBlur(mask, imgBlur, Size(3, 3), 3, 0);
Canny(imgBlur, imgCanny, 25, 75);
Mat kernel = getStructuringElement(MORPH_RECT, Size(3, 3));
dilate(imgCanny, imgDil, kernel);
getContours(imgDil, img); // use only the visible green to the contours fuction
//displaying processes used in debuging
/*
imshow("Ball", img);
imshow("Gray", imgGray);
imshow("Blur", imgBlur);
imshow("Canny", imgCanny);
imshow("Dil", imgDil);
*/
//cout << img.rows << endl;;
//display the orginal img with the contors drawn on to it
imshow("img", img);
// do the PID controller calculations
preData = PIDCONTROL(PosX);
//send the data to the arduino
data = intToString(preData);
cout << "Data =" << data << "." << endl;
char* charArray = new char[data.size() +1];
copy(data.begin(), data.end(), charArray);
charArray[data.size()] = '\n';
arduino.writeSerialPort(charArray, MAX_DATA_LENGTH);
arduino.readSerialPort(output, MAX_DATA_LENGTH);
cout << " Arduino >> " << output << "." << endl;
delete[] charArray;
waitKey(1); //work as fast as possible
// turns out this library can only output every second
}
}
Here is the Arduino code
#include <Servo.h>
// Receiver sketch for the ball-and-beam rig: reads a servo command
// (an integer 80..100, newline terminated) from the PC and drives the
// beam servo accordingly.
Servo myservo;  // beam servo, attached to pin 9 in setup()
String info;    // last command line received over serial
void setup() {
Serial.begin(9600);
//pinMode(led,OUTPUT);
myservo.attach(9); // attaches the servo on pin 9 to the servo object
myservo.write(90); // set the starting position to 90 degrees(flat)
}
// this is the one for on using 10 degrees
// Reads one newline-terminated angle command per iteration and drives the
// servo. Generalized from the original 21-branch if/else chain: any integer
// command in [80, 100] is echoed back and written to the servo, which is
// exactly the set of values the original chain handled. "0" is the sentinel
// for "no data" and re-centers the servo. Anything else is ignored, as before.
void loop()
{
  if (Serial.available() > 0)
  {
    info = Serial.readStringUntil('\n');
  }
  Serial.flush();
  if (info.equals("0"))
  {
    Serial.println("Not Receiving Data");
    myservo.write(90);
  }
  else
  {
    // toInt() returns 0 for non-numeric strings, which falls outside the
    // accepted range, so garbage input is ignored just like in the original.
    long angle = info.toInt();
    if (angle >= 80 && angle <= 100)
    {
      Serial.println(angle);         // echo the accepted angle, e.g. "90"
      myservo.write((int)angle);
    }
  }
}
Here is the header file, taken from https://www.youtube.com/watch?v=8BWjyZxGr5o
// SerialPort.h — minimal blocking Win32 COM-port wrapper (from the linked tutorial).
#ifndef SERIALPORT_H
#define SERIALPORT_H
#define ARDUINO_WAIT_TIME 2000 // ms to wait after opening; Arduinos auto-reset on DTR
#define MAX_DATA_LENGTH 256 // buffer size the callers use for reads/writes
#include <windows.h>
#include <stdio.h>
#include <stdlib.h>
// Wraps a Win32 serial HANDLE configured as 9600 8N1; the destructor closes it.
class SerialPort
{
private:
HANDLE handler; // Win32 file handle for the COM port
bool connected; // true once the port is open and configured
COMSTAT status; // last ClearCommError() status (status.cbInQue = bytes waiting)
DWORD errors; // last ClearCommError() error flags
public:
SerialPort(char* portName);
~SerialPort();
// Reads up to buf_size of the bytes already queued; returns the byte count.
// NOTE(review): the buffer is NOT NUL-terminated by this call — callers that
// print it as a C string must terminate it themselves.
int readSerialPort(char* buffer, unsigned int buf_size);
// Writes buf_size bytes; returns false if WriteFile fails.
bool writeSerialPort(char* buffer, unsigned int buf_size);
bool isConnected();
};
#endif // SERIALPORT_H
#include "SerialPort.h"
// Opens the named COM port and configures it as 9600 baud, 8 data bits,
// 1 stop bit, no parity, with DTR asserted. On any failure an error is
// printed and `connected` stays false; on success the input/output queues
// are purged and we pause so an auto-resetting Arduino can boot.
SerialPort::SerialPort(char* portName)
{
    this->connected = false;
    this->handler = CreateFileA(static_cast<LPCSTR>(portName),
                                GENERIC_READ | GENERIC_WRITE,
                                0,
                                NULL,
                                OPEN_EXISTING,
                                FILE_ATTRIBUTE_NORMAL,
                                NULL);
    if (this->handler == INVALID_HANDLE_VALUE) {
        if (GetLastError() == ERROR_FILE_NOT_FOUND) {
            printf("ERROR: Handle was not attached. Reason: %s not available\n", portName);
        }
        else
        {
            printf("ERROR!!!");
        }
        return;
    }
    DCB params = { 0 };
    if (!GetCommState(this->handler, &params)) {
        printf("failed to get current serial parameters");
        return;
    }
    params.BaudRate = CBR_9600;
    params.ByteSize = 8;
    params.StopBits = ONESTOPBIT;
    params.Parity = NOPARITY;
    params.fDtrControl = DTR_CONTROL_ENABLE; // keep DTR high (resets the Arduino once, on open)
    if (!SetCommState(handler, &params))
    {
        printf("ALERT: could not set Serial port parameters\n");
        return;
    }
    this->connected = true;
    PurgeComm(this->handler, PURGE_RXCLEAR | PURGE_TXCLEAR);
    Sleep(ARDUINO_WAIT_TIME); // let the board finish its post-reset boot
}
// Closes the port handle if (and only if) the constructor succeeded.
SerialPort::~SerialPort()
{
    if (!this->connected) return;
    this->connected = false;
    CloseHandle(this->handler);
}
// Non-blocking read: copies at most buf_size of the bytes currently queued on
// the port into `buffer` and returns how many were read (0 on error or when
// nothing is waiting).
// NOTE(review): the buffer is NOT NUL-terminated here; callers printing it as
// a C string (e.g. with operator<<) must terminate it at the returned length.
int SerialPort::readSerialPort(char* buffer, unsigned int buf_size)
{
    ClearCommError(this->handler, &this->errors, &this->status);
    DWORD queued = this->status.cbInQue;
    if (queued == 0) {
        // FIX: the original still issued a zero-byte ReadFile in this case;
        // skip the pointless system call and report "nothing read" directly.
        return 0;
    }
    DWORD toRead = (queued > buf_size) ? buf_size : queued;
    DWORD bytesRead = 0;
    if (ReadFile(this->handler, buffer, toRead, &bytesRead, NULL)) {
        return (int)bytesRead;
    }
    return 0;
}
// Writes buf_size bytes to the port. Returns true on success; on failure
// the comm error state is refreshed and false is returned.
bool SerialPort::writeSerialPort(char* buffer, unsigned int buf_size)
{
    DWORD written = 0;
    const BOOL ok = WriteFile(this->handler, (void*)buffer, buf_size, &written, 0);
    if (ok) {
        return true;
    }
    ClearCommError(this->handler, &this->errors, &this->status);
    return false;
}
bool SerialPort::isConnected()
{
return this->connected;
}

segmentation fault error in Opencv c++

I am trying to write an OpenCV program that decides whether a human has approached in front of the camera. After I run the executable, I see the captured video for a few seconds and then encounter a segmentation fault error.
The code is like this
Here are headers:
#include "opencv2/objdetect.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
// Program-wide state shared between main() and the helpers below.
static int cApp = 0;// number of consecutive frames classified as "approached"
static double last = 0; // previous frame's (smoothed) detection area; updated in computeArea()
// Forward declarations.
void detectAndDisplay( Mat frame );
bool computeArea( double width, double height, double lastArea);
double runningAverage(int M);
// Classifier and window title.
// NOTE(review): this path string appears unused in the code shown (main() calls
// load() with its own absolute path) and looks broken anyway — it is missing the
// leading '/' and contains a stray space before "3.0.0".
String upperbody_cascade_name = "home/pi/opencv- 3.0.0/data/haarcascades/haarcascade_upperbody.xml";
CascadeClassifier upper_cascade;
String window_name = "Capture - upper body detection";
Here is the main function:
// Entry point: loads the upper-body Haar cascade, opens the default camera,
// and runs detection on every frame until the stream ends or Esc is pressed.
int main( void )
{
    VideoCapture capture;
    Mat frame;
    // Load the upper-body cascade used by detectAndDisplay().
    upper_cascade.load("/home/pi/opencv-3.0.0/data/haarcascades/haarcascade_upperbody.xml");
    // Open the default capture device.
    capture.open( -1 );
    if ( ! capture.isOpened() ) { printf("--(!)Error opening video capture\n"); return -1; }
    while ( capture.read(frame) )
    {
        if( frame.empty() )
        {
            printf(" --(!) No captured frame -- Break!");
            break;
        }
        // Run the classifier on the current frame and display the result.
        detectAndDisplay( frame );
        if( (char)waitKey(10) == 27 ) { break; } // Esc quits
    }
    capture.release();
    return 0;
}
Here is the detectAndDisplay function:
// Detects upper bodies in `frame`, draws an ellipse around the first one,
// and counts consecutive "approached" frames via the globals cApp / last.
// Shows the annotated frame in `window_name`.
void detectAndDisplay( Mat frame )
{
    std::vector<Rect> upperbodys;
    Mat frame_gray;
    cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
    equalizeHist( frame_gray, frame_gray );
    //-- Detect upperbodys
    upper_cascade.detectMultiScale( frame_gray, upperbodys, 1.05, 3, 0|CASCADE_SCALE_IMAGE, Size(30, 30) );
    // FIX: the original read upperbodys[0] unconditionally; when nothing is
    // detected the vector is empty and indexing it is undefined behaviour —
    // this is the reported segmentation fault.
    if ( !upperbodys.empty() )
    {
        Point center( upperbodys[0].x + upperbodys[0].width/2, upperbodys[0].y + upperbodys[0].height/2 );
        ellipse( frame, center, Size( upperbodys[0].width/2, upperbodys[0].height/2 ), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
        bool ifApproached = computeArea(upperbodys[0].width/2, upperbodys[0].height/2, last);
        if (ifApproached == true) {
            cApp++;
        }
        if (cApp == 3) { // three "approached" frames in a row => report and reset
            cout << "have approached" << endl;
            cApp = cApp - 3;
        }
    }
    //-- Show what you got
    imshow( window_name, frame );
}
Here is the computeArea function:
// Smooths the new bounding-box area with a running average and reports
// whether it grew by more than the threshold since `lastArea` (i.e. the
// subject moved closer). Also records the smoothed area in the global `last`.
bool computeArea( double width, double height, double lastArea) {
    const double smoothed = runningAverage(width * height); // truncates to int, as before
    const bool approached = (smoothed - lastArea) > 1; // 1 = area-growth threshold
    last = smoothed; // remember for the next frame
    return approached;
}
Here is runningAverage function:
// Running average of the last (up to) 5 measurements, kept in a circular
// buffer across calls via function-local statics. `M` is the newest sample.
double runningAverage(int M) {
    static int window[5] = {0}; // circular sample buffer
    static int slot = 0;        // next buffer position to overwrite
    static long total = 0;      // sum of the samples currently in the buffer
    static int filled = 0;      // how many slots hold real data (max 5)
    // Replace the oldest sample and keep the sum incremental for speed.
    total += M - window[slot];
    window[slot] = M;
    slot = (slot + 1) % 5;
    if (filled < 5) {
        ++filled;
    }
    return (double)total / (double)filled;
}
I have searched many opencv segmentation fault questions, some said this segmentation fault was caused by wrong array used, but my case has little use of array. Others said misused of function characters could also cause this kind of errors, I agree with this, some of my characters could be wrong here.
Actually, I found that I should not use upperbodys[0] in the code: sometimes no object is detected at all, so indexing the empty vector causes an invalid memory read. I iterate over the detections with upperbodys[i] instead, and it works well now.
// Fixed version: iterates over the detections instead of blindly indexing
// upperbodys[0], draws an ellipse per body, then compares the first body's
// (half-)area against the previous frame to flag an approach.
void detectAndDisplay( Mat frame )
{
std::vector<Rect> upperbodys;
Mat frame_gray;
cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect upperbodys
upper_cascade.detectMultiScale( frame_gray, upperbodys, 1.05, 3, 0|CASCADE_SCALE_IMAGE, Size(30, 30) );
int size = upperbodys.size();
double newArea = -1; // stays -1 when nothing was detected
for (int i = 0 ; i < size; i++) {
Point center( upperbodys[i].x + upperbodys[i].width/2, upperbodys[i].y + upperbodys[i].height/2 );
ellipse( frame, center, Size( upperbodys[i].width/2, upperbodys[i].height/2 ), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
//bool ifApproached = computeArea(upperbodys[i].width/2, upperbodys[i].height/2, last);
//////////////////////////////////////////
// NOTE: integer division — width/2 * height/2 is computed in int arithmetic
// before the assignment to double.
newArea = upperbodys[i].width/2 * upperbodys[i].height/2;
// NOTE(review): newArea was just assigned above, so this test always passes
// and the loop breaks on the first detection — presumably intentional
// ("use the first body found"), but worth confirming.
if (newArea != -1) {
cout << "UpperBodys has value, index = " << i << endl;
break;
}
}
bool ifApproached = false;
//double presentArea = runningAverage(newArea);
double presentArea = newArea;
double DifferenceBewteenAreas = presentArea - last;
if (DifferenceBewteenAreas > 1) {//threshold
ifApproached = true;
}
last = presentArea; // remember this frame's area for the next call
//////////////////////////////////////////
if (ifApproached == true) {
cApp++;
}
// Three "approached" frames accumulated => report and reset the counter.
if (cApp == 3) {
cout << "have approached" << endl;
cApp = cApp - 3;
}
//-- Show what you got
imshow( window_name, frame );
}

How to display the final decision for face detection using opencv

I am a beginner with OpenCV, using OpenCV 2.4 with VS2013. I have developed face-detection code that works, but the decision is made frame by frame. How can I make a final decision by combining all the frames (frame averaging)? For example, when faces are detected in at least 90% of the frames, the final decision should be 'FACE DETECTED', and vice versa.
Here is my code:
int main(int argc, char** argv)
{
CvCapture* capture;
capture = cvCaptureFromFile("C:/Users/user/Desktop/Face and Motion/45.avi");
//assert(capture != NULL); //terminate if capture is NULL
IplImage* frame;
while (true)
{
frame = cvQueryFrame(capture);
if (!frame)
break;
cvShowImage("original", frame); //show
CvMemStorage* storage = cvCreateMemStorage(0);
CvHaarClassifierCascade* cascade = (CvHaarClassifierCascade*)cvLoad("C:/opencv2410/sources/data/haarcascades/haarcascade_frontalface_alt.xml");
cvClearMemStorage(storage);
CvSeq* faces = cvHaarDetectObjects(frame, cascade, storage, 1.1, 4, 0, cvSize(40, 50));
CvRect* r;
if (faces) //change from (!faces) to (faces)
{
for (int i = 0; i < (faces ? faces->total : 0); i++)
{
r = (CvRect*)cvGetSeqElem(faces, i);
//cvRectangle(frame, cvPoint(100, 50), cvPoint(200, 200), CV_RGB(255, 0, 0), 5, 8);
cvRectangle(frame, cvPoint(r->x, r->y), cvPoint(r->x + r->width, r->y + r->height), CV_RGB(255, 0, 0));
cvShowImage("RESULTS", frame);
char c = cvWaitKey(1000);
}
}
else
{
cvShowImage("RESULT", frame);
}
printf("%d Face Found !\n", faces ? faces->total : 0);
if ((faces ? faces->total : 0) == 0)
{
printf("FACE NOT DETECTED !\n\n");
}
else
{
printf("FACE DETECTED !\n\n");
}
}
return (0);
}
you need two variables like
int frame_count = 0;
int detected_face_count = 0;
You should increase frame_count for every frame and increase detected_face_count whenever a face is detected. Finally, detected_face_count / frame_count (computed with floating-point division, not integer division) gives your desired ratio.
however, i did some corrections on your code (i could not test it)
changes explained by comments. i hope it will be useful.
int main(int argc, char** argv)
{
CvCapture* capture;
capture = cvCaptureFromFile("C:/Users/user/Desktop/Face and Motion/45.avi");
//assert(capture != NULL); //terminate if capture is NULL
IplImage* frame;
//you need two variables
int frame_count = 0;
int detected_face_count = 0;
// these lines should not be in the loop
CvMemStorage* storage = cvCreateMemStorage(0);
CvHaarClassifierCascade* cascade = (CvHaarClassifierCascade*)cvLoad("C:/opencv2410/sources/data/haarcascades/haarcascade_frontalface_alt.xml");
while (true)
{
frame = cvQueryFrame(capture);
if (!frame)
break;
frame_count++; // increase frame_count
cvShowImage("original", frame); //show
cvClearMemStorage(storage);
CvSeq* faces = cvHaarDetectObjects(frame, cascade, storage, 1.1, 4, 0, cvSize(40, 50));
CvRect* r;
for (int i = 0; i < (faces ? faces->total : 0); i++)
{
r = (CvRect*)cvGetSeqElem(faces, i);
//cvRectangle(frame, cvPoint(100, 50), cvPoint(200, 200), CV_RGB(255, 0, 0), 5, 8);
cvRectangle(frame, cvPoint(r->x, r->y), cvPoint(r->x + r->width, r->y + r->height), CV_RGB(255, 0, 0));
}
cvShowImage("RESULT", frame);
char c = cvWaitKey(1000);
printf("%d Face Found !\n", faces ? faces->total : 0);
if ((faces ? faces->total : 0) == 0)
{
printf("FACE NOT DETECTED !\n\n");
}
else
{
detected_face_count ++; // increase detected_face_count
printf("FACE DETECTED !\n\n");
}
}
if( (float)(detected_face_count / frame_count) > 0.89 )
{
printf("FACES DETECTED over %90 of frames!\n\n");
}
return (0);
}
as #Miki stated you should not use deprecated C api
take a look at the code below using C++ api (tested with OpenCV 3.1)
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
// Answer version (C++ API, tested with OpenCV 3.1): per-frame detection with
// running counters, final percentage report at the end.
int main(int argc, char** argv)
{
    VideoCapture capture("C:/Users/user/Desktop/Face and Motion/45.avi");
    if(!capture.isOpened())
    {
        printf("Video file could not be opened !\n");
        return -1;
    }
    Mat frame;
    // Counters for the final ratio.
    int frame_count = 0;
    int detected_face_count = 0;
    // Cascade is loaded once, outside the loop.
    CascadeClassifier cascade;
    if( !cascade.load( "C:/opencv2410/sources/data/haarcascades/haarcascade_frontalface_alt.xml" ) )
    {
        cerr << "ERROR: Could not load classifier cascade" << endl;
        return -1;
    }
    while (true)
    {
        capture >> frame;
        if (frame.empty())
            break;
        frame_count++; // increase frame_count
        imshow("original", frame); //show
        vector<Rect> faces;
        cascade.detectMultiScale(frame, faces, 1.1, 4, 0, Size(40, 50));
        for (size_t i = 0; i < faces.size(); i++)
        {
            rectangle(frame, faces[i], Scalar(0, 0, 255));
        }
        imshow("RESULT", frame);
        waitKey(10);
        // FIX: faces.size() is size_t; passing it to %d is undefined behaviour
        // on 64-bit builds — cast explicitly.
        printf("%d Face Found !\n", (int)faces.size());
        if ( faces.empty() )
        {
            printf("FACE NOT DETECTED !\n\n");
        }
        else
        {
            detected_face_count++; // increase detected_face_count
            printf("FACE DETECTED !\n\n");
        }
    }
    printf("count of frames : %d \n", frame_count);
    printf("count of frames has detected face : %d \n", detected_face_count);
    // FIX: both ratio computations used integer division (always 0 or 1), the
    // "%90" string was a malformed printf conversion, and frame_count == 0
    // would have divided by zero.
    if (frame_count > 0)
    {
        printf("Face Found %d percent of frames!\n", (int)(100.0 * detected_face_count / frame_count));
        if( (float)detected_face_count / (float)frame_count > 0.89f )
        {
            printf("FACES DETECTED over 90%% of frames!\n\n");
        }
    }
    return (0);
}

face detection (how to detect the circle around the face that we've detected)

my name is budi.
I am a newbie in image processing; recently I have been trying to learn OpenCV and Visual Studio. I have already succeeded in detecting a face and drawing a circle around it, thanks to an example on a DIY website. My question is: how can I detect that a circle is encircling a face, so I can use it in an "if" condition? For example:
if ( condition )//there is at least one circle in the frame
{
bla bla bla
}
here is the code i use :
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/ocl/ocl.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/opencv_modules.hpp>
#include <opencv2/videostab/deblurring.hpp>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <Windows.h>
#include <sstream>
#include <string>
using namespace std;
using namespace cv;
// Eight-color palette; indexed as colors[i % 8] per detected face in main().
const static Scalar colors[] = { CV_RGB(0,0,255),
CV_RGB(0,128,255),
CV_RGB(0,255,255),
CV_RGB(0,255,0),
CV_RGB(255,128,0),
CV_RGB(255,255,0),
CV_RGB(255,0,0),
CV_RGB(255,0,255)
} ;
// Forward declaration only — no definition of Draw() appears in the code shown.
void Draw(Mat& img, vector<Rect>& faces, double scale);
// Webcam face tracker: detects faces with a Haar cascade, circles them, and
// sends 'a' (face close, radius >= 85 px) or 'b' (face far) over COM8 so an
// external Arduino can react. Dead commented-out experiments from the original
// have been removed.
int main(int argc, const char** argv)
{
    // --- Serial port setup ---
    HANDLE hSerial = CreateFile(L"COM8", GENERIC_READ | GENERIC_WRITE, 0, 0, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, 0);
    if (hSerial != INVALID_HANDLE_VALUE)
    {
        printf("Port opened! \n");
        // FIX: the original passed an UNINITIALIZED DCB to GetCommState and
        // never set DCBlength; zero it and set the length as the API requires.
        DCB dcbSerialParams = { 0 };
        dcbSerialParams.DCBlength = sizeof(dcbSerialParams);
        GetCommState(hSerial, &dcbSerialParams);
        dcbSerialParams.BaudRate = CBR_9600; // 9600 8N1, matching the Arduino sketch
        dcbSerialParams.ByteSize = 8;
        dcbSerialParams.Parity = NOPARITY;
        dcbSerialParams.StopBits = ONESTOPBIT;
        SetCommState(hSerial, &dcbSerialParams);
    }
    else
    {
        if (GetLastError() == ERROR_FILE_NOT_FOUND)
        {
            printf("Serial port doesn't exist! \n");
        }
        printf("Error while setting up serial port! \n");
    }
    char outputChars[] = "c"; // single-command buffer sent to the Arduino
    DWORD btsIO;
    // --- Face detection setup ---
    CascadeClassifier face_cascade;
    face_cascade.load("haarcascade_frontalface_alt.xml");
    VideoCapture captureDevice;
    // FIX: the original called captureDevice.open(0) twice (once as a statement,
    // once inside the if), opening the camera twice; open once and test the result.
    if (!captureDevice.open(0))
    {
        printf("error: capture error \n");
        getchar(); // pause so the user can read the message
        return(-1);
    }
    Mat captureFrame;
    Mat grayscaleFrame;
    namedWindow("FaceDetection", 1);
    // --- Capture / detect loop (runs until the process is killed) ---
    while (true)
    {
        captureDevice >> captureFrame;
        cvtColor(captureFrame, grayscaleFrame, CV_BGR2GRAY);
        imshow("Grayscale", grayscaleFrame);
        equalizeHist(grayscaleFrame, grayscaleFrame);
        std::vector<Rect> faces;
        face_cascade.detectMultiScale(grayscaleFrame, faces, 1.1, 3, CV_HAAR_FIND_BIGGEST_OBJECT | CV_HAAR_SCALE_IMAGE, Size(30, 30));
        for (size_t i = 0; i < faces.size(); i++)
        {
            Point center(cvRound(faces[i].x + faces[i].width * 0.5), cvRound(faces[i].y + faces[i].height * 0.5));
            int radius = cvRound((faces[i].width + faces[i].height) * 0.25);
            circle(captureFrame, center, radius, cvScalar(0, 255, 0, 0), 1, 8, 0);
            cout << "X:" << faces[i].x << " Y:" << faces[i].y << endl;
            // FIX: only write when the port actually opened; the original wrote
            // to an invalid handle when COM8 was unavailable.
            if (hSerial != INVALID_HANDLE_VALUE)
            {
                if (radius >= 85)
                {
                    outputChars[0] = 'a';
                    WriteFile(hSerial, outputChars, strlen(outputChars), &btsIO, NULL);
                    cout << "radius >= 85, advertising begin" << endl;
                }
                else // radius <= 84
                {
                    outputChars[0] = 'b';
                    WriteFile(hSerial, outputChars, strlen(outputChars), &btsIO, NULL);
                }
            }
        }
        imshow("FaceDetection", captureFrame);
        waitKey(60); // ~60 ms pause between frames
    }
    // Unreachable while the loop above is infinite (as in the original), but
    // kept for completeness.
    cvDestroyWindow("FaceDetection");
    cvDestroyWindow("Grayscale");
    if (hSerial != INVALID_HANDLE_VALUE)
    {
        FlushFileBuffers(hSerial);
        CloseHandle(hSerial); // closes the serial port
    }
    return 0;
}
please help me, and thank you all for the attention.