I change and read vector elements from different threads. While the worker thread is running, I use a mutex when changing the vector's elements. But if I just want to enumerate the vector's elements, do I still have to lock the vector?
Here is the code:
#include <napi.h>
#include "facedetect.h"
#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <thread>
using namespace std;
using namespace cv;
std::mutex facesMutex;
string cascadeName = "/usr/local/share/opencv4/haarcascades/haarcascade_frontalface_alt.xml";
bool running = true;
vector<Rect> faces;
class FaceDetectWorker : public Napi::AsyncWorker {
public:
FaceDetectWorker(Napi::Function& callback, string url)
: Napi::AsyncWorker(callback), url(url) {
}
~FaceDetectWorker() {
}
vector<Rect> detectAndDraw( Mat& img, CascadeClassifier& cascade)
{
double t = 0;
vector<Rect> faces;
Mat gray, smallImg;
cvtColor( img, gray, COLOR_BGR2GRAY );
//double scale = 1;
// scale = 1, fx = 1 / scale
double fx = 1;
resize( gray, smallImg, Size(), fx, fx, INTER_LINEAR_EXACT );
equalizeHist( smallImg, smallImg );
t = (double)getTickCount();
cascade.detectMultiScale( smallImg, faces,
1.1, 2, 0
//|CASCADE_FIND_BIGGEST_OBJECT
//|CASCADE_DO_ROUGH_SEARCH
|CASCADE_SCALE_IMAGE,
Size(30, 30) );
t = (double)getTickCount() - t;
printf( "detection time = %g ms\n", t*1000/getTickFrequency());
return faces;
}
// Executed inside the worker-thread.
// It is not safe to access JS engine data structure
// here, so everything we need for input and output
// should go on `this`.
void Execute () {
Mat frame, image;
VideoCapture capture;
CascadeClassifier cascade;
if (!cascade.load(samples::findFile(cascadeName)))
{
Napi::AsyncWorker::SetError("ERROR: Could not load classifier cascade");
return;
}
if (!capture.open(url))
{
Napi::AsyncWorker::SetError("ERROR: Error opening video stream " + url);
return;
}
if( capture.isOpened() )
{
cout << "Video capturing has been started ..." << endl;
try {
while(running) {
capture >> frame;
if( frame.empty()) {
continue;
}
Mat frame1 = frame.clone();
vector<Rect> facesResult = detectAndDraw( frame1, cascade);
facesMutex.lock();
faces = facesResult;
facesMutex.unlock();
std::this_thread::sleep_for(std::chrono::milliseconds(30));
}
} catch (std::exception &e) {
facesMutex.unlock();
Napi::AsyncWorker::SetError(e.what());
}
}
}
// Executed when the async work is complete
// this function will be run inside the main event loop
// so it is safe to use JS engine data again
void OnOK() {
Napi::HandleScope scope(Env());
Callback().Call({Env().Undefined(), Env().Undefined()});
}
private:
string url;
};
// Asynchronous access to the `FaceDetect()` function
Napi::Value FaceDetect(const Napi::CallbackInfo& info) {
Napi::String url = info[0].As<Napi::String>().ToString();
Napi::Function callback = info[1].As<Napi::Function>();
FaceDetectWorker* faceDetectWorker = new FaceDetectWorker(callback, url);
faceDetectWorker->Queue();
return info.Env().Undefined();
}
Napi::Array FaceDetectGet(const Napi::CallbackInfo &info) {
facesMutex.lock();
Napi::Array faceArray = Napi::Array::New(info.Env(), faces.size());
for(int i = 0; i < faces.size(); i++) {
Rect rect = faces[i];
Napi::Object obj = Napi::Object::New(info.Env());
obj.Set("x", rect.x);
obj.Set("y", rect.y);
obj.Set("width", rect.width);
obj.Set("height", rect.height);
faceArray[i] = obj;
}
facesMutex.unlock();
return faceArray;
}
So the question is: if I use FaceDetectGet, which only enumerates the vector, should I lock and unlock it?
Actually, the solution was to use a shared mutex. Yes, the reading side also has to lock: reading the vector while the worker thread reassigns it is a data race. With a std::shared_mutex (C++17), the writer takes an exclusive lock while readers take shared locks, so multiple reads can proceed concurrently but never overlap a write.
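A minimal sketch of the pattern (illustrative names, not the actual module code):

#include <shared_mutex>
#include <vector>

std::shared_mutex mtx;
std::vector<int> data;

void writer(std::vector<int> next) {
    std::unique_lock lock(mtx);  // exclusive: blocks all readers and writers
    data = std::move(next);
}

int readerSum() {
    std::shared_lock lock(mtx);  // shared: many readers may hold it at once
    int s = 0;
    for (int v : data) s += v;
    return s;
}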
The full module now looks like this:
#include <napi.h>
#include "facedetect.h"
#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <thread>
#include <mutex> // For std::unique_lock
#include <shared_mutex>
// https://stackoverflow.com/questions/55313194/do-i-have-to-lock-a-vectors-that-i-just-enumerate-or-i-only-do-it-when-i-change?noredirect=1#comment97357425_55313194
using namespace std;
using namespace cv;
std::shared_mutex _facesMutex;
string cascadeName = "/usr/local/share/opencv4/haarcascades/haarcascade_frontalface_alt.xml";
bool running = true;
vector<Rect> faces;
class FaceDetectWorker : public Napi::AsyncWorker {
public:
FaceDetectWorker(Napi::Function& callback, string url, int skip, int sleep)
: Napi::AsyncWorker(callback), url(url), skip(skip), sleep(sleep) {
}
~FaceDetectWorker() {
}
vector<Rect> detectFaces(Mat &img, CascadeClassifier &cascade)
{
double t = 0;
vector<Rect> faces;
Mat gray, smallImg;
cvtColor( img, gray, COLOR_BGR2GRAY );
//double scale = 1;
// scale = 1, fx = 1 / scale
double fx = 1;
//resize( gray, smallImg, Size(), fx, fx, INTER_LINEAR_EXACT );
//equalizeHist( gray, smallImg );
//t = (double)getTickCount();
cascade.detectMultiScale( gray, faces,
1.1, 2, 0
//|CASCADE_FIND_BIGGEST_OBJECT
//|CASCADE_DO_ROUGH_SEARCH
|CASCADE_SCALE_IMAGE,
Size(30, 30) );
//t = (double)getTickCount() - t;
//printf( "detection time = %g ms\n", t*1000/getTickFrequency());
return faces;
}
// Executed inside the worker-thread.
// It is not safe to access JS engine data structure
// here, so everything we need for input and output
// should go on `this`.
void Execute () {
running = true;
Mat frame, image;
VideoCapture capture;
CascadeClassifier cascade;
if (!cascade.load(samples::findFile(cascadeName)))
{
Napi::AsyncWorker::SetError("ERROR: Could not load classifier cascade");
return;
}
if (!capture.open(url))
{
Napi::AsyncWorker::SetError("ERROR: Error opening video stream " + url);
return;
}
if( capture.isOpened() )
{
cout << "Video capturing has been started ..." << endl;
try {
int skipCount = 0;
while(running) {
//capture.read(frame);
capture >> frame;
if( frame.empty()) {
continue;
}
skipCount++;
//cout<< "sleep " << sleep << " skip " << skip << endl;
if (skipCount >= skip) {
//cout<< "calculation " << skipCount << endl;
skipCount = 0;
Mat frame1 = frame.clone();
vector<Rect> facesResult = detectFaces(frame1, cascade);
std::unique_lock lock(_facesMutex);
faces = facesResult;
lock.unlock();
}
//waitKey(250);
std::this_thread::sleep_for(std::chrono::milliseconds(sleep));
}
} catch (std::exception &e) {
Napi::AsyncWorker::SetError(e.what());
}
} else {
Napi::AsyncWorker::SetError("ERROR: Could not open video camera " + url);
}
}
// Executed when the async work is complete
// this function will be run inside the main event loop
// so it is safe to use JS engine data again
void OnOK() {
Napi::HandleScope scope(Env());
Callback().Call({Env().Undefined(), Env().Undefined()});
}
private:
string url;
int skip = 3;
int sleep = 30;
};
// Asynchronous access to the `FaceDetect()` function
Napi::Value FaceDetect(const Napi::CallbackInfo& info) {
Napi::String url = info[0].As<Napi::String>();
Napi::Number skip = info[1].As<Napi::Number>();
Napi::Number sleep = info[2].As<Napi::Number>();
Napi::Function callback = info[3].As<Napi::Function>();
FaceDetectWorker* faceDetectWorker = new FaceDetectWorker(callback, url, skip, sleep);
faceDetectWorker->Queue();
return info.Env().Undefined();
}
Napi::Array FaceDetectGet(const Napi::CallbackInfo &info) {
std::shared_lock lock(_facesMutex);
vector<Rect> faces2 = faces;
lock.unlock();
Napi::Array faceArray = Napi::Array::New(info.Env(), faces2.size());
for(int i = 0; i < faces2.size(); i++) {
Rect rect = faces2[i];
Napi::Object obj = Napi::Object::New(info.Env());
obj.Set("x", rect.x);
obj.Set("y", rect.y);
obj.Set("width", rect.width);
obj.Set("height", rect.height);
faceArray[i] = obj;
}
return faceArray;
}
void FaceDetectCancel(const Napi::CallbackInfo &info) {
running = false;
}
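For completeness, these functions still have to be exported to JavaScript; the registration glue is not shown above (it presumably lives alongside facedetect.h). A minimal sketch of the usual node-addon-api init, with assumed export names, would be:

Napi::Object Init(Napi::Env env, Napi::Object exports) {
    // Hypothetical registration; the actual init code is not part of this post.
    exports.Set("faceDetect", Napi::Function::New(env, FaceDetect));
    exports.Set("faceDetectGet", Napi::Function::New(env, FaceDetectGet));
    exports.Set("faceDetectCancel", Napi::Function::New(env, FaceDetectCancel));
    return exports;
}
NODE_API_MODULE(facedetect, Init)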
I took an OpenCV hand tracking algorithm
from here and made a DLL from it. When I launch it as a standalone program, it works fine. But when I compile it as a DLL and attach it to Unity, I get this Runtime Error: "the program has requested the Runtime to terminate it in an unusual way".
I put //crashes comments in the trAck() function in algodll.cpp after the lines that cause the Runtime Error.
Here's what I've got. Headers:
handGesture.hpp:
#ifndef _HAND_GESTURE_
#define _HAND_GESTURE_
#include <opencv2/imgproc/imgproc.hpp>
#include<opencv2/opencv.hpp>
#include <vector>
#include <string>
#include "algo.hpp"
#include "myImage.hpp"
using namespace cv;
using namespace std;
class HandGesture{
public:
MyImage m;
HandGesture();
vector<vector<Point> > contours;
vector<vector<int> >hullI;
vector<vector<Point> >hullP;
vector<vector<Vec4i> > defects;
vector <Point> fingerTips;
Rect rect;
void printGestureInfo(Mat src);
int cIdx;
int frameNumber;
int mostFrequentFingerNumber;
int nrOfDefects;
Rect bRect;
double bRect_width;
double bRect_height;
bool isHand;
bool detectIfHand();
void initVectors();
void getFingerNumber(MyImage *m);
void eleminateDefects(MyImage *m);
void getFingerTips(MyImage *m);
void drawFingerTips(MyImage *m);
private:
string bool2string(bool tf);
int fontFace;
int prevNrFingerTips;
void checkForOneFinger(MyImage *m);
float getAngle(Point s,Point f,Point e);
vector<int> fingerNumbers;
void analyzeContours();
string intToString(int number);
void computeFingerNumber();
void drawNewNumber(MyImage *m);
void addNumberToImg(MyImage *m);
vector<int> numbers2Display;
void addFingerNumberToVector();
Scalar numberColor;
int nrNoFinger;
float distanceP2P(Point a,Point b);
void removeRedundantEndPoints(vector<Vec4i> newDefects,MyImage *m);
void removeRedundantFingerTips();
};
#endif
algo.hpp:
#ifndef _ALGO_
#define _ALGO_
#include <opencv2/imgproc/imgproc.hpp>
#include<opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <string>
#define ORIGCOL2COL CV_BGR2HLS
#define COL2ORIGCOL CV_HLS2BGR
#define NSAMPLES 7
#define PI 3.14159
extern "C"
{
__declspec(dllexport) void trAck();
}
#endif
myImage.hpp:
#ifndef _MYIMAGE_
#define _MYIMAGE_
#include <opencv2/imgproc/imgproc.hpp>
#include<opencv2/opencv.hpp>
#include <vector>
using namespace cv;
using namespace std;
class MyImage{
public:
MyImage(int webCamera);
MyImage();
Mat srcLR;
Mat src;
Mat bw;
vector<Mat> bwList;
VideoCapture cap;
int cameraSrc;
void initWebCamera(int i);
};
#endif
roi.hpp:
#ifndef ROI
#define ROI
#include <opencv2/imgproc/imgproc.hpp>
#include<opencv2/opencv.hpp>
using namespace cv;
class My_ROI{
public:
My_ROI();
My_ROI(Point upper_corner, Point lower_corner,Mat src);
Point upper_corner, lower_corner;
Mat roi_ptr;
Scalar color;
int border_thickness;
void draw_rectangle(Mat src);
};
#endif
I will not put here code for stdafx.h and targetver.h, since they are standard Visual Studio-generated headers. My cpps:
algodll.cpp
#include "stdafx.h"
#include <opencv2/imgproc/imgproc.hpp>
#include<opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <string>
#include "myImage.hpp"
#include "roi.hpp"
#include "handGesture.hpp"
#include <vector>
#include <cmath>
#include "algo.hpp"
using namespace cv;
using namespace std;
/* Global Variables */
int fontFace = FONT_HERSHEY_PLAIN;
int square_len;
int avgColor[NSAMPLES][3] ;
int c_lower[NSAMPLES][3];
int c_upper[NSAMPLES][3];
int avgBGR[3];
int nrOfDefects;
int iSinceKFInit;
struct dim{int w; int h;}boundingDim;
VideoWriter out;
Mat edges;
My_ROI roi1, roi2,roi3,roi4,roi5,roi6;
vector <My_ROI> roi;
vector <KalmanFilter> kf;
vector <Mat_<float> > measurement;
/* end global variables */
void init(MyImage *m){
square_len=20;
iSinceKFInit=0;
}
// change a color from one space to another
void col2origCol(int hsv[3], int bgr[3], Mat src){
Mat avgBGRMat=src.clone();
for(int i=0;i<3;i++){
avgBGRMat.data[i]=hsv[i];
}
cvtColor(avgBGRMat,avgBGRMat,COL2ORIGCOL);
for(int i=0;i<3;i++){
bgr[i]=avgBGRMat.data[i];
}
}
void printText(Mat src, string text){
int fontFace = FONT_HERSHEY_PLAIN;
putText(src,text,Point(src.cols/2, src.rows/10),fontFace, 1.2f,Scalar(200,0,0),2);
}
void waitForPalmCover(MyImage* m){
m->cap >> m->src;
flip(m->src,m->src,1);
roi.push_back(My_ROI(Point(m->src.cols/3, m->src.rows/6),Point(m->src.cols/3+square_len,m->src.rows/6+square_len),m->src));
roi.push_back(My_ROI(Point(m->src.cols/4, m->src.rows/2),Point(m->src.cols/4+square_len,m->src.rows/2+square_len),m->src));
roi.push_back(My_ROI(Point(m->src.cols/3, m->src.rows/1.5),Point(m->src.cols/3+square_len,m->src.rows/1.5+square_len),m->src));
roi.push_back(My_ROI(Point(m->src.cols/2, m->src.rows/2),Point(m->src.cols/2+square_len,m->src.rows/2+square_len),m->src));
roi.push_back(My_ROI(Point(m->src.cols/2.5, m->src.rows/2.5),Point(m->src.cols/2.5+square_len,m->src.rows/2.5+square_len),m->src));
roi.push_back(My_ROI(Point(m->src.cols/2, m->src.rows/1.5),Point(m->src.cols/2+square_len,m->src.rows/1.5+square_len),m->src));
roi.push_back(My_ROI(Point(m->src.cols/2.5, m->src.rows/1.8),Point(m->src.cols/2.5+square_len,m->src.rows/1.8+square_len),m->src));
for(int i =0;i<50;i++){
m->cap >> m->src;
flip(m->src,m->src,1);
for(int j=0;j<NSAMPLES;j++){
roi[j].draw_rectangle(m->src);
}
string imgText=string("Cover rectangles with palm");
printText(m->src,imgText);
if(i==30){
// imwrite("./images/waitforpalm1.jpg",m->src);
}
imshow("img1", m->src);
out << m->src;
if(cv::waitKey(30) >= 0) break;
}
}
int getMedian(vector<int> val){
int median;
size_t size = val.size();
sort(val.begin(), val.end());
if (size % 2 == 0) {
median = val[size / 2 - 1] ;
} else{
median = val[size / 2];
}
return median;
}
void getAvgColor(MyImage *m,My_ROI roi,int avg[3]){
Mat r;
roi.roi_ptr.copyTo(r);
vector<int>hm;
vector<int>sm;
vector<int>lm;
// generate vectors
for(int i=2; i<r.rows-2; i++){
for(int j=2; j<r.cols-2; j++){
hm.push_back(r.data[r.channels()*(r.cols*i + j) + 0]) ;
sm.push_back(r.data[r.channels()*(r.cols*i + j) + 1]) ;
lm.push_back(r.data[r.channels()*(r.cols*i + j) + 2]) ;
}
}
avg[0]=getMedian(hm);
avg[1]=getMedian(sm);
avg[2]=getMedian(lm);
}
void average(MyImage *m){
m->cap >> m->src;
flip(m->src,m->src,1);
for(int i=0;i<30;i++){
m->cap >> m->src;
flip(m->src,m->src,1);
cvtColor(m->src,m->src,ORIGCOL2COL);
for(int j=0;j<NSAMPLES;j++){
getAvgColor(m,roi[j],avgColor[j]);
roi[j].draw_rectangle(m->src);
}
cvtColor(m->src,m->src,COL2ORIGCOL);
string imgText=string("Finding average color of hand");
printText(m->src,imgText);
imshow("img1", m->src);
if(cv::waitKey(30) >= 0) break;
}
}
void initTrackbars(){
for(int i=0;i<NSAMPLES;i++){
c_lower[i][0]=12;
c_upper[i][0]=7;
c_lower[i][1]=30;
c_upper[i][1]=40;
c_lower[i][2]=80;
c_upper[i][2]=80;
}
createTrackbar("lower1","trackbars",&c_lower[0][0],255);
createTrackbar("lower2","trackbars",&c_lower[0][1],255);
createTrackbar("lower3","trackbars",&c_lower[0][2],255);
createTrackbar("upper1","trackbars",&c_upper[0][0],255);
createTrackbar("upper2","trackbars",&c_upper[0][1],255);
createTrackbar("upper3","trackbars",&c_upper[0][2],255);
}
void normalizeColors(MyImage * myImage){
// copy all boundries read from trackbar
// to all of the different boundries
for(int i=1;i<NSAMPLES;i++){
for(int j=0;j<3;j++){
c_lower[i][j]=c_lower[0][j];
c_upper[i][j]=c_upper[0][j];
}
}
// normalize all boundries so that
// threshold is whithin 0-255
for(int i=0;i<NSAMPLES;i++){
if((avgColor[i][0]-c_lower[i][0]) <0){
c_lower[i][0] = avgColor[i][0] ;
}if((avgColor[i][1]-c_lower[i][1]) <0){
c_lower[i][1] = avgColor[i][1] ;
}if((avgColor[i][2]-c_lower[i][2]) <0){
c_lower[i][2] = avgColor[i][2] ;
}if((avgColor[i][0]+c_upper[i][0]) >255){
c_upper[i][0] = 255-avgColor[i][0] ;
}if((avgColor[i][1]+c_upper[i][1]) >255){
c_upper[i][1] = 255-avgColor[i][1] ;
}if((avgColor[i][2]+c_upper[i][2]) >255){
c_upper[i][2] = 255-avgColor[i][2] ;
}
}
}
void produceBinaries(MyImage *m){
Scalar lowerBound;
Scalar upperBound;
Mat foo;
for(int i=0;i<NSAMPLES;i++){
normalizeColors(m);
lowerBound=Scalar( avgColor[i][0] - c_lower[i][0] , avgColor[i][1] - c_lower[i][1], avgColor[i][2] - c_lower[i][2] );
upperBound=Scalar( avgColor[i][0] + c_upper[i][0] , avgColor[i][1] + c_upper[i][1], avgColor[i][2] + c_upper[i][2] );
m->bwList.push_back(Mat(m->srcLR.rows,m->srcLR.cols,CV_8U));
inRange(m->srcLR,lowerBound,upperBound,m->bwList[i]);
}
m->bwList[0].copyTo(m->bw);
for(int i=1;i<NSAMPLES;i++){
m->bw+=m->bwList[i];
}
medianBlur(m->bw, m->bw,7);
}
void initWindows(MyImage m){
namedWindow("trackbars",CV_WINDOW_KEEPRATIO);
namedWindow("img1",CV_WINDOW_FULLSCREEN);
}
void showWindows(MyImage m){
pyrDown(m.bw,m.bw);
pyrDown(m.bw,m.bw);
Rect roi( Point( 3*m.src.cols/4,0 ), m.bw.size());
vector<Mat> channels;
Mat result;
for(int i=0;i<3;i++)
channels.push_back(m.bw);
merge(channels,result);
result.copyTo( m.src(roi));
imshow("img1",m.src);
}
int findBiggestContour(vector<vector<Point> > contours){
int indexOfBiggestContour = -1;
int sizeOfBiggestContour = 0;
for (int i = 0; i < contours.size(); i++){
if(contours[i].size() > sizeOfBiggestContour){
sizeOfBiggestContour = contours[i].size();
indexOfBiggestContour = i;
}
}
return indexOfBiggestContour;
}
void myDrawContours(MyImage *m,HandGesture *hg){
drawContours(m->src,hg->hullP,hg->cIdx,cv::Scalar(200,0,0),2, 8, vector<Vec4i>(), 0, Point());
rectangle(m->src,hg->bRect.tl(),hg->bRect.br(),Scalar(0,0,200));
vector<Vec4i>::iterator d=hg->defects[hg->cIdx].begin();
int fontFace = FONT_HERSHEY_PLAIN;
vector<Mat> channels;
Mat result;
for(int i=0;i<3;i++)
channels.push_back(m->bw);
merge(channels,result);
// drawContours(result,hg->contours,hg->cIdx,cv::Scalar(0,200,0),6, 8, vector<Vec4i>(), 0, Point());
drawContours(result,hg->hullP,hg->cIdx,cv::Scalar(0,0,250),10, 8, vector<Vec4i>(), 0, Point());
while( d!=hg->defects[hg->cIdx].end() ) {
Vec4i& v=(*d);
int startidx=v[0]; Point ptStart(hg->contours[hg->cIdx][startidx] );
int endidx=v[1]; Point ptEnd(hg->contours[hg->cIdx][endidx] );
int faridx=v[2]; Point ptFar(hg->contours[hg->cIdx][faridx] );
float depth = v[3] / 256;
/*
line( m->src, ptStart, ptFar, Scalar(0,255,0), 1 );
line( m->src, ptEnd, ptFar, Scalar(0,255,0), 1 );
circle( m->src, ptFar, 4, Scalar(0,255,0), 2 );
circle( m->src, ptEnd, 4, Scalar(0,0,255), 2 );
circle( m->src, ptStart, 4, Scalar(255,0,0), 2 );
*/
circle( result, ptFar, 9, Scalar(0,205,0), 5 );
d++;
}
// imwrite("./images/contour_defects_before_eliminate.jpg",result);
}
void makeContours(MyImage *m, HandGesture* hg){
Mat aBw;
pyrUp(m->bw,m->bw);
m->bw.copyTo(aBw);
findContours(aBw,hg->contours,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_NONE);
hg->initVectors();
hg->cIdx=findBiggestContour(hg->contours);
if(hg->cIdx!=-1){
// approxPolyDP( Mat(hg->contours[hg->cIdx]), hg->contours[hg->cIdx], 11, true );
hg->bRect=boundingRect(Mat(hg->contours[hg->cIdx]));
convexHull(Mat(hg->contours[hg->cIdx]),hg->hullP[hg->cIdx],false,true);
convexHull(Mat(hg->contours[hg->cIdx]),hg->hullI[hg->cIdx],false,false);
approxPolyDP( Mat(hg->hullP[hg->cIdx]), hg->hullP[hg->cIdx], 18, true );
if(hg->contours[hg->cIdx].size()>3 ){
convexityDefects(hg->contours[hg->cIdx],hg->hullI[hg->cIdx],hg->defects[hg->cIdx]);
hg->eleminateDefects(m);
}
bool isHand=hg->detectIfHand();
hg->printGestureInfo(m->src);
if(isHand){
hg->getFingerTips(m);
hg->drawFingerTips(m);
myDrawContours(m,hg);
}
}
}
//TODO: turn everything before the for(;;) into an OnStart function in my OpenCV library, move the rest into some other function and put it into Update. All as a DLL
extern "C" {
void trAck() {
MyImage m(0);
HandGesture hg;
init(&m);
m.cap >> m.src;
namedWindow("img1", CV_WINDOW_KEEPRATIO);
out.open("out.avi", CV_FOURCC('M', 'J', 'P', 'G'), 15, m.src.size(), true);
waitForPalmCover(&m);//crashes
average(&m);//crashes
destroyWindow("img1");
initWindows(m);
initTrackbars();
for (;;) {
hg.frameNumber++;
m.cap >> m.src;
flip(m.src, m.src, 1);
pyrDown(m.src, m.srcLR);//crashes
blur(m.srcLR, m.srcLR, Size(3, 3));
cvtColor(m.srcLR, m.srcLR, ORIGCOL2COL);//crashes
produceBinaries(&m);
cvtColor(m.srcLR, m.srcLR, COL2ORIGCOL);//crashes
makeContours(&m, &hg);//crashes
hg.getFingerNumber(&m);
showWindows(m);//crashes
out << m.src;
//imwrite("./images/final_result.jpg",m.src);
if (cv::waitKey(30) == char('q')) break;
}
destroyAllWindows();
out.release();
m.cap.release();
}
}
handGesture.cpp:
#include "stdafx.h"
#include "handGesture.hpp"
#include <opencv2/imgproc/imgproc.hpp>
#include<opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <string>
using namespace cv;
using namespace std;
HandGesture::HandGesture(){
frameNumber=0;
nrNoFinger=0;
fontFace = FONT_HERSHEY_PLAIN;
}
void HandGesture::initVectors(){
hullI=vector<vector<int> >(contours.size());
hullP=vector<vector<Point> >(contours.size());
defects=vector<vector<Vec4i> > (contours.size());
}
void HandGesture::analyzeContours(){
bRect_height=bRect.height;
bRect_width=bRect.width;
}
string HandGesture::bool2string(bool tf){
if(tf)
return "true";
else
return "false";
}
string HandGesture::intToString(int number){
stringstream ss;
ss << number;
string str = ss.str();
return str;
}
void HandGesture::printGestureInfo(Mat src){
int fontFace = FONT_HERSHEY_PLAIN;
Scalar fColor(245,200,200);
int xpos=src.cols/1.5;
int ypos=src.rows/1.6;
float fontSize=0.7f;
int lineChange=14;
string info= "Figure info:";
putText(src,info,Point(ypos,xpos),fontFace,fontSize,fColor);
xpos+=lineChange;
info=string("Number of defects: ") + string(intToString(nrOfDefects)) ;
putText(src,info,Point(ypos,xpos),fontFace,fontSize ,fColor);
xpos+=lineChange;
info=string("bounding box height, width ") + string(intToString(bRect_height)) + string(" , ") + string(intToString(bRect_width)) ;
putText(src,info,Point(ypos,xpos),fontFace,fontSize ,fColor);
xpos+=lineChange;
info=string("Is hand: ") + string(bool2string(isHand));
putText(src,info,Point(ypos,xpos),fontFace,fontSize ,fColor);
}
bool HandGesture::detectIfHand(){
analyzeContours();
double h = bRect_height;
double w = bRect_width;
isHand=true;
if(fingerTips.size() > 5 ){
isHand=false;
}else if(h==0 || w == 0){
isHand=false;
}else if(h/w > 4 || w/h >4){
isHand=false;
}else if(bRect.x<20){
isHand=false;
}
return isHand;
}
float HandGesture::distanceP2P(Point a, Point b){
float d= sqrt(fabs( pow(a.x-b.x,2) + pow(a.y-b.y,2) )) ;
return d;
}
// remove fingertips that are too close to
// eachother
void HandGesture::removeRedundantFingerTips(){
vector<Point> newFingers;
for(int i=0;i<fingerTips.size();i++){
for(int j=i;j<fingerTips.size();j++){
if(distanceP2P(fingerTips[i],fingerTips[j])<10 && i!=j){
}else{
newFingers.push_back(fingerTips[i]);
break;
}
}
}
fingerTips.swap(newFingers);
}
void HandGesture::computeFingerNumber(){
std::sort(fingerNumbers.begin(), fingerNumbers.end());
int frequentNr;
int thisNumberFreq=1;
int highestFreq=1;
frequentNr=fingerNumbers[0];
for(int i=1;i<fingerNumbers.size(); i++){
if(fingerNumbers[i-1]!=fingerNumbers[i]){
if(thisNumberFreq>highestFreq){
frequentNr=fingerNumbers[i-1];
highestFreq=thisNumberFreq;
}
thisNumberFreq=0;
}
thisNumberFreq++;
}
if(thisNumberFreq>highestFreq){
frequentNr=fingerNumbers[fingerNumbers.size()-1];
}
mostFrequentFingerNumber=frequentNr;
}
void HandGesture::addFingerNumberToVector(){
int i=fingerTips.size();
fingerNumbers.push_back(i);
}
// add the calculated number of fingers to image m->src
void HandGesture::addNumberToImg(MyImage *m){
int xPos=10;
int yPos=10;
int offset=30;
float fontSize=1.5f;
int fontFace = FONT_HERSHEY_PLAIN;
for(int i=0;i<numbers2Display.size();i++){
rectangle(m->src,Point(xPos,yPos),Point(xPos+offset,yPos+offset),numberColor, 2);
putText(m->src, intToString(numbers2Display[i]),Point(xPos+7,yPos+offset-3),fontFace,fontSize,numberColor);
xPos+=40;
if(xPos>(m->src.cols-m->src.cols/3.2)){
yPos+=40;
xPos=10;
}
}
}
// calculate most frequent numbers of fingers
// over 20 frames
void HandGesture::getFingerNumber(MyImage *m){
removeRedundantFingerTips();
if(bRect.height > m->src.rows/2 && nrNoFinger>12 && isHand ){
numberColor=Scalar(0,200,0);
addFingerNumberToVector();
if(frameNumber>12){
nrNoFinger=0;
frameNumber=0;
computeFingerNumber();
numbers2Display.push_back(mostFrequentFingerNumber);
fingerNumbers.clear();
}else{
frameNumber++;
}
}else{
nrNoFinger++;
numberColor=Scalar(200,200,200);
}
addNumberToImg(m);
}
float HandGesture::getAngle(Point s, Point f, Point e){
float l1 = distanceP2P(f,s);
float l2 = distanceP2P(f,e);
float dot=(s.x-f.x)*(e.x-f.x) + (s.y-f.y)*(e.y-f.y);
float angle = acos(dot/(l1*l2));
angle=angle*180/PI;
return angle;
}
void HandGesture::eleminateDefects(MyImage *m){
int tolerance = bRect_height/5;
float angleTol=95;
vector<Vec4i> newDefects;
int startidx, endidx, faridx;
vector<Vec4i>::iterator d=defects[cIdx].begin();
while( d!=defects[cIdx].end() ) {
Vec4i& v=(*d);
startidx=v[0]; Point ptStart(contours[cIdx][startidx] );
endidx=v[1]; Point ptEnd(contours[cIdx][endidx] );
faridx=v[2]; Point ptFar(contours[cIdx][faridx] );
if(distanceP2P(ptStart, ptFar) > tolerance && distanceP2P(ptEnd, ptFar) > tolerance && getAngle(ptStart, ptFar, ptEnd ) < angleTol ){
if( ptEnd.y > (bRect.y + bRect.height -bRect.height/4 ) ){
}else if( ptStart.y > (bRect.y + bRect.height -bRect.height/4 ) ){
}else {
newDefects.push_back(v);
}
}
d++;
}
nrOfDefects=newDefects.size();
defects[cIdx].swap(newDefects);
removeRedundantEndPoints(defects[cIdx], m);
}
// remove endpoint of convexity defects if they are at the same fingertip
void HandGesture::removeRedundantEndPoints(vector<Vec4i> newDefects,MyImage *m){
Vec4i temp;
float avgX, avgY;
float tolerance=bRect_width/6;
int startidx, endidx, faridx;
int startidx2, endidx2;
for(int i=0;i<newDefects.size();i++){
for(int j=i;j<newDefects.size();j++){
startidx=newDefects[i][0]; Point ptStart(contours[cIdx][startidx] );
endidx=newDefects[i][1]; Point ptEnd(contours[cIdx][endidx] );
startidx2=newDefects[j][0]; Point ptStart2(contours[cIdx][startidx2] );
endidx2=newDefects[j][1]; Point ptEnd2(contours[cIdx][endidx2] );
if(distanceP2P(ptStart,ptEnd2) < tolerance ){
contours[cIdx][startidx]=ptEnd2;
break;
}if(distanceP2P(ptEnd,ptStart2) < tolerance ){
contours[cIdx][startidx2]=ptEnd;
}
}
}
}
// convexity defects does not check for one finger
// so another method has to check when there are no
// convexity defects
void HandGesture::checkForOneFinger(MyImage *m){
int yTol=bRect.height/6;
Point highestP;
highestP.y=m->src.rows;
vector<Point>::iterator d=contours[cIdx].begin();
while( d!=contours[cIdx].end() ) {
Point v=(*d);
if(v.y<highestP.y){
highestP=v;
cout<<highestP.y<<endl;
}
d++;
}int n=0;
d=hullP[cIdx].begin();
while( d!=hullP[cIdx].end() ) {
Point v=(*d);
cout<<"x " << v.x << " y "<< v.y << " highestpY " << highestP.y<< "ytol "<<yTol<<endl;
if(v.y<highestP.y+yTol && v.y!=highestP.y && v.x!=highestP.x){
n++;
}
d++;
}if(n==0){
fingerTips.push_back(highestP);
}
}
void HandGesture::drawFingerTips(MyImage *m){
Point p;
int k=0;
for(int i=0;i<fingerTips.size();i++){
p=fingerTips[i];
putText(m->src,intToString(i),p-Point(0,30),fontFace, 1.2f,Scalar(200,200,200),2);
circle( m->src,p, 5, Scalar(100,255,100), 4 );
}
}
void HandGesture::getFingerTips(MyImage *m){
fingerTips.clear();
int i=0;
vector<Vec4i>::iterator d=defects[cIdx].begin();
while( d!=defects[cIdx].end() ) {
Vec4i& v=(*d);
int startidx=v[0]; Point ptStart(contours[cIdx][startidx] );
int endidx=v[1]; Point ptEnd(contours[cIdx][endidx] );
int faridx=v[2]; Point ptFar(contours[cIdx][faridx] );
if(i==0){
fingerTips.push_back(ptStart);
i++;
}
fingerTips.push_back(ptEnd);
d++;
i++;
}
if(fingerTips.size()==0){
checkForOneFinger(m);
}
}
myImage.cpp:
#include "stdafx.h"
#include "myImage.hpp"
#include <opencv2/imgproc/imgproc.hpp>
#include<opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <string>
using namespace cv;
MyImage::MyImage(){
}
MyImage::MyImage(int webCamera){
cameraSrc=webCamera;
cap=VideoCapture(webCamera);
}
roi.cpp:
#include "stdafx.h"
#include "myImage.hpp"
#include <opencv2/imgproc/imgproc.hpp>
#include<opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <string>
#include "roi.hpp"
using namespace cv;
using namespace std;
My_ROI::My_ROI(){
upper_corner=Point(0,0);
lower_corner=Point(0,0);
}
My_ROI::My_ROI(Point u_corner, Point l_corner, Mat src){
upper_corner=u_corner;
lower_corner=l_corner;
color=Scalar(0,255,0);
border_thickness=2;
roi_ptr=src(Rect(u_corner.x, u_corner.y, l_corner.x-u_corner.x,l_corner.y-u_corner.y));
}
void My_ROI::draw_rectangle(Mat src){
rectangle(src,upper_corner,lower_corner,color,border_thickness);
}
Again, dllmain.cpp and stdafx.cpp are standard Visual Studio-generated files, so I do not include them here.
My Unity Script:
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using System.Runtime.InteropServices;
using UnityEngine.UI;
public class bs : MonoBehaviour
{
[DllImport("algodll")]
public static extern void trAck();
// public float speed;
public WebCamTexture webcamTexture;
Renderer renderer;
// Use this for initialization
void Start()
{
webcamTexture = new WebCamTexture();
renderer = GetComponent<Renderer>();
renderer.material.mainTexture = webcamTexture;
webcamTexture.Play();
}
// Update is called once per frame
void Update()
{
if (webcamTexture.isPlaying)
{
trAck();
}
}
}
I tried removing all the code inside the trAck() function and simply incrementing some variable i, to test whether the crash comes from the infinite loop; that didn't produce this error, it just froze Unity. Also, putting trAck() into Start() did not help.
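For reference, that stripped-down test was roughly this (a sketch; the variable name and loop shape are assumptions based on the description above):

extern "C" __declspec(dllexport) void trAck() {
    // No OpenCV calls at all: just spin to see if the loop itself is the problem.
    volatile int i = 0;
    for (;;) {
        ++i;  // freezes Unity's main thread in Update(), but no Runtime Error
    }
}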