I got the problem about timers which is
QObject::~QObject: Timers cannot be stopped from another thread
Below is the log data from my terminal.
$ ./boost_tutorials
init done
All done. Exit safely!
QObject::~QObject: Timers cannot be stopped from another thread
Here is my C++ code with thread from boost. I build it via CMakeLists.
#include <boost/thread.hpp>
#include <boost/chrono.hpp>
#include <iostream>
#include <opencv2/opencv.hpp>
class Robot {
public:
Robot() {
cap.open(-1);
SYSTEM_QUIT = false;
if (!cap.isOpened()) {
cout << "Error opening the video" << endl;
SYSTEM_QUIT = true;
}
}
~Robot() {
destroyAllWindows();
cout << "All done. Exit safely!" << endl;
}
void run() {
boost::thread* t1 = new boost::thread(boost::bind(&Robot::imageProcCallback, this));
boost::thread* t2 = new boost::thread(boost::bind(&Robot::simpleCallback, this));
t1->join();
t2->join();
}
void imageProcCallback() {
Mat frame;
int cc = 0;
while(!SYSTEM_QUIT) {
cap >> frame;
if (!msgs.empty()) {
Point pt(rand()%frame.cols, rand()%frame.rows);
putText(frame, msgs.back(), pt, cv::FONT_HERSHEY_COMPLEX, 2, Scalar(0, 255, 0));
}
imshow(filename, frame);
if (waitKey(5) == 27)
SYSTEM_QUIT = true;
}
frame.release();
}
void simpleCallback() {
while(!SYSTEM_QUIT) {
int count = rand() % 100;
std::string str = std::to_string(count);
msgs.push_back(str);
}
}
private:
string filename;
VideoCapture cap;
bool SYSTEM_QUIT;
std::vector<std::string> msgs;
};
void main() {
Robot robot;
robot.run();
}
Related
I change and get of vector elements.
So as the thread is running, i use a mutex to change the vector's elements.
but if i just want to enumerate the vector's elements do i still have to lock my vector?
Here is the code:
#include <napi.h>
#include "facedetect.h"
#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <thread>
using namespace std;
using namespace cv;
std::mutex facesMutex;
string cascadeName = "/usr/local/share/opencv4/haarcascades/haarcascade_frontalface_alt.xml";;
bool running = true;
vector<Rect> faces;
class FaceDetectWorker : public Napi::AsyncWorker {
public:
FaceDetectWorker(Napi::Function& callback, string url)
: Napi::AsyncWorker(callback), url(url) {
}
~FaceDetectWorker() {
}
vector<Rect> detectAndDraw( Mat& img, CascadeClassifier& cascade)
{
double t = 0;
vector<Rect> faces;
Mat gray, smallImg;
cvtColor( img, gray, COLOR_BGR2GRAY );
//double scale = 1;
// scale = 1, fx = 1 / scale
double fx = 1;
resize( gray, smallImg, Size(), fx, fx, INTER_LINEAR_EXACT );
equalizeHist( smallImg, smallImg );
t = (double)getTickCount();
cascade.detectMultiScale( smallImg, faces,
1.1, 2, 0
//|CASCADE_FIND_BIGGEST_OBJECT
//|CASCADE_DO_ROUGH_SEARCH
|CASCADE_SCALE_IMAGE,
Size(30, 30) );
t = (double)getTickCount() - t;
printf( "detection time = %g ms\n", t*1000/getTickFrequency());
return faces;
}
// Executed inside the worker-thread.
// It is not safe to access JS engine data structure
// here, so everything we need for input and output
// should go on `this`.
void Execute () {
Mat frame, image;
VideoCapture capture;
CascadeClassifier cascade;
if (!cascade.load(samples::findFile(cascadeName)))
{
Napi::AsyncWorker::SetError("ERROR: Could not load classifier cascade");
return;
}
if (!capture.open(url))
{
Napi::AsyncWorker::SetError("ERROR: Error opening video stream " + url);
return;
}
if( capture.isOpened() )
{
cout << "Video capturing has been started ..." << endl;
try {
while(running) {
capture >> frame;
if( frame.empty()) {
continue;
}
Mat frame1 = frame.clone();
vector<Rect> facesResult = detectAndDraw( frame1, cascade);
facesMutex.lock();
faces = facesResult;
facesMutex.unlock();
std::this_thread::sleep_for(std::chrono::milliseconds(30));
}
} catch (std::exception &e) {
facesMutex.unlock();
Napi::AsyncWorker::SetError(e.what());
}
}
}
// Executed when the async work is complete
// this function will be run inside the main event loop
// so it is safe to use JS engine data again
void OnOK() {
Napi::HandleScope scope(Env());
Callback().Call({Env().Undefined(), Env().Undefined()});
}
private:
string url;
};
// Asynchronous access to the `Estimate()` function
Napi::Value FaceDetect(const Napi::CallbackInfo& info) {
Napi::String url = info[0].As<Napi::String>().ToString();
Napi::Function callback = info[1].As<Napi::Function>();
FaceDetectWorker* faceDetectWorker = new FaceDetectWorker(callback, url);
faceDetectWorker->Queue();
return info.Env().Undefined();
}
Napi::Array FaceDetectGet(const Napi::CallbackInfo &info) {
Napi::Array faceArray = Napi::Array::New(info.Env(), faces.size());
facesMutex.lock();
for(int i = 0; i < faces.size(); i++) {
Rect rect = faces[i];
Napi::Object obj = Napi::Object::New(info.Env());
obj.Set("x", rect.x);
obj.Set("y", rect.y);
obj.Set("width", rect.width);
obj.Set("height", rect.height);
faceArray[i] = obj;
}
facesMutex.unlock();
return faceArray;
}
So the questionsis, if i use the FaceDetectGet, which only enumerating the vector, should I lock and unlock it?
Actually the solution was to use a shared mutex.
The code looks like this:
#include <napi.h>
#include "facedetect.h"
#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <thread>
#include <mutex> // For std::unique_lock
#include <shared_mutex>
// https://stackoverflow.com/questions/55313194/do-i-have-to-lock-a-vectors-that-i-just-enumerate-or-i-only-do-it-when-i-change?noredirect=1#comment97357425_55313194
using namespace std;
using namespace cv;
std::shared_mutex _facesMutex;
string cascadeName = "/usr/local/share/opencv4/haarcascades/haarcascade_frontalface_alt.xml";;
bool running = true;
vector<Rect> faces;
class FaceDetectWorker : public Napi::AsyncWorker {
public:
FaceDetectWorker(Napi::Function& callback, string url, int skip, int sleep)
: Napi::AsyncWorker(callback), url(url), skip(skip), sleep(sleep) {
}
~FaceDetectWorker() {
}
vector<Rect> detectFaces(Mat &img, CascadeClassifier &cascade)
{
double t = 0;
vector<Rect> faces;
Mat gray, smallImg;
cvtColor( img, gray, COLOR_BGR2GRAY );
//double scale = 1;
// scale = 1, fx = 1 / scale
double fx = 1;
//resize( gray, smallImg, Size(), fx, fx, INTER_LINEAR_EXACT );
//equalizeHist( gray, smallImg );
//t = (double)getTickCount();
cascade.detectMultiScale( gray, faces,
1.1, 2, 0
//|CASCADE_FIND_BIGGEST_OBJECT
//|CASCADE_DO_ROUGH_SEARCH
|CASCADE_SCALE_IMAGE,
Size(30, 30) );
//t = (double)getTickCount() - t;
//printf( "detection time = %g ms\n", t*1000/getTickFrequency());
return faces;
}
// Executed inside the worker-thread.
// It is not safe to access JS engine data structure
// here, so everything we need for input and output
// should go on `this`.
void Execute () {
running = true;
Mat frame, image;
VideoCapture capture;
CascadeClassifier cascade;
if (!cascade.load(samples::findFile(cascadeName)))
{
Napi::AsyncWorker::SetError("ERROR: Could not load classifier cascade");
return;
}
if (!capture.open(url))
{
Napi::AsyncWorker::SetError("ERROR: Error opening video stream " + url);
return;
}
if( capture.isOpened() )
{
cout << "Video capturing has been started ..." << endl;
try {
int skipCount = 0;
while(running) {
//capture.read(frame);
capture >> frame;
if( frame.empty()) {
continue;
}
skipCount++;
//cout<< "sleep " << sleep << " skip " << skip << endl;
if (skipCount >= skip) {
//cout<< "calculation " << skipCount << endl;
skipCount = 0;
Mat frame1 = frame.clone();
vector<Rect> facesResult = detectFaces(frame1, cascade);
std::unique_lock lock(_facesMutex);
faces = facesResult;
lock.unlock();
}
//waitKey(250);
std::this_thread::sleep_for(std::chrono::milliseconds(sleep));
}
} catch (std::exception &e) {
Napi::AsyncWorker::SetError(e.what());
}
} else {
Napi::AsyncWorker::SetError("ERROR: Could not open video camera " + url);
}
}
// Executed when the async work is complete
// this function will be run inside the main event loop
// so it is safe to use JS engine data again
void OnOK() {
Napi::HandleScope scope(Env());
Callback().Call({Env().Undefined(), Env().Undefined()});
}
private:
string url;
int skip = 3;
int sleep = 30;
};
// Asynchronous access to the `Estimate()` function
Napi::Value FaceDetect(const Napi::CallbackInfo& info) {
Napi::String url = info[0].As<Napi::String>();
Napi::Number skip = info[1].As<Napi::Number>();
Napi::Number sleep = info[2].As<Napi::Number>();
Napi::Function callback = info[3].As<Napi::Function>();
FaceDetectWorker* faceDetectWorker = new FaceDetectWorker(callback, url, skip, sleep);
faceDetectWorker->Queue();
return info.Env().Undefined();
}
Napi::Array FaceDetectGet(const Napi::CallbackInfo &info) {
Napi::Array faceArray = Napi::Array::New(info.Env(), faces.size());
std::shared_lock lock(_facesMutex);
vector<Rect> faces2 = faces;
lock.unlock();
for(int i = 0; i < faces2.size(); i++) {
Rect rect = faces[i];
Napi::Object obj = Napi::Object::New(info.Env());
obj.Set("x", rect.x);
obj.Set("y", rect.y);
obj.Set("width", rect.width);
obj.Set("height", rect.height);
faceArray[i] = obj;
}
return faceArray;
}
void FaceDetectCancel(const Napi::CallbackInfo &info) {
running = false;
}
I'm trying to save a parameter from UART input to txt-File. Please help me to understand what is wrong! Parameter have a form from 0|0|0|0 to 999|999|999|999. Txt File "SAVE_DATA_LOG" stays empty.
Why?
I'm also trying to get a system-timecode next to values - how can I get it?
I'm also sending a strings like "MP11500" (already works).
#include <UART_PC.h>
#include <stdio.h>
#include <QFile>
#include <QTextStream>
//#include <ui_UART_PC.h>
#include <QDebug>
#include <QByteArray>
using namespace std;
// ************************************************************************************************
UART_PC::UART_PC() {
if (0 == connectSerial()) {
qDebug() << "Serial Ok!\n";
} else {
qDebug() << "Serial Error!\n";
return;
}
m_state = WAIT;
m_timer = new QTimer(this);
connect(m_timer, SIGNAL(timeout()), this, SLOT(processMeas()),Qt::QueuedConnection);
//m_timer->setInterval(1000);
m_timer->start(1000); //Timer 1 sec
m_cnt = 0;
m_values.setFileName(PWM_VALUES); //PWM - Values for motors
m_values.open(QIODevice::ReadOnly);
qDebug()<<"Start..";
QSerialPort *serialPort= new QSerialPort();
}
// ************************************************************************************************
UART_PC::~UART_PC() {
}
// ************************************************************************************************
uint32_t UART_PC::connectSerial(void) { //Serial init
m_serial.setPortName(SDEV);
m_serial.setBaudRate(QSerialPort::Baud9600);
m_serial.setDataBits(QSerialPort::Data8);
m_serial.setParity(QSerialPort::NoParity);
m_serial.setStopBits(QSerialPort::OneStop);
m_serial.setFlowControl(QSerialPort::NoFlowControl);
if (!m_serial.open(QIODevice::ReadWrite)) {
return 1;
}
connect(&m_serial, SIGNAL(readyRead()), this, SLOT(readRequest()),Qt::QueuedConnection);
return 0;
}
// ************************************************************************************************
void UART_PC::processMeas() { //State machine
qDebug()<<"Tick:"<<m_cnt;
if (m_state == WAIT) { //State 0 - Wait
m_cnt++;
if(m_cnt == 5){
m_state = SET_V1;
}
}
if (m_state == SET_V1) { //State 1 - Send
if(m_values.atEnd()){
m_values.seek(0);
}
if(!m_values.atEnd()){
QString v = m_values.readLine();
QStringList chunks = v.split(",");
QString cmd = "MP"+chunks.at(0)+chunks.at(1)+"\n\0";
m_serial.write(cmd.toUtf8());
m_serial.waitForBytesWritten(1000);
// qDebug()<<cmd<<v;
m_cnt = 0;
m_state = SET_V2;
// m_state = WAIT;
}
if (m_state == SET_V2){ // State 2 - receive and write
UART_PC::saveFile(QString::fromStdString(buffer.data()));
m_state = WAIT;
}
}
}
// ************************************************************************************************
void UART_PC::readRequest() { //Read and save
QMutexLocker lock(&m_mutex_serial);
m_serial_data += m_serial.readAll();
qDebug() << hex << m_serial_data;
UART_PC::saveFile(QString::fromStdString(m_serial_data.data()));
// buffer = m_serial_data;
m_serial_data.clear();
}
// ************************************************************************************************
void UART_PC::saveFile(QString buffer){ //Save
// qDebug() << "Ohlolololo"+buffer;
QString LOG_FILE = SAVE_DATA_LOG;
QFile file(LOG_FILE);
file.open(QIODevice::ReadWrite);
QTextStream stream(&file);
stream << buffer << endl;
file.close();
}
Thank you very much!
I read a program with MFC visual studio 2013 that load a video from file and the we can track a object. I set a stop button. it work for loading video and stop the loading. but I can't stop the running video in tracking. it's my gui program:
bool Cgui2Dlg::getImageFilePath()
{
m_inputVideo.GetWindowTextW(file_path_);
if (file_path_.GetLength() > 0)
return true;
else
{
AfxMessageBox(L"Please, select one video file or image sequence folder!");
//return false;
}
}
Size Cgui2Dlg::getWindowSizeByID(int IDC) {
return getWindowSize(GetDlgItem(IDC)->m_hWnd);
}
void Cgui2Dlg::openfile()
{
getImageFilePath();
CT2CA filePath2(file_path_);
string fp(filePath2);
VideoCapture capture(fp);
Mat frame;
if (!capture.isOpened())
throw "Error when reading steam_avi";
run_ = true;
///// show on the window frame
while (run_) {
capture >> frame;
if (frame.empty())
{
run_ = false;
break;
}
auto input_size = getWindowSizeByID(IDC_Video_Original);
view1_.create(input_size, CV_8UC3);
rect1_ = ZoomWithSpect(frame, view1_, cofi_, Scalar::all(0));
matToWinControl(view1_, &m_original_frame);
///
SendMessage(WM_MSG, 0, 0);
waitKey(400); // waits to display frame
this_thread::sleep_for(std::chrono::milliseconds(interval_));
}
if (run_) {
run_ = false;
OnBnClickedStop();
}
}
void Cgui2Dlg::OnBnClickedButton1()
{
//CFileDialog dlg(TRUE, _T("*.avi"), NULL,
//OFN_FILEMUSTEXIST | OFN_PATHMUSTEXIST | OFN_HIDEREADONLY, _T("video files (*.bmp; *.jpg; *.avi) |*.bmp;*.jpg;|All Files (*.*)|*.*||"), NULL);
//dlg.m_ofn.lpstrTitle = _T("Open Video");
//CString file_path_ = dlg.GetPathName(); // the selected filename // This is image pointer
setButtonsState(EcvButtonState::BUTTON1);
UpdateData(TRUE);
interval_ = 25;
std::thread t(&Cgui2Dlg::openfile, this);
t.detach();
}
void Cgui2Dlg::OnBnClickedButton3()
{
setButtonsState(EcvButtonState::BUTTON3);
UpdateData(TRUE);
interval_ = 200;
std::thread tt(&Cgui2Dlg::trackingfile, this);
tt.detach();
}
void Cgui2Dlg::trackingfile()
{
getImageFilePath();
CT2CA filePath2(file_path_);
string fp(filePath2);
run_ = true;
//VideoCapture capture(fp);
//Mat frame;
//capture >> frame;
setButtonsState(EcvButtonState::BUTTON3);
//check radio botton for choosing one algorithm
int checkradio = GetCheckedRadioButton(IDC_RADIO1, IDC_RADIO2);
switch (checkradio)
{
case IDC_RADIO1:
{
auto input_size = getWindowSizeByID(IDC_Video_Track);
view2_.create(input_size, CV_8UC3);
//rect1_ = ZoomWithSpect(frame, view1_, cofi_, Scalar::all(0));
//matToWinControl(view2_, &m_Track_frame);
//FCAMshift CAM(fp);//,run_);
//CAM.track(fp); //, run_);
FRunTracker CAM(fp);//,run_);
CAM.track(fp); //, run_);
//CAM.showresult();
///
//waitKey(40); // waits to display frame
//this_thread::sleep_for(std::chrono::milliseconds(interval_));
}
if (run_) {
run_ = false;
OnBnClickedStop();
}
break;
case IDC_RADIO2:
{
auto input_size = getWindowSizeByID(IDC_Video_Track);
view2_.create(input_size, CV_8UC3);
FCAMshift CAM(fp);//,run_);
CAM.track(fp); //, run_);
///
//waitKey(40); // waits to display frame
this_thread::sleep_for(std::chrono::milliseconds(interval_));
}
if (run_) {
run_ = false;
OnBnClickedStop();
}
break;
}
}
void Cgui2Dlg::setButtonsState(const EcvButtonState& state) {
switch (state)
{
case EcvButtonState::BUTTON1:
m_stop_btn.EnableWindow(TRUE);
m_Track_btn.EnableWindow(TRUE);
m_load_btn.EnableWindow(TRUE);
break;
case EcvButtonState::BUTTON3:
m_stop_btn.EnableWindow(TRUE);
m_Track_btn.EnableWindow(TRUE);
m_load_btn.EnableWindow(TRUE);
break;
case EcvButtonState::stop:
m_stop_btn.EnableWindow(TRUE);
m_Track_btn.EnableWindow(TRUE);
m_load_btn.EnableWindow(TRUE);
break;
}
//UpdateData(FALSE);
}
void Cgui2Dlg::OnBnClickedRadio1()
{
// TODO: Add your control notification handler code here
}
void Cgui2Dlg::OnBnClickedButton2()
{
run_ = false;
setButtonsState(EcvButtonState::stop);
}
void Cgui2Dlg::OnBnClickedRadio2()
{
// TODO: Add your control notification handler code here
}
void Cgui2Dlg::OnStnClickedVideoTrack()
{
}
void Cgui2Dlg::OnStnClickedVideoOriginal()
{
// TODO: Add your control notification handler code here
}
LRESULT Cgui2Dlg::onUpdateUI(WPARAM wparam, LPARAM lparam) {
matToWinControl(view1_, &m_original_frame);
//matToWinControl(view2_, &m_result_frame);
return 0;
}
void Cgui2Dlg::OnBnClickedStop()
{
run_ = false;
interval_ = 2000;
UpdateData(TRUE);
setButtonsState(EcvButtonState::stop);
}
and this is my tracking code:
/************************************************************************
* File: RunTracker.cpp
************************************************************************/
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "stdafx.h"
#include <iostream>
#include <fstream>
#include <sstream>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "CompressiveTracker.h"
#include "RunTracker.h"
using namespace cv;
using namespace std;
FRunTracker::FRunTracker(string x) //,bool y)
{
fp = x;
//run_= y;
}
Mat src2, img2, ROI2, image2;
Rect box(0, 0, 0, 0);
Point Pi1(0, 0);
Point Pi2(0, 0);
const char* winName2 = "compressive tracking demo";
bool click = false;
void showimage22() {
img2 = src2.clone();
//checkBoundary();
if (box.width>0 && box.height>0) {
ROI2 = src2(box);
//imshow("cropped", ROI2);
}
rectangle(img2, box, Scalar(0, 255, 0), 1, 8, 0);
imshow(winName2, img2);
}
void onMouse2(int event, int x, int y, int f, void*) {
switch (event) {
case CV_EVENT_LBUTTONDOWN:
click = true;
Pi1.x = x;
Pi1.y = y;
Pi2.x = x;
Pi2.y = y;
break;
case CV_EVENT_LBUTTONUP:
Pi2.x = x;
Pi2.y = y;
click = false;
//trackObject = -1;
break;
case CV_EVENT_MOUSEMOVE:
if (click) {
Pi2.x = x;
Pi2.y = y;
}
break;
default: break;
}
if (click) {
if (Pi1.x>Pi2.x) {
box.x = Pi2.x;
box.width = Pi1.x - Pi2.x;
}
else {
box.x = Pi1.x;
box.width = Pi2.x - Pi1.x;
}
if (Pi1.y>Pi2.y) {
box.y = Pi2.y;
box.height = Pi1.y - Pi2.y;
}
else {
box.y = Pi1.y;
box.height = Pi2.y - Pi1.y;
}
}
showimage22();
}
void FRunTracker::track(string fp)
{
VideoCapture input_video(fp);
input_video >> src2;
namedWindow(winName2, WINDOW_NORMAL);
setMouseCallback(winName2, onMouse2, NULL);
imshow(winName2, src2);
waitKey(0);
// CT framework
CompressiveTracker ct;
Mat grayimg2;
cvtColor(src2, grayimg2, CV_RGB2GRAY);
ct.init(grayimg2, box);
char strFrame[20];
//FILE* resultStream;
//resultStream = fopen("TrackingResults.txt", "w");
//fprintf(resultStream, "%i %i %i %i\n", (int)box.x, (int)box.y, (int)box.width, (int)box.height);
while (1) {
input_video >> src2;
if (src2.empty())
break;
cvtColor(src2, grayimg2, CV_RGB2GRAY);
ct.processFrame(grayimg2, box);// Process frame
rectangle(src2, box, Scalar(200, 0, 0), 2);// Draw rectangle
//fprintf(resultStream, "%i %i %i %i\n", (int)box.x, (int)box.y, (int)box.width, (int)box.height);
//sprintf(strFrame, "#%d ", i);
//putText(src2, strFrame, cvPoint(0, 20), 2, 1, CV_RGB(25, 200, 25));
imshow("Compressive Tracking", src2);// Display
waitKey(1);
}
//fclose(resultStream);
}
FRunTracker::~FRunTracker()
{
}
Your track function in FRunTracker class has a forever loop. Instead, put a boolean condition to check (like while(isRunning), etc) in the loop, and make the functions of your Cgui2Dlg class to change this condition. Don't forget to protect it with mutexes.
This is a class I use to spawn a HighGui window with some content on different thread.
class Capture {
private:
bool running;
std::thread thread;
cv::Mat background;
void loop() {
while (running) {
cv::imshow("sth",background);
cv::waitKey(settings::capture_wait_time);
}
}
public:
Capture() :
running {false},
thread {},
background { 800, 800, CV_8UC3, cv::Scalar{255,0,255}} {
cv::namedWindow("sth");
}
inline ~Capture() {
if (running) stop(); // stop and join the thread
cv::destroyWindow("sth");
}
void run() {
if (!running) {
running = true;
thread = std::thread{[this]{loop();}};
}
}
inline void join() { if (thread.joinable()) thread.join(); };
inline void stop() {
running = false;
if (thread.joinable()) thread.join();
}
};
// main
Capture cap;
cap.run();
// ...
The problem is that the window will always end up being black (in this case it should be purple). I am obviously missing something here....
It seems that you cannot create a window in another thread. Also, the way you're calling the member function on the other thread seems wrong.
Have a look at this code. It displays an image that change every second in a different thread, and returns after 5 seconds.
#include <opencv2/opencv.hpp>
#include <thread>
using namespace std;
using namespace cv;
class Capture {
private:
bool running;
std::thread thread;
cv::Mat background;
void loop() {
while (running) {
cv::imshow("sth", background);
cv::waitKey(1000);
Scalar color(rand()&255, rand()&255, rand()&255);
background.setTo(color);
}
}
public:
Capture() :
running{ false },
thread{},
background{ 800, 800, CV_8UC3, cv::Scalar{ 255, 0, 255 } } {
}
inline ~Capture() {
if (running) stop(); // stop and join the thread
}
void run() {
if (!running) {
running = true;
thread = std::thread{ &Capture::loop, this };
}
}
inline void join() { if (thread.joinable()) thread.join(); };
inline void stop() {
running = false;
if (thread.joinable()) {
thread.join();
}
}
};
int main()
{
Capture cap;
cap.run();
std::this_thread::sleep_for(std::chrono::milliseconds(5000));
cap.stop();
return 0;
}
Im trying to initialise VideoCapture capturedevice once in my multithreading program. Once initialised it should serve in the acquireImage-thread as the image-buffer, which is filled by a webcam.
The following code shows me a "OpenCV Error: Assertion failed (func != 0) in cv::imshow"-error during runtime, which means VideoCapture capturedevice is never really initialised and therefore imshow doesnt have necessary data, but why?
The code was constructed based on this question: Correctly using mutex in OpenCL-OpenCV-Realtime-Threads? Basically it uses two threads, one processes images originating from a usb-webcam and the second will find faces on those images...but does nothing at the moment for code-simplicity.
#include <memory>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <vector>
#include <cmath>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "opencv2/ocl/ocl.hpp"
#include "opencv2/opencv.hpp"
#include <functional>
using namespace std;
using namespace cv;
typedef unsigned char uchar;
typedef unsigned int uint;
class FaceDetector
{
mutex imageLock, facesLock;
condition_variable imageAqcuired;
bool newImageAvailable;
Mat _img;
Mat _imgToWorkOn;
Mat _faceImages;
bool quit;
VideoCapture captureDevice;
int device_id;
FaceDetector (int _device_id);
void acquireImage()
{
while (!quit)
{
unique_lock<mutex> ulock(imageLock);
imageAqcuired.wait(ulock,[&](){return !newImageAvailable;}); //only take new image after current one was consumed
Mat captureFrame;
captureDevice>>captureFrame;
transpose(captureFrame,captureFrame);
flip(captureFrame,captureFrame,1);
_img = captureFrame.clone();
ulock.unlock();
newImageAvailable = true;
imageAqcuired.notify_one(); //notify that a new image is available
}
}
void processImage()
{
while (!quit)
{
unique_lock<mutex> ulock(imageLock);
imageAqcuired.wait(ulock,[&](){return newImageAvailable;}); //wait untill a new image is available
_imgToWorkOn = _img.clone();
ulock.unlock();
newImageAvailable = false;
imageAqcuired.notify_one(); //notify the current image can be replaced by a newer one
unique_lock<mutex> lockFace(facesLock);
//arbeit
lockFace.unlock();
}
}
public:
FaceDetector() : newImageAvailable(false) {}
void start() {
quit = false;
thread t1(&FaceDetector::acquireImage,this);
t1.detach();
thread t2(&FaceDetector::processImage,this);
t2.detach();
}
void stop() {
quit = true;
}
Mat getImage() {
if (quit)
return Mat();
lock_guard<mutex> lock(imageLock);
return _img;
}
Mat getProcessedImage() {
if (quit)
return Mat();
lock_guard<mutex> lock(facesLock);
return _faceImages;
}
};
FaceDetector::FaceDetector(int _device_id)
{
device_id = _device_id;
captureDevice.open(device_id);
captureDevice.set(CV_CAP_PROP_FRAME_WIDTH,620); //erst jetzt cam.set weil sonst system-pause nicht funzt
captureDevice.set(CV_CAP_PROP_FRAME_HEIGHT,480);
}
int main()
{
bool quit(false);
FaceDetector faceDet;
faceDet.start();
thread input([](bool &quitFlag) { getchar(); quitFlag = true; },ref(quit)); //stop on user press Enter
input.detach();
while (!quit) {
Mat img = faceDet.getImage();
Mat imgc = img.clone();
imshow("links", imgc);
/*
imgs = faceDet.getProcessedImage();
Mat imgsc = imgs.clone();
imshow("gsichter", imgsc);
*/
waitKey(30);
this_thread::sleep_for(chrono::milliseconds(33)); //no need to show more than 30 fps...
}
faceDet.stop();
return 0;
}
Edit: I tried to include your answer, still getting "Opencv Assertion Error in imshow".
#include <memory>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <vector>
#include <cmath>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "opencv2/ocl/ocl.hpp"
#include "opencv2/opencv.hpp"
#include <functional>
using namespace std;
using namespace cv;
typedef unsigned char uchar;
typedef unsigned int uint;
class FaceDetector
{
mutex imageLock, facesLock;
condition_variable imageAqcuired;
bool newImageAvailable;
Mat _img;
Mat _imgToWorkOn;
Mat _faceImages;
bool quit;
VideoCapture captureDevice;
int device_id;
void acquireImage()
{
while (!quit)
{
unique_lock<mutex> ulock(imageLock);
imageAqcuired.wait(ulock,[&](){return !newImageAvailable;}); //only take new image after current one was consumed
Mat captureFrame;
captureDevice>>captureFrame;
//transpose(captureFrame,captureFrame);
//flip(captureFrame,captureFrame,1);
_img = captureFrame.clone();
ulock.unlock();
newImageAvailable = true;
imageAqcuired.notify_one(); //notify that a new image is available
}
}
void processImage()
{
while (!quit)
{
unique_lock<mutex> ulock(imageLock);
imageAqcuired.wait(ulock,[&](){return newImageAvailable;}); //wait untill a new image is available
_imgToWorkOn = _img.clone();
ulock.unlock();
newImageAvailable = false;
imageAqcuired.notify_one(); //notify the current image can be replaced by a newer one
unique_lock<mutex> lockFace(facesLock);
//arbeit
lockFace.unlock();
}
}
public:
FaceDetector() : newImageAvailable(false) {}
void start() {
quit = false;
thread t1(&FaceDetector::acquireImage,this);
t1.detach();
thread t2(&FaceDetector::processImage,this);
t2.detach();
}
void stop() {
quit = true;
}
Mat getImage() {
if (quit)
return Mat();
lock_guard<mutex> lock(imageLock);
return _img;
}
Mat getProcessedImage() {
if (quit)
return Mat();
lock_guard<mutex> lock(facesLock);
return _faceImages;
}
FaceDetector::FaceDetector(int _device_id)
{
VideoCapture captureDevice;
device_id = _device_id;
captureDevice.open(device_id);
captureDevice.set(CV_CAP_PROP_FRAME_WIDTH,620); //erst jetzt cam.set weil sonst system-pause nicht funzt
captureDevice.set(CV_CAP_PROP_FRAME_HEIGHT,480);
}
};
int main()
{
bool quit(false);
FaceDetector faceDet(0);
faceDet.start();
thread input([](bool &quitFlag) { getchar(); quitFlag = true; },ref(quit)); //stop on user press Enter
input.detach();
while (!quit) {
Mat img = faceDet.getImage();
Mat imgc = img.clone();
imshow("links", imgc);
/*
imgs = faceDet.getProcessedImage();
Mat imgsc = imgs.clone();
imshow("gsichter", imgsc);
*/
waitKey(30);
this_thread::sleep_for(chrono::milliseconds(33)); //no need to show more than 30 fps...
}
faceDet.stop();
return 0;
}
I also tried following code, but still getting said Assertion Error.
public:
FaceDetector(int device_id) : newImageAvailable(false) {}
....
void init() {
VideoCapture captureDevice;
captureDevice.open(device_id);
captureDevice.set(CV_CAP_PROP_FRAME_WIDTH,620); //erst jetzt cam.set weil sonst system-pause nicht funzt
captureDevice.set(CV_CAP_PROP_FRAME_HEIGHT,480);
}
int main()
{
FaceDetector faceDet(0);
faceDet.init();
faceDet.start();
}
In main() function You are not creating object faceDet using constructor FaceDetector::FaceDetector(int _device_id) but you are using default constructor. This means you are not opening captureDevice at all.
Edit for correction
In the declaration, make FaceDetector::FaceDetector(int _device_id) public.
Now in main(), create object faceDet using this constructor, you need to call like this:
FaceDetector faceDet(0); // I have taken 0 as as default camera ID, you can add other value like 1 or 2 etc depending the choice of camera too.
This should be be working now, please let me know of any problem should you face.