I have a vector whose elements are changed by a worker thread while the main thread reads them.
While the thread is running, I use a mutex when changing the vector's elements.
But if I just want to enumerate the vector's elements, do I still have to lock the vector?
Here is the code:
#include <napi.h>
#include "facedetect.h"
#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <thread>
using namespace std;
using namespace cv;
std::mutex facesMutex;
string cascadeName = "/usr/local/share/opencv4/haarcascades/haarcascade_frontalface_alt.xml";
bool running = true;
vector<Rect> faces;
class FaceDetectWorker : public Napi::AsyncWorker {
public:
FaceDetectWorker(Napi::Function& callback, string url)
: Napi::AsyncWorker(callback), url(url) {
}
~FaceDetectWorker() {
}
vector<Rect> detectAndDraw( Mat& img, CascadeClassifier& cascade)
{
double t = 0;
vector<Rect> faces;
Mat gray, smallImg;
cvtColor( img, gray, COLOR_BGR2GRAY );
//double scale = 1;
// scale = 1, fx = 1 / scale
double fx = 1;
resize( gray, smallImg, Size(), fx, fx, INTER_LINEAR_EXACT );
equalizeHist( smallImg, smallImg );
t = (double)getTickCount();
cascade.detectMultiScale( smallImg, faces,
1.1, 2, 0
//|CASCADE_FIND_BIGGEST_OBJECT
//|CASCADE_DO_ROUGH_SEARCH
|CASCADE_SCALE_IMAGE,
Size(30, 30) );
t = (double)getTickCount() - t;
printf( "detection time = %g ms\n", t*1000/getTickFrequency());
return faces;
}
// Executed inside the worker-thread.
// It is not safe to access JS engine data structure
// here, so everything we need for input and output
// should go on `this`.
void Execute () {
Mat frame, image;
VideoCapture capture;
CascadeClassifier cascade;
if (!cascade.load(samples::findFile(cascadeName)))
{
Napi::AsyncWorker::SetError("ERROR: Could not load classifier cascade");
return;
}
if (!capture.open(url))
{
Napi::AsyncWorker::SetError("ERROR: Error opening video stream " + url);
return;
}
if( capture.isOpened() )
{
cout << "Video capturing has been started ..." << endl;
try {
while(running) {
capture >> frame;
if( frame.empty()) {
continue;
}
Mat frame1 = frame.clone();
vector<Rect> facesResult = detectAndDraw( frame1, cascade);
facesMutex.lock();
faces = facesResult;
facesMutex.unlock();
std::this_thread::sleep_for(std::chrono::milliseconds(30));
}
} catch (std::exception &e) {
facesMutex.unlock();
Napi::AsyncWorker::SetError(e.what());
}
}
}
// Executed when the async work is complete
// this function will be run inside the main event loop
// so it is safe to use JS engine data again
void OnOK() {
Napi::HandleScope scope(Env());
Callback().Call({Env().Undefined(), Env().Undefined()});
}
private:
string url;
};
// Asynchronously queue the face detection worker
Napi::Value FaceDetect(const Napi::CallbackInfo& info) {
Napi::String url = info[0].As<Napi::String>().ToString();
Napi::Function callback = info[1].As<Napi::Function>();
FaceDetectWorker* faceDetectWorker = new FaceDetectWorker(callback, url);
faceDetectWorker->Queue();
return info.Env().Undefined();
}
Napi::Array FaceDetectGet(const Napi::CallbackInfo &info) {
facesMutex.lock();
Napi::Array faceArray = Napi::Array::New(info.Env(), faces.size());
for(uint32_t i = 0; i < faces.size(); i++) {
Rect rect = faces[i];
Napi::Object obj = Napi::Object::New(info.Env());
obj.Set("x", rect.x);
obj.Set("y", rect.y);
obj.Set("width", rect.width);
obj.Set("height", rect.height);
faceArray[i] = obj;
}
facesMutex.unlock();
return faceArray;
}
So the question is: if I use FaceDetectGet, which only enumerates the vector, should I lock and unlock it?
Actually, the solution was to use a shared mutex. Reads do need to be synchronized with the writer; a shared (reader/writer) lock just lets any number of enumerating readers proceed in parallel, while the worker takes exclusive ownership only for the moment it swaps in a new result.
The code looks like this:
#include <napi.h>
#include "facedetect.h"
#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <thread>
#include <mutex> // For std::unique_lock
#include <shared_mutex>
#include <atomic>
// https://stackoverflow.com/questions/55313194/do-i-have-to-lock-a-vectors-that-i-just-enumerate-or-i-only-do-it-when-i-change?noredirect=1#comment97357425_55313194
using namespace std;
using namespace cv;
std::shared_mutex _facesMutex;
string cascadeName = "/usr/local/share/opencv4/haarcascades/haarcascade_frontalface_alt.xml";
std::atomic<bool> running = true; // written by FaceDetectCancel, read by the worker thread
vector<Rect> faces;
class FaceDetectWorker : public Napi::AsyncWorker {
public:
FaceDetectWorker(Napi::Function& callback, string url, int skip, int sleep)
: Napi::AsyncWorker(callback), url(url), skip(skip), sleep(sleep) {
}
~FaceDetectWorker() {
}
vector<Rect> detectFaces(Mat &img, CascadeClassifier &cascade)
{
double t = 0;
vector<Rect> faces;
Mat gray, smallImg;
cvtColor( img, gray, COLOR_BGR2GRAY );
//double scale = 1;
// scale = 1, fx = 1 / scale
double fx = 1;
//resize( gray, smallImg, Size(), fx, fx, INTER_LINEAR_EXACT );
//equalizeHist( gray, smallImg );
//t = (double)getTickCount();
cascade.detectMultiScale( gray, faces,
1.1, 2, 0
//|CASCADE_FIND_BIGGEST_OBJECT
//|CASCADE_DO_ROUGH_SEARCH
|CASCADE_SCALE_IMAGE,
Size(30, 30) );
//t = (double)getTickCount() - t;
//printf( "detection time = %g ms\n", t*1000/getTickFrequency());
return faces;
}
// Executed inside the worker-thread.
// It is not safe to access JS engine data structure
// here, so everything we need for input and output
// should go on `this`.
void Execute () {
running = true;
Mat frame, image;
VideoCapture capture;
CascadeClassifier cascade;
if (!cascade.load(samples::findFile(cascadeName)))
{
Napi::AsyncWorker::SetError("ERROR: Could not load classifier cascade");
return;
}
if (!capture.open(url))
{
Napi::AsyncWorker::SetError("ERROR: Error opening video stream " + url);
return;
}
if( capture.isOpened() )
{
cout << "Video capturing has been started ..." << endl;
try {
int skipCount = 0;
while(running) {
//capture.read(frame);
capture >> frame;
if( frame.empty()) {
continue;
}
skipCount++;
//cout<< "sleep " << sleep << " skip " << skip << endl;
if (skipCount >= skip) {
//cout<< "calculation " << skipCount << endl;
skipCount = 0;
Mat frame1 = frame.clone();
vector<Rect> facesResult = detectFaces(frame1, cascade);
std::unique_lock lock(_facesMutex);
faces = facesResult;
lock.unlock();
}
//waitKey(250);
std::this_thread::sleep_for(std::chrono::milliseconds(sleep));
}
} catch (std::exception &e) {
Napi::AsyncWorker::SetError(e.what());
}
} else {
Napi::AsyncWorker::SetError("ERROR: Could not open video camera " + url);
}
}
// Executed when the async work is complete
// this function will be run inside the main event loop
// so it is safe to use JS engine data again
void OnOK() {
Napi::HandleScope scope(Env());
Callback().Call({Env().Undefined(), Env().Undefined()});
}
private:
string url;
int skip = 3;
int sleep = 30;
};
// Asynchronously queue the face detection worker
Napi::Value FaceDetect(const Napi::CallbackInfo& info) {
Napi::String url = info[0].As<Napi::String>();
Napi::Number skip = info[1].As<Napi::Number>();
Napi::Number sleep = info[2].As<Napi::Number>();
Napi::Function callback = info[3].As<Napi::Function>();
FaceDetectWorker* faceDetectWorker = new FaceDetectWorker(callback, url, skip, sleep);
faceDetectWorker->Queue();
return info.Env().Undefined();
}
Napi::Array FaceDetectGet(const Napi::CallbackInfo &info) {
std::shared_lock lock(_facesMutex);
vector<Rect> faces2 = faces; // copy out while holding the shared lock
lock.unlock();
Napi::Array faceArray = Napi::Array::New(info.Env(), faces2.size());
for(uint32_t i = 0; i < faces2.size(); i++) {
Rect rect = faces2[i];
Napi::Object obj = Napi::Object::New(info.Env());
obj.Set("x", rect.x);
obj.Set("y", rect.y);
obj.Set("width", rect.width);
obj.Set("height", rect.height);
faceArray[i] = obj;
}
return faceArray;
}
void FaceDetectCancel(const Napi::CallbackInfo &info) {
running = false;
}
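To see the pattern without the N-API and OpenCV details, here is a minimal sketch of the same writer/reader split (an illustration only; publish and snapshot are placeholder names, and it assumes C++17 for std::shared_mutex and deduction on the lock guards):

#include <mutex>
#include <shared_mutex>
#include <vector>

std::shared_mutex mtx;
std::vector<int> values;

// Writer: takes the mutex exclusively, so it waits until every reader is out.
void publish(std::vector<int> fresh) {
    std::unique_lock lock(mtx);
    values = std::move(fresh);
}

// Reader: takes the mutex in shared mode, so any number of readers can
// copy the vector at the same time; only a writer excludes them.
std::vector<int> snapshot() {
    std::shared_lock lock(mtx);
    return values; // copy out while protected
}

Copying out under the lock, as snapshot does, means the enumeration itself runs with no lock held at all, which is exactly what FaceDetectGet does with faces2.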
I am running into a problem with timers:
QObject::~QObject: Timers cannot be stopped from another thread
Below is the log from my terminal.
$ ./boost_tutorials
init done
All done. Exit safely!
QObject::~QObject: Timers cannot be stopped from another thread
Here is my C++ code, which uses threads from Boost. I build it via CMakeLists.
#include <boost/thread.hpp>
#include <boost/chrono.hpp>
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
class Robot {
public:
Robot() {
cap.open(-1);
SYSTEM_QUIT = false;
if (!cap.isOpened()) {
cout << "Error opening the video" << endl;
SYSTEM_QUIT = true;
}
}
~Robot() {
destroyAllWindows();
cout << "All done. Exit safely!" << endl;
}
void run() {
boost::thread t1(boost::bind(&Robot::imageProcCallback, this));
boost::thread t2(boost::bind(&Robot::simpleCallback, this));
t1.join();
t2.join();
}
void imageProcCallback() {
Mat frame;
int cc = 0;
while(!SYSTEM_QUIT) {
cap >> frame;
if (!msgs.empty()) {
Point pt(rand()%frame.cols, rand()%frame.rows);
putText(frame, msgs.back(), pt, cv::FONT_HERSHEY_COMPLEX, 2, Scalar(0, 255, 0));
}
imshow(filename, frame);
if (waitKey(5) == 27)
SYSTEM_QUIT = true;
}
frame.release();
}
void simpleCallback() {
while(!SYSTEM_QUIT) {
int count = rand() % 100;
std::string str = std::to_string(count);
msgs.push_back(str);
}
}
private:
string filename;
VideoCapture cap;
bool SYSTEM_QUIT;
std::vector<std::string> msgs;
};
int main() {
Robot robot;
robot.run();
}
I've written a fairly basic C++ program which uses the OpenCV library to show a video stream from an IP camera I have.
Since I want to add image-processing code in the future, I thought it would be a good idea to use threads: one thread captures the most recent frame and the other thread reads this frame and displays it on screen. I used a pthread_mutex_t to lock the frame variable.
My problem is that the code compiles, but when I execute the program nothing happens; it just exits after a couple of seconds. I've verified this is not a problem with the VideoCapture object, but I don't have any other idea why it does not work.
This is my code:
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <pthread.h>
using namespace cv;
using namespace std;
//GLOBALS
VideoCapture vcap;
Mat frame;
pthread_mutex_t *frameLocker;
const string videoStreamAddress = "http://10.0.0.6/mjpg/video.mjpg";
void *Proc(void *arg)
{
for(;;)
{
pthread_mutex_lock(frameLocker);
vcap.read(frame);
pthread_mutex_unlock(frameLocker);
}
}
int main(int, char**) {
frameLocker = new pthread_mutex_t();
vcap.open(videoStreamAddress);
pthread_mutex_init(frameLocker,NULL);
pthread_t *ProcThread;
pthread_create(ProcThread, NULL, Proc, NULL);
for(;;)
{
pthread_mutex_lock(frameLocker);
imshow("Output Window", frame);
pthread_mutex_unlock(frameLocker);
}
delete frameLocker;
}
I'd be glad if you could help me solve this issue.
Thanks,
Matan
I was able to solve this using the following code. The main changes from the original: the mutex is a plain pthread_mutex_t object instead of a heap pointer, the thread handle is a real pthread_t passed by address (the original handed pthread_create an uninitialized pthread_t*, which is undefined behavior), the frame is captured into a temporary Mat so the lock is held only for the assignment, and the display loop calls waitKey so that imshow can actually render:
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <pthread.h>
using namespace cv;
using namespace std;
//GLOBALS
VideoCapture vcap;
Mat frame;
pthread_mutex_t frameLocker;
const string videoStreamAddress = "http://IP/mjpg/video.mjpg";
void *UpdateFrame(void *arg)
{
for(;;)
{
Mat tempFrame;
vcap >> tempFrame;
pthread_mutex_lock(&frameLocker);
frame = tempFrame;
pthread_mutex_unlock(&frameLocker);
}
}
int main(int, char**) {
vcap.open(videoStreamAddress);
pthread_mutex_init(&frameLocker,NULL);
pthread_t UpdThread;
pthread_create(&UpdThread, NULL, UpdateFrame, NULL);
for(;;)
{
Mat currentFrame;
pthread_mutex_lock(&frameLocker);
currentFrame = frame;
pthread_mutex_unlock(&frameLocker);
if(currentFrame.empty()){
printf("recieved empty frame\n");
continue;
}
imshow("Output Window", currentFrame);
waitKey(1);
}
}
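The important detail in this version is that the blocking work stays outside the critical section: the capture reads into a temporary Mat before the lock is taken, and imshow/waitKey run on a local copy after the lock is released, so the mutex is only ever held for a cheap Mat assignment.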
I am trying to take the difference of the center pixel with each of its 4 neighbours, add the differences up, and then replace the original pixel with that value, but it always replaces the pixel with zero. I don't know what I am doing wrong. Thanks for any help.
// newproject.cpp : Defines the entry point for the console application.
#include "stdafx.h"
#include "highgui.h"
#include <stdio.h>
#include <cv.h>
#include <highgui.h>
#include <stdio.h>
#include <conio.h>
#include <opencv2/imgproc/imgproc.hpp> // Gaussian Blur
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat, Scalar)
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <conio.h>
using namespace cv;
using namespace std;
class frameprocessing{
Mat hsv_base;
MatND hist_base;
public:
void whatever(Mat Frame)
{
for(int i=0;i<Frame.cols;i++)
for(int j=0;j<Frame.rows;j++)
{
if(i==0&&j==0)
{
// cout<<"Check 1"<<endl;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i)-(Frame.at<Vec3b>(j+1,i))+(Frame.at<Vec3b>(j,i)-Frame.at<Vec3b>(j,i+1))+(Frame.at<Vec3b>(j,i)-Frame.at<Vec3b>(j+1,i))+(Frame.at<Vec3b>(j,i)-Frame.at<Vec3b>(j,i+1)));
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[0]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[1]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[2]/4;
}
else if(i==Frame.cols-1&&j==Frame.rows-1)
{
// cout<<"Check 2"<<endl;
Frame.at<Vec3b>(j,i)=((Frame.at<Vec3b>(j-1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j,i-1)+Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j-1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j,i-1)+Frame.at<Vec3b>(j,i)));
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[0]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[1]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[2]/4;
}
else if(i==Frame.cols-1&&j==0)
{
//cout<<"Check 3"<<endl;
Frame.at<Vec3b>(j,i)=((Frame.at<Vec3b>(j+1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j,i-1)+Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j+1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j,i-1)+Frame.at<Vec3b>(j,i)));
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[0]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[1]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[2]/4;
}
else if(i==0&&j==Frame.rows-1)
{
// cout<<"Check 4"<<endl;
Frame.at<Vec3b>(j,i)=((Frame.at<Vec3b>(j-1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j,i+1)+Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j-1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j,i+1)+Frame.at<Vec3b>(j,i)));
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[0]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[1]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[2]/4;
}
else if(i==0)
{
// cout<<"Check 5"<<endl;
Frame.at<Vec3b>(j,i)=((Frame.at<Vec3b>(j+1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j,i+1)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j,i+1)-Frame.at<Vec3b>(j,i))+((Frame.at<Vec3b>(j-1,i)-Frame.at<Vec3b>(j,i))));
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[0]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[1]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[2]/4;
}
else if(j==0)
{
// cout<<"Check 6"<<endl;
Frame.at<Vec3b>(j,i)=((Frame.at<Vec3b>(j+1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j+1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j,i+1)-Frame.at<Vec3b>(j,i)+(Frame.at<Vec3b>(j,i-1)-Frame.at<Vec3b>(j,i))));
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[0]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[1]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[2]/4;
}
else if(i==Frame.cols-1)
{
// cout<<"Check 7"<<endl;
Frame.at<Vec3b>(j,i)=((Frame.at<Vec3b>(j+1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j-1,i)-Frame.at<Vec3b>(j,i))+((Frame.at<Vec3b>(j,i-1)-Frame.at<Vec3b>(j,i)))+((Frame.at<Vec3b>(j,i-1)-Frame.at<Vec3b>(j,i))));
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[0]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[1]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[2]/4;
}
else if(j==Frame.rows-1)
{
// cout<<"Check 8"<<endl;
Frame.at<Vec3b>(j,i)=((Frame.at<Vec3b>(j,i+1)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j-1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j-1,i)-Frame.at<Vec3b>(j,i))+((Frame.at<Vec3b>(j,i-1)-Frame.at<Vec3b>(j,i))));
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[0]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[1]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[2]/4;
}
else
{
Frame.at<Vec3b>(j,i)=((Frame.at<Vec3b>(j-1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j+1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j,i+1)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j,i-1)+Frame.at<Vec3b>(j,i)));
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[0]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[1]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[2]/4;
Vec3d value = Frame.at<Vec3b>(j,i);
cout<<value[0]<<endl;
cout<<value[1]<<endl;
cout<<value[2]<<endl;
}
}
//hell(Frame);
}
};
class video{
Mat frame;
string filename;
double dWidth;
double dHeight;
public:
video()
{
}
video(string videoname)
{
vector<Mat> videoframes;
filename = videoname;
VideoCapture capture(filename);
if( !capture.isOpened() )
{
exit(0);
}
dWidth = capture.get(CV_CAP_PROP_FRAME_WIDTH); //get the width of frames of the video
dHeight = capture.get(CV_CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video
frameprocessing obj;
for( ; ; )
{
capture >> frame;
if(frame.empty())
break;
// Mat tmp=frame.clone();
obj.whatever(frame);
// obj.hsv_histogram(frame);
// videoframes.push_back(tmp);
}
//displayvideo(videoframes);
//writer(videoframes);
}
};
int _tmain(int argc, _TCHAR* argv[])
{
video obj("video.avi");
return 0;
}
It seems you need a Laplacian with the kernel size parameter ksize = 1, see here: http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=laplacian#laplacian
You just need to divide the result by 4 after the convolution to get the correct Laplacian.
As for your code, I think you should use different images as source and destination; you can't do this in place.
class frameprocessing{
Mat hsv_base;
MatND hist_base;
public:
void whatever(Mat Frame)
{
Frame.convertTo(Frame,CV_32FC3);
Mat newimage=Mat::zeros(Frame.size(),CV_32FC3);
for (int j=1;j<Frame.rows-1;++j)
{
for (int i=1;i<Frame.cols-1;++i)
{
newimage.at<Vec3f>(j,i)=Frame.at<Vec3f>(j,i)-
0.25*Frame.at<Vec3f>(j+1,i)-
0.25*Frame.at<Vec3f>(j,i+1)-
0.25*Frame.at<Vec3f>(j-1,i)-
0.25*Frame.at<Vec3f>(j,i-1);
cout << newimage.at<Vec3f>(j,i) << endl;
}
}
//imshow("result",newimage);
//cv::waitKey(30);
}
};
class video{
Mat frame;
string filename;
double dWidth;
double dHeight;
public:
video()
{
}
video(string videoname)
{
vector<Mat> videoframes;
filename = videoname;
VideoCapture capture(filename);
if( !capture.isOpened() )
{
exit(0);
}
dWidth = capture.get(cv::CAP_PROP_FRAME_WIDTH); //get the width of frames of the video
dHeight = capture.get(cv::CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video
frameprocessing obj;
for( ; ; )
{
capture >> frame;
if(frame.empty())
break;
// Mat tmp=frame.clone();
obj.whatever(frame);
// obj.hsv_histogram(frame);
// videoframes.push_back(tmp);
}
//displayvideo(videoframes);
//writer(videoframes);
}
};
int main(int argc, char* argv[])
{
namedWindow("result");
video obj("D:\\ImagesForTest\\atrium.avi");
}
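For completeness, OpenCV's built-in filter can produce the same result; the following is a sketch (neighbourDiff is just an illustrative name, untested against your data) rather than a drop-in replacement. With ksize = 1, cv::Laplacian convolves with the kernel [0 1 0; 1 -4 1; 0 1 0], so scale = -0.25 yields exactly the center pixel minus the average of its 4 neighbours, matching the loop above:

// 4-neighbour difference via cv::Laplacian (assumes a 3-channel input).
Mat neighbourDiff(const Mat &src)
{
    Mat srcF, dst;
    src.convertTo(srcF, CV_32FC3); // float, so negative values survive
    // ksize = 1 selects the 4-neighbour kernel; scale = -0.25 divides by 4
    // and flips the sign to match newimage in the loop version.
    Laplacian(srcF, dst, CV_32F, 1, -0.25);
    return dst;
}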
I am trying to display a video in a separate function. For this I am using a vector: I push_back each frame into it and then pass the vector to the function, but the function displays a single frame repetitively. My code is below. Please tell me what I am doing wrong.
// newproject.cpp : Defines the entry point for the console application.
#include "stdafx.h"
#include "highgui.h"
#include <stdio.h>
#include <cv.h>
#include <highgui.h>
#include <stdio.h>
#include <conio.h>
#include <opencv2/imgproc/imgproc.hpp> // Gaussian Blur
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat, Scalar)
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <conio.h>
using namespace cv;
using namespace std;
class frameprocessing{
Mat hsv_base;
MatND hist_base;
public:
void hsv_histogram(Mat Frame)
{
cvtColor( Frame, hsv_base, CV_BGR2HSV );
int h_bins = 50;
int s_bins = 32;
int histSize[] = { h_bins, s_bins };
float h_ranges[] = { 0, 180 }; // hue range in OpenCV is [0,180)
float s_ranges[] = { 0, 256 };
const float* ranges[] = { h_ranges, s_ranges };
int channels[] = { 0, 1 };
calcHist( &hsv_base, 1, channels, Mat(), hist_base, 2, histSize, ranges, true, false );
}
};
class video{
Mat frame;
string filename;
double dWidth;
double dHeight;
public:
video()
{
}
video(string videoname)
{
vector<Mat> videoframes;
std::vector<Mat>::iterator it;
it = videoframes.begin();
filename = videoname;
VideoCapture capture(filename);
if( !capture.isOpened() )
{
exit(0);
}
dWidth = capture.get(CV_CAP_PROP_FRAME_WIDTH); //get the width of frames of the video
dHeight = capture.get(CV_CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video
frameprocessing obj;
for( ; ; )
{
capture >> frame;
if(frame.empty())
break;
obj.hsv_histogram(frame);
videoframes.push_back(frame);
}
displayvideo(videoframes);
// waitKey(0); // key press to close window
}
void writer()
{
Size frameSize(static_cast<int>(dWidth), static_cast<int>(dHeight));
VideoWriter oVideoWriter ("D:/MyVideo.avi", CV_FOURCC('P','I','M','1'), 20, frameSize, true); //initialize the VideoWriter object
if ( !oVideoWriter.isOpened() ) //if not initialize the VideoWriter successfully, exit the program
{
cout << "ERROR: Failed to write the video" << endl;
exit(0);
}
}
void displayvideo(vector<Mat> videoframe)
{
Mat tempframe;
while(!videoframe.empty()) //Show the image captured in the window and repeat
{
tempframe = videoframe.back();
imshow("video", tempframe);
videoframe.pop_back();
waitKey(20); // waits to display frame
}
// waitKey(0);
}
void displayframe(Mat frame)
{
imshow("video", frame);
waitKey(20); // waits to display frame
}
};
int _tmain(int argc, _TCHAR* argv[])
{
video obj("video.avi");
//obj.readvideo();
}
You need to copy or clone your frame to another Mat and then push that to your vector; change your code like this:
for( ; ; )
{
capture >> frame;
if(frame.empty())
break;
Mat tmp=frame.clone();
obj.hsv_histogram(tmp);
videoframes.push_back(tmp);
}
In your code you are pushing the same Mat every time, and a Mat is only a small header that points at shared pixel data, so you end up with a vector of Mats that all reference the same (single) memory location. To learn more about OpenCV Mat and memory allocation, see the documentation. (Note, too, that displayvideo takes frames from the back of the vector with pop_back, so even with cloned frames it plays the video in reverse; iterate from the front to keep the original order.)
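A small self-contained demonstration of the difference, reusing the headers already included in the question (the values are arbitrary):

Mat a(2, 2, CV_8UC1, Scalar(0));
Mat b = a;            // shallow copy: b shares a's pixel buffer
Mat c = a.clone();    // deep copy: c owns its own pixels
a.at<uchar>(0, 0) = 255;
cout << (int)b.at<uchar>(0, 0) << endl; // prints 255 -- b sees the change
cout << (int)c.at<uchar>(0, 0) << endl; // prints 0   -- c does not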