I am using the OpenCV LK optical flow example for a robot vision application with ROS.
It seems to be working reasonably well; however, I am getting the error below:
OpenCV Error: Assertion failed ((npoints = prevPtsMat.checkVector(2, CV_32F, true)) >= 0) in calc, file /build/opencv-L2vuMj/opencv-3.2.0+dfsg/modules/video/src/lkpyramid.cpp, line 1231
terminate called after throwing an instance of 'cv::Exception'
what(): /build/opencv-L2vuMj/opencv-3.2.0+dfsg/modules/video/src/lkpyramid.cpp:1231: error: (-215) (npoints = prevPtsMat.checkVector(2, CV_32F, true)) >= 0 in function calc
Aborted (core dumped)
As far as I can understand from the error message and from the system's behavior, it happens when all of the initial tracking points are gone in the current frame.
Is there a way to restart the process (defining new points) instead of aborting?
My code, by the way:
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/video.hpp>
#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>
#include <vector>
using namespace cv;
using namespace std;
static const std::string OPENCV_WINDOW = "Image_window";
class ImageConverter
{
ros::NodeHandle nh_;
image_transport::ImageTransport it_;
image_transport::Subscriber image_sub_;
image_transport::Publisher image_pub_;
public:
cv::Mat current_frame;
public:
ImageConverter()
: it_(nh_)
{
image_sub_ = it_.subscribe("/realsense/color/image_raw", 1,
&ImageConverter::imageCallback, this);
cv::namedWindow(OPENCV_WINDOW);
}
~ImageConverter()
{
cv::destroyWindow(OPENCV_WINDOW);
}
void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
cv_bridge::CvImagePtr cv_image;
try
{
cv_image = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
current_frame = cv_image->image;
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR("cv_bridge exception: %s", e.what());
return;
}
}
};
int main(int argc, char** argv)
{
ros::init(argc, argv, "OpenCV_Node");
ImageConverter MyNode;
ros::Rate r(30);
r.sleep();
ros::spinOnce();
r.sleep();
// Tentative of Using Open CV Tools with a ROS Image
// Lucas-Kanade Optical Flow from: https://docs.opencv.org/master/d4/dee/tutorial_optical_flow.html
// Create some random colors
vector<Scalar> colors;
RNG rng;
for(int i = 0; i < 100; i++)
{
int r = rng.uniform(0, 256);
int g = rng.uniform(0, 256);
int b = rng.uniform(0, 256);
colors.push_back(Scalar(r,g,b));
}
Mat old_frame, old_gray;
vector<Point2f> p0, p1;
// Take first frame and find corners in it
old_frame = MyNode.current_frame;
cvtColor(old_frame, old_gray, COLOR_BGR2GRAY);
goodFeaturesToTrack(old_gray, p0, 100, 0.3, 7, Mat(), 7, false, 0.04);
// Create a mask image for drawing purposes
Mat mask = Mat::zeros(old_frame.size(), old_frame.type());
//while(true){
while(ros::ok()){
r.sleep();
Mat frame, frame_gray;
frame = MyNode.current_frame;
if (frame.empty())
break;
cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
// calculate optical flow
vector<uchar> status;
vector<float> err;
TermCriteria criteria = TermCriteria((TermCriteria::COUNT) + (TermCriteria::EPS), 10, 0.03);
calcOpticalFlowPyrLK(old_gray, frame_gray, p0, p1, status, err, Size(15,15), 2, criteria);
vector<Point2f> good_new;
for(uint i = 0; i < p0.size(); i++)
{
// Select good points
if(status[i] == 1) {
good_new.push_back(p1[i]);
// draw the tracks
line(mask,p1[i], p0[i], colors[i], 2);
circle(frame, p1[i], 5, colors[i], -1);
}
}
Mat img;
add(frame, mask, img);
imshow(OPENCV_WINDOW, img);
int keyboard = waitKey(30);
if (keyboard == 'q' || keyboard == 27)
break;
// Now update the previous frame and previous points
old_gray = frame_gray.clone();
p0 = good_new;
ros::spinOnce();
}
return 0;
}
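One way to avoid the abort (a sketch of the idea, untested against this exact setup): guard the calcOpticalFlowPyrLK call and re-seed the points with goodFeaturesToTrack once they have all been lost. Something like this, placed just before the calcOpticalFlowPyrLK call inside the loop:
// Re-seed the tracker instead of letting calcOpticalFlowPyrLK assert on an empty point list.
if (p0.empty()) {
    goodFeaturesToTrack(old_gray, p0, 100, 0.3, 7, Mat(), 7, false, 0.04);
    mask = Mat::zeros(frame.size(), frame.type()); // optional: clear the old tracks
    if (p0.empty()) { // nothing detectable yet, try again on the next frame
        old_gray = frame_gray.clone();
        ros::spinOnce();
        continue;
    }
}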
I change and read a vector's elements from different threads.
While the worker thread is running, I use a mutex when changing the vector's elements.
But if I just want to enumerate the vector's elements, do I still have to lock the vector?
Here is the code:
#include <napi.h>
#include "facedetect.h"
#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <thread>
using namespace std;
using namespace cv;
std::mutex facesMutex;
string cascadeName = "/usr/local/share/opencv4/haarcascades/haarcascade_frontalface_alt.xml";
bool running = true;
vector<Rect> faces;
class FaceDetectWorker : public Napi::AsyncWorker {
public:
FaceDetectWorker(Napi::Function& callback, string url)
: Napi::AsyncWorker(callback), url(url) {
}
~FaceDetectWorker() {
}
vector<Rect> detectAndDraw( Mat& img, CascadeClassifier& cascade)
{
double t = 0;
vector<Rect> faces;
Mat gray, smallImg;
cvtColor( img, gray, COLOR_BGR2GRAY );
//double scale = 1;
// scale = 1, fx = 1 / scale
double fx = 1;
resize( gray, smallImg, Size(), fx, fx, INTER_LINEAR_EXACT );
equalizeHist( smallImg, smallImg );
t = (double)getTickCount();
cascade.detectMultiScale( smallImg, faces,
1.1, 2, 0
//|CASCADE_FIND_BIGGEST_OBJECT
//|CASCADE_DO_ROUGH_SEARCH
|CASCADE_SCALE_IMAGE,
Size(30, 30) );
t = (double)getTickCount() - t;
printf( "detection time = %g ms\n", t*1000/getTickFrequency());
return faces;
}
// Executed inside the worker-thread.
// It is not safe to access JS engine data structure
// here, so everything we need for input and output
// should go on `this`.
void Execute () {
Mat frame, image;
VideoCapture capture;
CascadeClassifier cascade;
if (!cascade.load(samples::findFile(cascadeName)))
{
Napi::AsyncWorker::SetError("ERROR: Could not load classifier cascade");
return;
}
if (!capture.open(url))
{
Napi::AsyncWorker::SetError("ERROR: Error opening video stream " + url);
return;
}
if( capture.isOpened() )
{
cout << "Video capturing has been started ..." << endl;
try {
while(running) {
capture >> frame;
if( frame.empty()) {
continue;
}
Mat frame1 = frame.clone();
vector<Rect> facesResult = detectAndDraw( frame1, cascade);
facesMutex.lock();
faces = facesResult;
facesMutex.unlock();
std::this_thread::sleep_for(std::chrono::milliseconds(30));
}
} catch (std::exception &e) {
facesMutex.unlock();
Napi::AsyncWorker::SetError(e.what());
}
}
}
// Executed when the async work is complete
// this function will be run inside the main event loop
// so it is safe to use JS engine data again
void OnOK() {
Napi::HandleScope scope(Env());
Callback().Call({Env().Undefined(), Env().Undefined()});
}
private:
string url;
};
// Asynchronous access to the `Estimate()` function
Napi::Value FaceDetect(const Napi::CallbackInfo& info) {
Napi::String url = info[0].As<Napi::String>().ToString();
Napi::Function callback = info[1].As<Napi::Function>();
FaceDetectWorker* faceDetectWorker = new FaceDetectWorker(callback, url);
faceDetectWorker->Queue();
return info.Env().Undefined();
}
Napi::Array FaceDetectGet(const Napi::CallbackInfo &info) {
Napi::Array faceArray = Napi::Array::New(info.Env(), faces.size());
facesMutex.lock();
for(int i = 0; i < faces.size(); i++) {
Rect rect = faces[i];
Napi::Object obj = Napi::Object::New(info.Env());
obj.Set("x", rect.x);
obj.Set("y", rect.y);
obj.Set("width", rect.width);
obj.Set("height", rect.height);
faceArray[i] = obj;
}
facesMutex.unlock();
return faceArray;
}
So the question is: should FaceDetectGet, which only enumerates the vector, still lock and unlock it?
Actually, the solution was to use a shared mutex: the worker thread takes a unique (writer) lock, and the reader takes a shared lock.
The code looks like this:
#include <napi.h>
#include "facedetect.h"
#include "opencv2/objdetect.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <thread>
#include <mutex> // For std::unique_lock
#include <shared_mutex>
// https://stackoverflow.com/questions/55313194/do-i-have-to-lock-a-vectors-that-i-just-enumerate-or-i-only-do-it-when-i-change?noredirect=1#comment97357425_55313194
using namespace std;
using namespace cv;
std::shared_mutex _facesMutex;
string cascadeName = "/usr/local/share/opencv4/haarcascades/haarcascade_frontalface_alt.xml";
bool running = true;
vector<Rect> faces;
class FaceDetectWorker : public Napi::AsyncWorker {
public:
FaceDetectWorker(Napi::Function& callback, string url, int skip, int sleep)
: Napi::AsyncWorker(callback), url(url), skip(skip), sleep(sleep) {
}
~FaceDetectWorker() {
}
vector<Rect> detectFaces(Mat &img, CascadeClassifier &cascade)
{
double t = 0;
vector<Rect> faces;
Mat gray, smallImg;
cvtColor( img, gray, COLOR_BGR2GRAY );
//double scale = 1;
// scale = 1, fx = 1 / scale
double fx = 1;
//resize( gray, smallImg, Size(), fx, fx, INTER_LINEAR_EXACT );
//equalizeHist( gray, smallImg );
//t = (double)getTickCount();
cascade.detectMultiScale( gray, faces,
1.1, 2, 0
//|CASCADE_FIND_BIGGEST_OBJECT
//|CASCADE_DO_ROUGH_SEARCH
|CASCADE_SCALE_IMAGE,
Size(30, 30) );
//t = (double)getTickCount() - t;
//printf( "detection time = %g ms\n", t*1000/getTickFrequency());
return faces;
}
// Executed inside the worker-thread.
// It is not safe to access JS engine data structure
// here, so everything we need for input and output
// should go on `this`.
void Execute () {
running = true;
Mat frame, image;
VideoCapture capture;
CascadeClassifier cascade;
if (!cascade.load(samples::findFile(cascadeName)))
{
Napi::AsyncWorker::SetError("ERROR: Could not load classifier cascade");
return;
}
if (!capture.open(url))
{
Napi::AsyncWorker::SetError("ERROR: Error opening video stream " + url);
return;
}
if( capture.isOpened() )
{
cout << "Video capturing has been started ..." << endl;
try {
int skipCount = 0;
while(running) {
//capture.read(frame);
capture >> frame;
if( frame.empty()) {
continue;
}
skipCount++;
//cout<< "sleep " << sleep << " skip " << skip << endl;
if (skipCount >= skip) {
//cout<< "calculation " << skipCount << endl;
skipCount = 0;
Mat frame1 = frame.clone();
vector<Rect> facesResult = detectFaces(frame1, cascade);
std::unique_lock lock(_facesMutex);
faces = facesResult;
lock.unlock();
}
//waitKey(250);
std::this_thread::sleep_for(std::chrono::milliseconds(sleep));
}
} catch (std::exception &e) {
Napi::AsyncWorker::SetError(e.what());
}
} else {
Napi::AsyncWorker::SetError("ERROR: Could not open video camera " + url);
}
}
// Executed when the async work is complete
// this function will be run inside the main event loop
// so it is safe to use JS engine data again
void OnOK() {
Napi::HandleScope scope(Env());
Callback().Call({Env().Undefined(), Env().Undefined()});
}
private:
string url;
int skip = 3;
int sleep = 30;
};
// Asynchronous access to the `Estimate()` function
Napi::Value FaceDetect(const Napi::CallbackInfo& info) {
Napi::String url = info[0].As<Napi::String>();
Napi::Number skip = info[1].As<Napi::Number>();
Napi::Number sleep = info[2].As<Napi::Number>();
Napi::Function callback = info[3].As<Napi::Function>();
FaceDetectWorker* faceDetectWorker = new FaceDetectWorker(callback, url, skip, sleep);
faceDetectWorker->Queue();
return info.Env().Undefined();
}
Napi::Array FaceDetectGet(const Napi::CallbackInfo &info) {
std::shared_lock lock(_facesMutex);
vector<Rect> faces2 = faces; // copy the shared vector under the reader lock
lock.unlock();
Napi::Array faceArray = Napi::Array::New(info.Env(), faces2.size());
for(uint32_t i = 0; i < faces2.size(); i++) {
Rect rect = faces2[i];
Napi::Object obj = Napi::Object::New(info.Env());
obj.Set("x", rect.x);
obj.Set("y", rect.y);
obj.Set("width", rect.width);
obj.Set("height", rect.height);
faceArray[i] = obj;
}
return faceArray;
}
void FaceDetectCancel(const Napi::CallbackInfo &info) {
running = false;
}
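A side note on this solution: the running flag is also shared between the worker thread and FaceDetectCancel, so strictly speaking it should be atomic as well. A minimal sketch of that change (assuming C++11 or later):
#include <atomic>

std::atomic<bool> running{true};

// in the worker loop:
while (running.load()) { /* ... */ }

// in FaceDetectCancel:
void FaceDetectCancel(const Napi::CallbackInfo &info) {
    running.store(false);
}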
I'm trying to initialise VideoCapture captureDevice once in my multithreaded program. Once initialised, it should serve the acquireImage thread as the image buffer, which is filled by a webcam.
The following code gives me an "OpenCV Error: Assertion failed (func != 0) in cv::imshow" error at runtime, which means VideoCapture captureDevice is never really initialised and imshow therefore doesn't get the data it needs. But why?
The code was constructed based on this question: Correctly using mutex in OpenCL-OpenCV-Realtime-Threads? Basically it uses two threads: one processes images originating from a USB webcam, and the second will find faces on those images (but does nothing at the moment, for code simplicity).
#include <memory>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <vector>
#include <cmath>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "opencv2/ocl/ocl.hpp"
#include "opencv2/opencv.hpp"
#include <functional>
using namespace std;
using namespace cv;
typedef unsigned char uchar;
typedef unsigned int uint;
class FaceDetector
{
mutex imageLock, facesLock;
condition_variable imageAqcuired;
bool newImageAvailable;
Mat _img;
Mat _imgToWorkOn;
Mat _faceImages;
bool quit;
VideoCapture captureDevice;
int device_id;
FaceDetector (int _device_id);
void acquireImage()
{
while (!quit)
{
unique_lock<mutex> ulock(imageLock);
imageAqcuired.wait(ulock,[&](){return !newImageAvailable;}); //only take new image after current one was consumed
Mat captureFrame;
captureDevice>>captureFrame;
transpose(captureFrame,captureFrame);
flip(captureFrame,captureFrame,1);
_img = captureFrame.clone();
ulock.unlock();
newImageAvailable = true;
imageAqcuired.notify_one(); //notify that a new image is available
}
}
void processImage()
{
while (!quit)
{
unique_lock<mutex> ulock(imageLock);
imageAqcuired.wait(ulock,[&](){return newImageAvailable;}); //wait until a new image is available
_imgToWorkOn = _img.clone();
ulock.unlock();
newImageAvailable = false;
imageAqcuired.notify_one(); //notify the current image can be replaced by a newer one
unique_lock<mutex> lockFace(facesLock);
// work (face detection) would go here
lockFace.unlock();
}
}
public:
FaceDetector() : newImageAvailable(false) {}
void start() {
quit = false;
thread t1(&FaceDetector::acquireImage,this);
t1.detach();
thread t2(&FaceDetector::processImage,this);
t2.detach();
}
void stop() {
quit = true;
}
Mat getImage() {
if (quit)
return Mat();
lock_guard<mutex> lock(imageLock);
return _img;
}
Mat getProcessedImage() {
if (quit)
return Mat();
lock_guard<mutex> lock(facesLock);
return _faceImages;
}
};
FaceDetector::FaceDetector(int _device_id)
{
device_id = _device_id;
captureDevice.open(device_id);
captureDevice.set(CV_CAP_PROP_FRAME_WIDTH,620); // set the cam properties only now, because otherwise the system pause doesn't work
captureDevice.set(CV_CAP_PROP_FRAME_HEIGHT,480);
}
int main()
{
bool quit(false);
FaceDetector faceDet;
faceDet.start();
thread input([](bool &quitFlag) { getchar(); quitFlag = true; },ref(quit)); //stop on user press Enter
input.detach();
while (!quit) {
Mat img = faceDet.getImage();
Mat imgc = img.clone();
imshow("links", imgc);
/*
imgs = faceDet.getProcessedImage();
Mat imgsc = imgs.clone();
imshow("gsichter", imgsc);
*/
waitKey(30);
this_thread::sleep_for(chrono::milliseconds(33)); //no need to show more than 30 fps...
}
faceDet.stop();
return 0;
}
Edit: I tried to incorporate your answer, but I am still getting the "OpenCV Assertion Error in imshow".
#include <memory>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <vector>
#include <cmath>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "opencv2/ocl/ocl.hpp"
#include "opencv2/opencv.hpp"
#include <functional>
using namespace std;
using namespace cv;
typedef unsigned char uchar;
typedef unsigned int uint;
class FaceDetector
{
mutex imageLock, facesLock;
condition_variable imageAqcuired;
bool newImageAvailable;
Mat _img;
Mat _imgToWorkOn;
Mat _faceImages;
bool quit;
VideoCapture captureDevice;
int device_id;
void acquireImage()
{
while (!quit)
{
unique_lock<mutex> ulock(imageLock);
imageAqcuired.wait(ulock,[&](){return !newImageAvailable;}); //only take new image after current one was consumed
Mat captureFrame;
captureDevice>>captureFrame;
//transpose(captureFrame,captureFrame);
//flip(captureFrame,captureFrame,1);
_img = captureFrame.clone();
ulock.unlock();
newImageAvailable = true;
imageAqcuired.notify_one(); //notify that a new image is available
}
}
void processImage()
{
while (!quit)
{
unique_lock<mutex> ulock(imageLock);
imageAqcuired.wait(ulock,[&](){return newImageAvailable;}); //wait until a new image is available
_imgToWorkOn = _img.clone();
ulock.unlock();
newImageAvailable = false;
imageAqcuired.notify_one(); //notify the current image can be replaced by a newer one
unique_lock<mutex> lockFace(facesLock);
// work (face detection) would go here
lockFace.unlock();
}
}
public:
FaceDetector() : newImageAvailable(false) {}
void start() {
quit = false;
thread t1(&FaceDetector::acquireImage,this);
t1.detach();
thread t2(&FaceDetector::processImage,this);
t2.detach();
}
void stop() {
quit = true;
}
Mat getImage() {
if (quit)
return Mat();
lock_guard<mutex> lock(imageLock);
return _img;
}
Mat getProcessedImage() {
if (quit)
return Mat();
lock_guard<mutex> lock(facesLock);
return _faceImages;
}
FaceDetector::FaceDetector(int _device_id)
{
VideoCapture captureDevice;
device_id = _device_id;
captureDevice.open(device_id);
captureDevice.set(CV_CAP_PROP_FRAME_WIDTH,620); // set the cam properties only now, because otherwise the system pause doesn't work
captureDevice.set(CV_CAP_PROP_FRAME_HEIGHT,480);
}
};
int main()
{
bool quit(false);
FaceDetector faceDet(0);
faceDet.start();
thread input([](bool &quitFlag) { getchar(); quitFlag = true; },ref(quit)); //stop on user press Enter
input.detach();
while (!quit) {
Mat img = faceDet.getImage();
Mat imgc = img.clone();
imshow("links", imgc);
/*
imgs = faceDet.getProcessedImage();
Mat imgsc = imgs.clone();
imshow("gsichter", imgsc);
*/
waitKey(30);
this_thread::sleep_for(chrono::milliseconds(33)); //no need to show more than 30 fps...
}
faceDet.stop();
return 0;
}
I also tried the following code, but I am still getting the same assertion error.
public:
FaceDetector(int device_id) : newImageAvailable(false) {}
....
void init() {
VideoCapture captureDevice;
captureDevice.open(device_id);
captureDevice.set(CV_CAP_PROP_FRAME_WIDTH,620); // set the cam properties only now, because otherwise the system pause doesn't work
captureDevice.set(CV_CAP_PROP_FRAME_HEIGHT,480);
}
int main()
{
FaceDetector faceDet(0);
faceDet.init();
faceDet.start();
}
In the main() function you are not creating the object faceDet using the constructor FaceDetector::FaceDetector(int _device_id); you are using the default constructor instead. This means you are not opening captureDevice at all.
Edit for correction:
In the declaration, make FaceDetector::FaceDetector(int _device_id) public.
Now in main(), create the object faceDet using this constructor; you need to call it like this:
FaceDetector faceDet(0); // I have taken 0 as the default camera ID; you can pass another value like 1 or 2, depending on the choice of camera.
This should be working now; please let me know if you face any problems.
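Also note: in your edited code, both the inline constructor and init() declare a local VideoCapture captureDevice, which shadows the member of the same name, so the member capture is still never opened. A minimal sketch of a constructor without the shadowing (assuming the member declaration stays as it is):
FaceDetector::FaceDetector(int _device_id) : newImageAvailable(false)
{
    device_id = _device_id;
    captureDevice.open(device_id); // opens the member, not a local copy
    captureDevice.set(CV_CAP_PROP_FRAME_WIDTH, 620);
    captureDevice.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
}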
I am trying to take the difference between the center pixel and each of its 4 neighbours, add the differences up, and then replace the original pixel with that value, but it always replaces the pixel with zero. I don't know what I am doing wrong. Thanks for any help.
// newproject.cpp : Defines the entry point for the console application.
#include "stdafx.h"
#include "highgui.h"
#include <stdio.h>
#include <cv.h>
#include <highgui.h>
#include <stdio.h>
#include <conio.h>
#include <opencv2/imgproc/imgproc.hpp> // Gaussian Blur
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat, Scalar)
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <conio.h>
using namespace cv;
using namespace std;
class frameprocessing{
Mat hsv_base;
MatND hist_base;
public:
void whatever(Mat Frame)
{
for(int i=0;i<Frame.cols;i++)
for(int j=0;j<Frame.rows;j++)
{
if(i==0&&j==0)
{
// cout<<"Check 1"<<endl;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i)-(Frame.at<Vec3b>(j+1,i))+(Frame.at<Vec3b>(j,i)-Frame.at<Vec3b>(j,i+1))+(Frame.at<Vec3b>(j,i)-Frame.at<Vec3b>(j+1,i))+(Frame.at<Vec3b>(j,i)-Frame.at<Vec3b>(j,i+1)));
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[0]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[1]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[2]/4;
}
else if(i==Frame.cols-1&&j==Frame.rows-1)
{
// cout<<"Check 2"<<endl;
Frame.at<Vec3b>(j,i)=((Frame.at<Vec3b>(j-1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j,i-1)+Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j-1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j,i-1)+Frame.at<Vec3b>(j,i)));
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[0]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[1]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[2]/4;
}
else if(i==Frame.cols-1&&j==0)
{
//cout<<"Check 3"<<endl;
Frame.at<Vec3b>(j,i)=((Frame.at<Vec3b>(j+1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j,i-1)+Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j+1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j,i-1)+Frame.at<Vec3b>(j,i)));
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[0]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[1]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[2]/4;
}
else if(i==0&&j==Frame.rows-1)
{
// cout<<"Check 4"<<endl;
Frame.at<Vec3b>(j,i)=((Frame.at<Vec3b>(j-1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j,i+1)+Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j-1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j,i+1)+Frame.at<Vec3b>(j,i)));
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[0]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[1]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[2]/4;
}
else if(i==0)
{
// cout<<"Check 5"<<endl;
Frame.at<Vec3b>(j,i)=((Frame.at<Vec3b>(j+1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j,i+1)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j,i+1)-Frame.at<Vec3b>(j,i))+((Frame.at<Vec3b>(j-1,i)-Frame.at<Vec3b>(j,i))));
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[0]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[1]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[2]/4;
}
else if(j==0)
{
// cout<<"Check 6"<<endl;
Frame.at<Vec3b>(j,i)=((Frame.at<Vec3b>(j+1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j+1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j,i+1)-Frame.at<Vec3b>(j,i)+(Frame.at<Vec3b>(j,i-1)-Frame.at<Vec3b>(j,i))));
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[0]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[1]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[2]/4;
}
else if(i==Frame.cols-1)
{
// cout<<"Check 7"<<endl;
Frame.at<Vec3b>(j,i)=((Frame.at<Vec3b>(j+1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j-1,i)-Frame.at<Vec3b>(j,i))+((Frame.at<Vec3b>(j,i-1)-Frame.at<Vec3b>(j,i)))+((Frame.at<Vec3b>(j,i-1)-Frame.at<Vec3b>(j,i))));
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[0]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[1]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[2]/4;
}
else if(j==Frame.rows-1)
{
// cout<<"Check 8"<<endl;
Frame.at<Vec3b>(j,i)=((Frame.at<Vec3b>(j,i+1)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j-1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j-1,i)-Frame.at<Vec3b>(j,i))+((Frame.at<Vec3b>(j,i-1)-Frame.at<Vec3b>(j,i))));
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[0]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[1]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[2]/4;
}
else
{
Frame.at<Vec3b>(j,i)=((Frame.at<Vec3b>(j-1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j+1,i)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j,i+1)-Frame.at<Vec3b>(j,i))+(Frame.at<Vec3b>(j,i-1)+Frame.at<Vec3b>(j,i)));
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[0]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[1]/4;
Frame.at<Vec3b>(j,i)=(Frame.at<Vec3b>(j,i))[2]/4;
Vec3d value = Frame.at<Vec3b>(j,i);
cout<<value[0]<<endl;
cout<<value[1]<<endl;
cout<<value[2]<<endl;
}
}
//hell(Frame);
}
};
class video{
Mat frame;
string filename;
double dWidth;
double dHeight;
public:
video()
{
}
video(string videoname)
{
vector<Mat> videoframes;
filename = videoname;
VideoCapture capture(filename);
if( !capture.isOpened() )
{
exit(0);
}
dWidth = capture.get(CV_CAP_PROP_FRAME_WIDTH); //get the width of frames of the video
dHeight = capture.get(CV_CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video
frameprocessing obj;
for( ; ; )
{
capture >> frame;
if(frame.empty())
break;
// Mat tmp=frame.clone();
obj.whatever(frame);
// obj.hsv_histogram(frame);
// videoframes.push_back(tmp);
}
//displayvideo(videoframes);
//writer(videoframes);
}
};
int _tmain(int argc, _TCHAR* argv[])
{
video obj("video.avi");
It seems you need a Laplacian with kernel size parameter = 1, see here: http://docs.opencv.org/modules/imgproc/doc/filtering.html?highlight=laplacian#laplacian
You just need to divide the result by 4 after the convolution to get the correct Laplacian.
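For reference, a minimal sketch of the built-in call (my addition, not your code): with ksize = 1 the kernel is [0 1 0; 1 -4 1; 0 1 0], i.e. the 4-neighbour sum minus 4x the center, so a scale of -0.25 yields exactly "center minus the average of the 4 neighbours":
Mat src32f, lap;
Frame.convertTo(src32f, CV_32FC3); // assuming Frame is the BGR input
Laplacian(src32f, lap, CV_32F, 1, -0.25); // ksize = 1, scale = -0.25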
As for your code, I think you should use different images as the source and destination; you can't do this in place. Here is a corrected version:
class frameprocessing{
Mat hsv_base;
MatND hist_base;
public:
void whatever(Mat Frame)
{
Frame.convertTo(Frame,CV_32FC3);
Mat newimage=Mat::zeros(Frame.size(),CV_32FC3);
for (int j=1;j<Frame.rows-1;++j)
{
for (int i=1;i<Frame.cols-1;++i)
{
newimage.at<Vec3f>(j,i)=Frame.at<Vec3f>(j,i)-
0.25*Frame.at<Vec3f>(j+1,i)-
0.25*Frame.at<Vec3f>(j,i+1)-
0.25*Frame.at<Vec3f>(j-1,i)-
0.25*Frame.at<Vec3f>(j,i-1);
cout << newimage.at<Vec3f>(j,i) << endl;
}
}
//imshow("result",newimage);
//cv::waitKey(30);
}
};
class video{
Mat frame;
string filename;
double dWidth;
double dHeight;
public:
video()
{
}
video(string videoname)
{
vector<Mat> videoframes;
filename = videoname;
VideoCapture capture(filename);
if( !capture.isOpened() )
{
exit(0);
}
dWidth = capture.get(cv::CAP_PROP_FRAME_WIDTH); //get the width of frames of the video
dHeight = capture.get(cv::CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video
frameprocessing obj;
for( ; ; )
{
capture >> frame;
if(frame.empty())
break;
// Mat tmp=frame.clone();
obj.whatever(frame);
// obj.hsv_histogram(frame);
// videoframes.push_back(tmp);
}
//displayvideo(videoframes);
//writer(videoframes);
}
};
int main(int argc, char* argv[])
{
namedWindow("result");
video obj("D:\\ImagesForTest\\atrium.avi");
}
I am trying to display video in a separate function. For this I am using a vector: I push_back each frame into it and then pass the vector to the function, but the function displays a single frame repeatedly. My code is below. Please tell me what I am doing wrong.
// newproject.cpp : Defines the entry point for the console application.
#include "stdafx.h"
#include "highgui.h"
#include <stdio.h>
#include <cv.h>
#include <highgui.h>
#include <stdio.h>
#include <conio.h>
#include <opencv2/imgproc/imgproc.hpp> // Gaussian Blur
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat, Scalar)
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <conio.h>
using namespace cv;
using namespace std;
class frameprocessing{
Mat hsv_base;
MatND hist_base;
public:
void hsv_histogram(Mat Frame)
{
cvtColor( Frame, hsv_base, CV_BGR2HSV );
int h_bins = 50;
int s_bins = 32;
int histSize[] = { h_bins, s_bins };
float h_ranges[] = { 0, 180 };
float s_ranges[] = { 0, 256 };
const float* ranges[] = { h_ranges, s_ranges };
int channels[] = { 0, 1 };
calcHist( &hsv_base, 1, channels, Mat(), hist_base, 2, histSize, ranges, true, false );
}
};
class video{
Mat frame;
string filename;
double dWidth;
double dHeight;
public:
video()
{
}
video(string videoname)
{
vector<Mat> videoframes;
std::vector<Mat>::iterator it;
it = videoframes.begin();
filename = videoname;
VideoCapture capture(filename);
if( !capture.isOpened() )
{
exit(0);
}
dWidth = capture.get(CV_CAP_PROP_FRAME_WIDTH); //get the width of frames of the video
dHeight = capture.get(CV_CAP_PROP_FRAME_HEIGHT); //get the height of frames of the video
frameprocessing obj;
for( ; ; )
{
capture >> frame;
if(frame.empty())
break;
obj.hsv_histogram(frame);
videoframes.push_back(frame);
}
displayvideo(videoframes);
// waitKey(0); // key press to close window
}
void writer()
{
Size frameSize(static_cast<int>(dWidth), static_cast<int>(dHeight));
VideoWriter oVideoWriter ("D:/MyVideo.avi", CV_FOURCC('P','I','M','1'), 20, frameSize, true); //initialize the VideoWriter object
if ( !oVideoWriter.isOpened() ) //if not initialize the VideoWriter successfully, exit the program
{
cout << "ERROR: Failed to write the video" << endl;
exit(0);
}
}
void displayvideo(vector<Mat> videoframe)
{
Mat tempframe;
while(!videoframe.empty()) //Show the image captured in the window and repeat
{
tempframe = videoframe.back();
imshow("video", tempframe);
videoframe.pop_back();
waitKey(20); // waits to display frame
}
// waitKey(0);
}
void displayframe(Mat frame)
{
imshow("video", frame);
waitKey(20); // waits to display frame
}
};
int _tmain(int argc, _TCHAR* argv[])
{
video obj("video.avi");
//obj.readvideo();
}
You need to copy or clone each frame into another Mat before pushing it to your vector; change your code like this:
for( ; ; )
{
capture >> frame;
if(frame.empty())
break;
Mat tmp=frame.clone();
obj.hsv_histogram(tmp);
videoframes.push_back(tmp);
}
In your code you are passing the same Mat, whose buffer is reused for every captured frame, to your vector each time, so you end up with a vector of Mat headers that all point to the same memory location. To learn more about OpenCV Mat and memory allocation, see the documentation.
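A small standalone illustration of that sharing behaviour:
Mat a(2, 2, CV_8UC1, Scalar(7));
Mat b = a; // shallow copy: b shares a's pixel buffer
Mat c = a.clone(); // deep copy: c owns its own buffer
a.setTo(0);
// b now contains zeros, c still contains 7s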