OpenCV Harris Corner Detection - C++

I have created a simple class to detect corners using the Harris algorithm. It works, but when I lower the threshold value with the trackbar below about 100, the processing stalls.
Here is my sample code.
My header file:
class CornerCapturer{
    struct configValues
    {
        int block;
        int k_size;
        int thre;
        double k;
        configValues() : block(2), k_size(3), thre(100), k(0.04){}
    };
public:
    static configValues conf;
    void captureCorners();
    static void captevent(int, void*);
    Mat captureCornerPoints(int thresh, int block, int k_size, double k, Mat frame);
};
My .cpp file:
void CaptureBall::detectCorners(){
    CornerCapturer capt;
    VideoCapture cap = capture.captureVideo();
    cap.read(frame);
    capt.captureCorners();
    for (;;){
        cap.read(frame);
        capt.captevent(0, 0);
        waitKey(5);
    }
}
void CornerCapturer::captureCorners(){
    int *t = &CornerCapturer::conf.thre;
    namedWindow("Open", CV_WINDOW_AUTOSIZE);
    captevent(0, 0);
    createTrackbar("Thresh", "Open", t, 255, CornerCapturer::captevent);
}

void CornerCapturer::captevent(int, void*){
    Mat res;
    CornerCapturer cap;
    res = cap.captureCornerPoints(CornerCapturer::conf.thre, CornerCapturer::conf.block, CornerCapturer::conf.k_size, CornerCapturer::conf.k, frame);
    imshow("Open", res);
}
Mat CornerCapturer::captureCornerPoints(int thresh, int block, int k_size, double k, Mat f){
    Mat gray, corner_detected, cnv_normal, copy;
    copy = f; // note: shallow header copy, `copy` and `f` share pixel data
    cvtColor(copy, gray, COLOR_RGB2GRAY); // convert to gray image
    cornerHarris(gray, corner_detected, block, k_size, k, BORDER_DEFAULT); // detect corner points
    normalize(corner_detected, cnv_normal, 0, 255, NORM_MINMAX, CV_32FC1, Mat()); // normalize image
    for (int j = 0; j < cnv_normal.rows; j++)
    {
        for (int i = 0; i < cnv_normal.cols; i++)
        {
            if ((int)cnv_normal.at<float>(j, i) > thresh)
            {
                circle(copy, Point(i, j), 8, Scalar(120), 2, 8, 0); // put points
            }
        }
    }
    return copy;
}
I used the OpenCV library to track corners.
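A guess at the stall: as the trackbar value drops below about 100, a large fraction of the normalized response passes the test, so the nested loops draw thousands of circles per frame and the callback appears to hang. A minimal sketch of a vectorized alternative, assuming the same cnv_normal, thresh and copy as in captureCornerPoints above, with an arbitrary cap on how many corners get drawn:
// Sketch only: replace the per-pixel nested loops with a mask + findNonZero,
// and cap the number of drawn corners so low thresholds cannot stall the UI.
Mat mask;
compare(cnv_normal, static_cast<double>(thresh), mask, CMP_GT); // CV_8UC1 mask of pixels above thresh
std::vector<Point> pts;
findNonZero(mask, pts); // collect all candidate corners at once
const size_t maxDraw = 500; // arbitrary cap (assumption)
for (size_t n = 0; n < pts.size() && n < maxDraw; ++n)
    circle(copy, pts[n], 8, Scalar(120), 2, 8, 0);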

Face Landmarks and stabilization using optical flow

I wrote a program that shows a face in a window together with 68 special landmark points.
I use a Haar cascade and FacemarkLBF. I have a problem in my program: when the face holds a stable position, the landmark points jitter (shake). How can I fix that? Thanks.
#include <iostream>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/face.hpp>

using cv::Scalar;
using cv::Point;

int main(int argc, char** argv)
{
    cv::CascadeClassifier faceDetector("haarcascade_frontalface_alt2.xml");
    cv::Ptr<cv::face::Facemark> facemark = cv::face::FacemarkLBF::create();
    facemark->loadModel("lbfmodel.yaml");
    cv::VideoCapture vc(0);
    while (true)
    {
        cv::Mat frame, gray;
        vc.read(frame);
        cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);

        std::vector<cv::Rect> faces;
        faceDetector.detectMultiScale(gray, faces);
        std::vector< std::vector<cv::Point2f> > landmarks;
        bool success = facemark->fit(frame, faces, landmarks);
        for (size_t i = 0; i < landmarks.size(); i++)
        {
            for (size_t j = 0; j < landmarks[i].size(); j++)
            {
                cv::circle(frame, cv::Point(landmarks[i][j].x, landmarks[i][j].y), 2, Scalar(255, 0, 0), 2);
            }
        }
        cv::imshow("1", frame);
        if ((char)cv::waitKey(20) == 27)
            break;
    }
    return 0;
}
I saw @Nuzhny's link: lkdemo.cpp. Not everything in it is clear to me.
I rewrote my code, but nothing changed:
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include "opencv2/video/tracking.hpp"
#include <opencv2/face.hpp>

int main(int argc, char** argv)
{
    cv::CascadeClassifier faceDetector("haarcascade_frontalface_alt2.xml");
    cv::Ptr<cv::face::Facemark> facemark = cv::face::FacemarkLBF::create();
    facemark->loadModel("lbfmodel.yaml");
    cv::VideoCapture vc(0);
    cv::Mat gray, prevGray, image, frame;
    cv::Size subPixWinSize(10, 10), winSize(64, 64);
    cv::TermCriteria termcrit(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 20, 0.03);
    std::vector<uchar> status;
    std::vector<float> err;
    std::vector<cv::Point2f> oldLandmarks;
    std::vector< std::vector<cv::Point2f> > landmarks;
    bool b = true;
    while (true)
    {
        vc.read(frame);
        cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
        std::vector<cv::Rect> faces;
        faceDetector.detectMultiScale(gray, faces);
        bool success = facemark->fit(frame, faces, landmarks);
        if (!success)
        {
            cv::imshow("1", frame);
            continue;
        }
        if (oldLandmarks.empty())
            oldLandmarks = landmarks.front();
        if (prevGray.empty())
            gray.copyTo(prevGray);
        calcOpticalFlowPyrLK(prevGray, gray, landmarks.front(), oldLandmarks, status, err, winSize, 3, termcrit, cv::OPTFLOW_LK_GET_MIN_EIGENVALS, 0.001);
        for (size_t i = 0; i < oldLandmarks.size(); i++)
        {
            cv::circle(frame, cv::Point(oldLandmarks[i].x, oldLandmarks[i].y), 2, cv::Scalar(255, 0, 0), 2);
        }
        cv::imshow("1", frame);
        std::swap(oldLandmarks, landmarks.front());
        cv::swap(prevGray, gray);
        if ((char)cv::waitKey(20) == 27)
            break;
    }
    return 0;
}
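One observation on this rewrite (my reading of it, not something stated in the thread): calcOpticalFlowPyrLK is given landmarks.front(), the detection on the current frame, as the previous points, while prevGray is the previous frame, so the tracked output is inconsistent and is then overwritten by the swap. The intended direction would be roughly this sketch (trackedPts is a new scratch vector I introduce for illustration):
// Sketch only: track last frame's stabilized points forward, then blend them
// with the fresh detection instead of replacing them outright.
std::vector<cv::Point2f> trackedPts;
calcOpticalFlowPyrLK(prevGray, gray, oldLandmarks, trackedPts, status, err, winSize, 3, termcrit);
for (size_t i = 0; i < trackedPts.size(); ++i)
    if (status[i])
        oldLandmarks[i] = 0.5f * (trackedPts[i] + landmarks.front()[i]); // simple average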
LK tracking alone may not be enough. I wrote a simple application that corrects the landmarks after LK with a linear Kalman filter (EDIT 2 - removed prev landmarks):
#include <opencv2/opencv.hpp>
#include <opencv2/face.hpp>

///
class PointState
{
public:
    PointState(cv::Point2f point)
        :
        m_point(point),
        m_kalman(4, 2, 0, CV_64F)
    {
        Init();
    }

    void Update(cv::Point2f point)
    {
        cv::Mat measurement(2, 1, CV_64FC1);
        if (point.x < 0 || point.y < 0)
        {
            Predict();
            measurement.at<double>(0) = m_point.x; //update using prediction
            measurement.at<double>(1) = m_point.y;
            m_isPredicted = true;
        }
        else
        {
            measurement.at<double>(0) = point.x; //update using measurements
            measurement.at<double>(1) = point.y;
            m_isPredicted = false;
        }
        // Correction
        cv::Mat estimated = m_kalman.correct(measurement);
        m_point.x = static_cast<float>(estimated.at<double>(0)); //update using measurements
        m_point.y = static_cast<float>(estimated.at<double>(1));
        Predict();
    }

    cv::Point2f GetPoint() const
    {
        return m_point;
    }

    bool IsPredicted() const
    {
        return m_isPredicted;
    }

private:
    cv::Point2f m_point;
    cv::KalmanFilter m_kalman;
    double m_deltaTime = 0.2;
    double m_accelNoiseMag = 0.3;
    bool m_isPredicted = false;

    void Init()
    {
        m_kalman.transitionMatrix = (cv::Mat_<double>(4, 4) <<
            1, 0, m_deltaTime, 0,
            0, 1, 0, m_deltaTime,
            0, 0, 1, 0,
            0, 0, 0, 1);
        m_kalman.statePre.at<double>(0) = m_point.x; // x
        m_kalman.statePre.at<double>(1) = m_point.y; // y
        m_kalman.statePre.at<double>(2) = 1; // init velocity x
        m_kalman.statePre.at<double>(3) = 1; // init velocity y
        m_kalman.statePost.at<double>(0) = m_point.x;
        m_kalman.statePost.at<double>(1) = m_point.y;
        cv::setIdentity(m_kalman.measurementMatrix);
        m_kalman.processNoiseCov = (cv::Mat_<double>(4, 4) <<
            pow(m_deltaTime, 4.0) / 4.0, 0, pow(m_deltaTime, 3.0) / 2.0, 0,
            0, pow(m_deltaTime, 4.0) / 4.0, 0, pow(m_deltaTime, 3.0) / 2.0,
            pow(m_deltaTime, 3.0) / 2.0, 0, pow(m_deltaTime, 2.0), 0,
            0, pow(m_deltaTime, 3.0) / 2.0, 0, pow(m_deltaTime, 2.0));
        m_kalman.processNoiseCov *= m_accelNoiseMag;
        cv::setIdentity(m_kalman.measurementNoiseCov, cv::Scalar::all(0.1));
        cv::setIdentity(m_kalman.errorCovPost, cv::Scalar::all(.1));
    }

    cv::Point2f Predict()
    {
        cv::Mat prediction = m_kalman.predict();
        m_point.x = static_cast<float>(prediction.at<double>(0));
        m_point.y = static_cast<float>(prediction.at<double>(1));
        return m_point;
    }
};
///
void TrackPoints(cv::Mat prevFrame, cv::Mat currFrame,
    const std::vector<cv::Point2f>& currLandmarks,
    std::vector<PointState>& trackPoints)
{
    // Lucas-Kanade
    cv::TermCriteria termcrit(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 30, 0.01);
    cv::Size winSize(7, 7);
    std::vector<uchar> status(trackPoints.size(), 0);
    std::vector<float> err;
    std::vector<cv::Point2f> newLandmarks;
    std::vector<cv::Point2f> prevLandmarks;
    std::for_each(trackPoints.begin(), trackPoints.end(), [&](const PointState& pts) { prevLandmarks.push_back(pts.GetPoint()); });
    cv::calcOpticalFlowPyrLK(prevFrame, currFrame, prevLandmarks, newLandmarks, status, err, winSize, 3, termcrit, 0, 0.001);
    for (size_t i = 0; i < status.size(); ++i)
    {
        if (status[i])
        {
            trackPoints[i].Update((newLandmarks[i] + currLandmarks[i]) / 2);
        }
        else
        {
            trackPoints[i].Update(currLandmarks[i]);
        }
    }
}
///
int main(int argc, char** argv)
{
    cv::CascadeClassifier faceDetector("haarcascade_frontalface_alt2.xml");
    cv::Ptr<cv::face::Facemark> facemark = cv::face::FacemarkLBF::create();
    facemark->loadModel("lbfmodel.yaml");
    cv::VideoCapture cam(0, cv::CAP_DSHOW);
    cv::namedWindow("Facial Landmark Detection", cv::WINDOW_NORMAL);
    cv::Mat frame;
    cv::Mat currGray;
    cv::Mat prevGray;
    std::vector<PointState> trackPoints;
    trackPoints.reserve(68);
    while (cam.read(frame))
    {
        std::vector<cv::Rect> faces;
        cv::cvtColor(frame, currGray, cv::COLOR_BGR2GRAY);
        faceDetector.detectMultiScale(currGray, faces, 1.1, 3, cv::CASCADE_FIND_BIGGEST_OBJECT);
        std::vector<std::vector<cv::Point2f>> landmarks;
        bool success = facemark->fit(frame, faces, landmarks);
        if (success)
        {
            if (prevGray.empty())
            {
                trackPoints.clear();
                for (cv::Point2f lp : landmarks[0])
                {
                    trackPoints.emplace_back(lp);
                }
            }
            else
            {
                if (trackPoints.empty())
                {
                    for (cv::Point2f lp : landmarks[0])
                    {
                        trackPoints.emplace_back(lp);
                    }
                }
                else
                {
                    TrackPoints(prevGray, currGray, landmarks[0], trackPoints);
                }
            }
            for (const PointState& tp : trackPoints)
            {
                cv::circle(frame, tp.GetPoint(), 3, tp.IsPredicted() ? cv::Scalar(0, 0, 255) : cv::Scalar(0, 255, 0), cv::FILLED);
            }
            for (cv::Point2f lp : landmarks[0])
            {
                cv::circle(frame, lp, 2, cv::Scalar(255, 0, 255), cv::FILLED);
            }
        }
        cv::imshow("Facial Landmark Detection", frame);
        if (cv::waitKey(1) == 27)
            break;
        currGray.copyTo(prevGray); // deep copy: plain assignment would share the buffer that cvtColor reuses next iteration
    }
    return 0;
}
So, the magenta points are the raw landmarks and the green points are corrected after LK+Kalman: result video.
You can change the Kalman behaviour with two constants:
double m_deltaTime = 0.2;
double m_accelNoiseMag = 0.3;
They trade latency against noise.
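For intuition about those two constants, here is a standalone sketch (my addition, reduced to a single coordinate) of how they shape the process noise covariance Q built in Init(): larger values enlarge Q, so the filter trusts measurements more (less latency, more jitter); smaller values smooth harder at the cost of lag.
#include <opencv2/opencv.hpp>
#include <iostream>
int main()
{
    double dt = 0.2;            // m_deltaTime
    double accelNoiseMag = 0.3; // m_accelNoiseMag
    // Q for one position/velocity pair, same structure as processNoiseCov above
    cv::Mat Q = (cv::Mat_<double>(2, 2) <<
        std::pow(dt, 4.) / 4., std::pow(dt, 3.) / 2.,
        std::pow(dt, 3.) / 2., std::pow(dt, 2.));
    Q *= accelNoiseMag;
    std::cout << "Q =" << std::endl << Q << std::endl; // bigger dt or accelNoiseMag -> bigger Q
    return 0;
}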

Face Detection Using KinectV2 and OpenCV

Recently I have been trying to learn OpenCV and was trying to detect faces using a Haar classifier.
I was successful in detecting faces when I obtained the video stream from the default webcam, but when I use the Kinect instead of the default webcam, it still detects faces but the frame rate drops tremendously.
The code that I've written is:
int main() {
    string haar_face = "F:\\haarcascade_frontalface_default.xml"; // fixed: the backslash must be escaped in a C++ string literal
    CascadeClassifier haar_cascade;
    haar_cascade.load(haar_face);
    if (haar_cascade.empty()) {
        return -1;
    }
    vector<Rect_<int>> faces;
    bool optionKinect = false;
    cout << "Choose Option\n1.) Kinect \n>1.) WebCam\n";
    int choice;
    cin >> choice;
    if (choice == 1) {
        optionKinect = true;
    }
    if (optionKinect) {
        CKinectStreamsMat* kinectStream = new CKinectStreamsMat();
        kinectStream->initSensor();
        while (true) {
            Mat original, gray;
            Mat face;
            Rect face_i;
            //cap >> original;
            original = kinectStream->getColorFrame();
            if (original.data) {
                cvtColor(original, gray, CV_BGR2GRAY);
                haar_cascade.detectMultiScale(gray, faces);
                int size = faces.size();
                for (size_t i = 0; i < size; i++) {
                    face_i = faces[i];
                    face = gray(face_i);
                    rectangle(original, face_i, CV_RGB(0, 255, 0), 1);
                }
                imshow("original", original);
                if (waitKey(20) == 27) {
                    break;
                }
            }
        }
    }
    else {
        VideoCapture cap(0);
        while (true) {
            Mat original, gray;
            Mat face;
            Rect face_i;
            cap >> original;
            //original = kinectStream->getColorFrame();
            if (original.data) {
                cvtColor(original, gray, CV_BGR2GRAY);
                haar_cascade.detectMultiScale(gray, faces);
                int size = faces.size();
                for (size_t i = 0; i < size; i++) {
                    face_i = faces[i];
                    face = gray(face_i);
                    rectangle(original, face_i, CV_RGB(0, 255, 0), 1);
                }
                imshow("original", original);
                if (waitKey(20) == 27) {
                    break;
                }
            }
        }
    }
}
And this is how I am obtaining the color frame from the Kinect:
cv::Mat CKinectStreamsMat::getColorFrame()
{
    HRESULT hr = E_FAIL;
    IColorFrame* frame = NULL;
    IFrameDescription* frameDesc;
    cv::Mat colorImage;
    hr = _color_reader->AcquireLatestFrame(&frame);
    if (SUCCEEDED(hr)) {
        hr = frame->get_FrameDescription(&frameDesc);
        if (SUCCEEDED(hr)) {
            int frameWidth = 0, frameHeight = 0;
            hr = frameDesc->get_Width(&frameWidth);
            if (SUCCEEDED(hr)) {
                hr = frameDesc->get_Height(&frameHeight);
            }
            if (SUCCEEDED(hr)) {
                const int imgSize = frameWidth * frameHeight * 4 * sizeof(unsigned char); // 4 channels (BGRA)
                colorImage = cv::Mat(1080, 1920, CV_8UC4);
                hr = frame->CopyConvertedFrameDataToArray(imgSize, reinterpret_cast<BYTE*>(colorImage.data), ColorImageFormat_Bgra);
            }
        }
        SafeRelease(frameDesc);
        SafeRelease(frame);
    }
    return colorImage;
}
I thought the reason for the low performance might be the difference in resolution between the frames provided by the webcam and the Kinect, so I also tried scaling the Kinect frame down to a size even smaller than the webcam frame. But the performance was still very low.
As this is all I could think of and I am now out of ideas, could anyone please tell me what the reason for this low performance could be?
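One thing worth double-checking (a guess on my part, not something from this post): whether the detection really runs on the downscaled image, and whether a minSize is passed to bound the scale pyramid; detectMultiScale cost grows with image area, and a 1920x1080 Kinect frame has roughly seven times the pixels of a 640x480 webcam frame. A sketch, reusing gray, faces and haar_cascade from the code above, with an assumed working scale of 0.25:
cv::Mat small;
const double scale = 0.25; // assumption: 480x270 working resolution
cv::resize(gray, small, cv::Size(), scale, scale, cv::INTER_AREA);
haar_cascade.detectMultiScale(small, faces, 1.1, 3, 0, cv::Size(24, 24)); // minSize bounds the pyramid
for (cv::Rect& r : faces) { // map detections back to full resolution
    r.x = cvRound(r.x / scale);
    r.y = cvRound(r.y / scale);
    r.width = cvRound(r.width / scale);
    r.height = cvRound(r.height / scale);
}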

OpenCV playing video at slow speed

I have a problem with video speed. When I just play a video file in OpenCV it runs at normal speed.
The problem starts when I apply face and eye detection to the video: it then plays very slowly. I have tried changing the waitKey() values, but the problem persists.
Why is this happening?
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
// Function Headers
void detectAndDisplay(Mat frame);
// Global variables
// Copy this file from opencv/data/haarscascades to target folder
string face_cascade_name = "haarcascade_frontalface_alt.xml";
string eye_cascade_name = "haarcascade_mcs_eyepair_big.xml";
CascadeClassifier face_cascade;
CascadeClassifier eye_cascade;
string window_name = "Capture - Face detection";
int filenumber; // Number of file to be saved
string filename;
int main(void)
{
VideoCapture capture("m.mp4");
if (!capture.isOpened()) // check if we succeeded
return -1;
// Load the cascade
if (!face_cascade.load(face_cascade_name))
{
printf("--(!)Error loading\n");
return (-1);
}
if (!eye_cascade.load(eye_cascade_name))
{
printf("--(!)Error loading\n eye ");
return (-1);
}
// Read the video stream
Mat frame;
for (;;)
{
capture >> frame;
// Apply the classifier to the frame
if (!frame.empty())
{
detectAndDisplay(frame);
}
else
{
printf(" --(!) No captured frame -- Break!");
break;
}
waitKey(40);
return 0;
}
}
void detectAndDisplay(Mat frame)
{
    std::vector<Rect> faces;
    std::vector<Rect> eyes;
    Mat frame_gray;
    Mat crop;
    Mat res;
    Mat gray;
    string text;
    stringstream sstm;
    cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
    equalizeHist(frame_gray, frame_gray);
    // Detect faces
    face_cascade.detectMultiScale(frame_gray, faces, 1.1, 4, CASCADE_SCALE_IMAGE, Size(20, 20));
    // Set region of interest
    size_t i = 0; // i is the index of the current element
    for (i = 0; i < faces.size(); i++) // iterate through all detected faces
    {
        Point pt1(faces[i].x, faces[i].y); // display detected faces on the main window
        Point pt2((faces[i].x + faces[i].width), (faces[i].y + faces[i].height)); // fixed: width and height were swapped
        rectangle(frame, pt1, pt2, Scalar(0, 255, 0), 2, 8, 0);
        // set ROI for the eyes
        Rect Roi = faces[i];
        Roi.height = Roi.height / 4;
        Roi.y = Roi.y + Roi.height;
        crop = frame(Roi); // fixed: previously declared a shadowing local, leaving the outer crop empty
        cvtColor(crop, gray, CV_BGR2GRAY);
        imshow("ROI", gray);
        eye_cascade.detectMultiScale(gray, eyes, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(20, 20));
        for (size_t j = 0; j < eyes.size(); j++)
        {
            Rect r(Roi.x + eyes[j].x, Roi.y + eyes[j].y, eyes[j].width, eyes[j].height);
            rectangle(frame, r, Scalar(255, 0, 0), 3, 1, 0);
            // convert the eye ROI to its Y channel
            Mat eye_region = frame(r).clone(); // fixed: eyes[j] is relative to the ROI, r is in frame coordinates
            Mat eye_region_YCbCr;
            cvtColor(eye_region, eye_region_YCbCr, CV_BGR2YCrCb);
            vector<Mat> channels;
            split(eye_region_YCbCr, channels);
            Mat eye_region_Ychannel = channels[0].clone();
            imshow("Y", eye_region_Ychannel);
            // end of conversion
        }
    }
    // Show image
    imshow("original", frame);
    if (!crop.empty())
    {
        imshow("detected", crop);
    }
    else
    {
        destroyWindow("detected");
    }
}
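The slowdown most likely comes from running two cascade detections on every frame, not from the waitKey() value. A common mitigation (a sketch on my part, not tested against this exact code) is to detect only every Nth frame and reuse the last rectangles in between; here is such a main loop, with detectEvery = 5 as an arbitrary choice:
// Sketch: reuse detections across frames to keep playback near real time.
int frameIdx = 0;
const int detectEvery = 5; // arbitrary choice
std::vector<Rect> lastFaces;
for (;;)
{
    capture >> frame;
    if (frame.empty())
        break;
    if (frameIdx++ % detectEvery == 0)
    {
        Mat g;
        cvtColor(frame, g, COLOR_BGR2GRAY);
        equalizeHist(g, g);
        face_cascade.detectMultiScale(g, lastFaces, 1.1, 4, CASCADE_SCALE_IMAGE, Size(20, 20));
    }
    for (const Rect& f : lastFaces)
        rectangle(frame, f, Scalar(0, 255, 0), 2);
    imshow(window_name, frame);
    if (waitKey(1) == 27) // short wait; decoding paces playback
        break;
}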

findContours returns assertion failed

The findContours function has been triggering assertion failures with Visual Studio C++ 2012. I've made sure that all my include directories are fine.
void track(Mat input_video, Mat &output_video)
{
    Mat temp;
    input_video.copyTo(temp);
    vector<vector<cv::Point> > contours;
    vector<Vec4i> hierarchy;
    findContours(temp, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
    // Only search through the most external layer.
    for (int i = 0; i >= 0; i = hierarchy[i][0])
    {
        Moments moment = moments((Mat) contours[i]);
        double area = moment.m00;
        double xmc = moment.m10;
        double ymc = moment.m01;
        double x = xmc / area;
        double y = ymc / area;
        circle(output_video, Point(x, y), 3, Scalar(0, 255, 255));
    }
}
EDIT:
The rest of my code, in case you need it:
#include "opencv2/opencv.hpp"
#include <iostream>
using namespace std;
using namespace cv;
//This function threshold the HSV image and create a binary image
Mat GetThresholdedImage(Mat imgHSV, int huemin, int satmin, int valmin, int huemax, int satmax, int valmax)
{
//Size s= imgHSV->size();
Mat imgThresh(imgHSV.size().height, imgHSV.size().width, CV_8U);
//returns matrix that contains all values within the given HSV range.
inRange(imgHSV, Scalar(huemin,satmin,valmin), Scalar(huemax,satmax,valmax), imgThresh);
return imgThresh;
}
Mat morph(Mat imgThresh)
{
Mat erode_element= getStructuringElement(MORPH_RECT, Size(3,3));
Mat dilate_element= getStructuringElement(MORPH_RECT, Size(8,8));
erode(imgThresh, imgThresh, erode_element);
erode(imgThresh, imgThresh, erode_element);
dilate(imgThresh, imgThresh, dilate_element);
return imgThresh;
}
void track (Mat input_video, Mat &output_video)
{
Mat temp;
input_video.copyTo(temp);
vector< vector<Point> > contours;
cerr<< contours.size();
vector<Vec4i> hierarchy;
findContours(temp, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
cerr<<contours.size();
//Only search through the most external layer.
for (int i=0; i>=0; i=hierarchy[i][0])
{
Moments moment = moments((Mat) contours[i]);
double area= moment.m00;
double xmc= moment.m10;
double ymc= moment.m01;
double x= xmc/area;
double y= ymc/area;
circle(output_video, Point(x,y), 3, Scalar(0,255,255));
}
}
int main()
{
VideoCapture capture(0);
if(!capture.isOpened()){
cerr<< "Capture failed";
return -1;
}
Mat frame;
namedWindow("Original");
namedWindow("Binary");
//Create slider for adjustments...
namedWindow("Adjustments");
int huemin= 0, satmin= 0, valmin= 0;
int huemax= 256, satmax= 256, valmax= 256;
createTrackbar("Min Hue", "Adjustments", &huemin, 256);
createTrackbar("Max Hue", "Adjustments", &huemax, 256);
createTrackbar("Min Saturation", "Adjustments", &satmin, 256);
createTrackbar("Max Saturation", "Adjustments", &satmax, 256);
createTrackbar("Min Value", "Adjustments", &valmin, 256);
createTrackbar("Max Value", "Adjustments", &valmax, 256);
//iterate through each frames of the video
while(true)
{
//Grabs each frame from video to be processed.
capture>> frame;
//Error checks and breaks if the grab failed.
if(!frame.data)
{
cerr<< "Failed to grab frame\n";
break;
}
//Apply a Gaussian Blur kernel.
//GaussianBlur(frame, frame, Size(3,3), 3, 3, 4);
Mat imgHSV(frame.size(), CV_8UC3);
cvtColor(frame, imgHSV, CV_BGR2HSV, 0); //Change the color format from BGR to HSV
Mat imgThresh = GetThresholdedImage(imgHSV, huemin, satmin, valmin, huemax, satmax, valmax);
//GaussianBlur(imgThresh, imgThresh, Size(5,5), 3, 3, 4); //smooth the binary image using Gaussian kernel
imgThresh= morph(imgThresh);
track(imgThresh, frame);
imshow("Binary", imgThresh);
imshow("Original", frame);
int key = waitKey(30);
//If 'ESC' is pressed, break the loop
if(key==27 ) break;
}
destroyAllWindows();
//cvReleaseCapture(&capture);
return 0;
}
Eugene, I encountered the same problem. Try adding cv::imshow("im", output_video);
as mentioned in OpenCV findContours causes Debug Assertion Failed at return.
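Another likely trigger, from reading the code above rather than from that link: if the thresholded mask is empty, findContours leaves contours and hierarchy empty, and hierarchy[i][0] with i = 0 then subscripts an empty vector, which is exactly the kind of thing that raises a Debug assertion in Visual Studio. A guarded version of the loop (my sketch, using the same names as in track()):
findContours(temp, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
if (!contours.empty() && !hierarchy.empty()) // empty mask -> empty vectors -> guard before indexing
{
    for (int i = 0; i >= 0; i = hierarchy[i][0])
    {
        Moments moment = moments(contours[i]);
        if (moment.m00 <= 0) // skip degenerate contours to avoid dividing by zero
            continue;
        circle(output_video, Point(cvRound(moment.m10 / moment.m00), cvRound(moment.m01 / moment.m00)), 3, Scalar(0, 255, 255));
    }
}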

Best way not to have to copy a Mat in OpenCV

I have this code that basically does a "dumb" background subtraction on two frames.
void FrameDifferenceBGS::operator()(cv::InputArray _image, cv::OutputArray _fgmask, double learningRate)
{
    cv::Mat img_input = _image.getMat();
    if (img_input.empty())
        return;
    _fgmask.create(img_input.size(), CV_8U);
    cv::Mat img_foreground = _fgmask.getMat();
    if (img_input_prev.empty())
    {
        img_input.copyTo(img_input_prev);
        return;
    }
    cv::absdiff(img_input_prev, img_input, img_foreground);
    if (img_foreground.channels() == 3)
        cv::cvtColor(img_foreground, img_foreground, CV_BGR2GRAY);
    if (enableThreshold)
        cv::threshold(img_foreground, img_foreground, threshold, 255, cv::THRESH_BINARY);
    if (showOutput)
        cv::imshow("Frame Difference", img_foreground);
    img_input.copyTo(img_input_prev);
    img_foreground.copyTo(_fgmask);
    firstTime = false;
}
If I don't add img_foreground.copyTo(_fgmask) at the end, the output array isn't updated with the result of img_foreground, resulting in a black image when this is called.
What am I doing wrong / what should I be doing here?
I reviewed your code again. It looks like you are creating a new object for _fgmask:
_fgmask.create(img_input.size(), CV_8U);
I think this is why you have the problem: after this statement, the reference inside the function differs from the one the caller passed in. Why don't you create the Mat before calling the function?
fix
change _fgmask.create(img_input.size(), CV_8U); to _fgmask.create(img_input.size(), CV_8UC3); or _fgmask.create(img_input.size(), img_input.type());
why
This is because cv::absdiff(img_input_prev, img_input, img_foreground); recreates a new array internally every time. It does update the img_foreground structure, but after the reallocation the data pointer inside _fgmask fails to change, since the headers are passed by value.
You can seemingly fix this (though it still incurs the creation cost) by doing cv::Mat& img_foreground = _fgmask.getMatRef();
And that is because CV_8U is not the same as CV_8UC3, and therefore the check in Mat::create() in mat.hpp always ends up allocating a new array due to the type difference.
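A minimal sketch of that idea, using the same names as the function above: give the output its final type up front, take the header once, and let OpenCV write in place because size and type already match.
// Sketch (assumes a grayscale mask is wanted and a scratch Mat `diff` exists):
_fgmask.create(img_input.size(), CV_8UC1);        // pre-allocate with the final type
cv::Mat img_foreground = _fgmask.getMat();        // header sharing _fgmask's data
cv::absdiff(img_input_prev, img_input, diff);     // 3-channel result lands in the scratch Mat
cv::cvtColor(diff, img_foreground, CV_BGR2GRAY);  // dst already CV_8UC1 and the right size -> no realloc,
                                                  // so the caller's _fgmask sees the result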
opinion
I think... maybe use Mat instead?
#include "opencv2/opencv.hpp"
using namespace cv;
class FrameDifferenceBGS
{
public:
Mat prev;
Mat diff;
bool enableThreshold;
bool showOutput;
bool firstTime;
uchar threshold;
FrameDifferenceBGS():firstTime(false),enableThreshold(false),showOutput(false),threshold(0)
{
}
void FrameDifferenceBGS::operator()(cv::Mat& _in, cv::Mat &_fg, double _lr)
{
if(_in.empty())
return;
if(prev.empty())
{
prev=_in.clone();
_fg=cv::Mat::zeros(_in.size(),CV_8UC1);
return;
}
cv::absdiff(prev, _in, diff);
if(diff.channels() == 3)
cv::cvtColor(diff, _fg, CV_BGR2GRAY);
else
_fg=diff;
if(enableThreshold)
cv::threshold(_fg, _fg, threshold, 255, cv::THRESH_BINARY);
if(showOutput)
cv::imshow("Frame Difference", _fg);
prev=_in.clone();
firstTime = false;
}
};
int main()
{
    VideoCapture cap(0);
    FrameDifferenceBGS bgs;
    Mat frame, fg;
    for (;;)
    {
        cap >> frame;
        bgs(frame, fg, 0);
        imshow("frame", frame);
        imshow("fg", fg);
        if (waitKey(1) == 27) exit(0);
    }
    return 0;
}
edit 2 (modified original)
#include "opencv2/opencv.hpp"
class FrameDifferenceBGS
{
public:
cv::Mat img_input_prev;
cv::Mat diff;
cv::Mat img_foreground;//put this in class in stead of inside the function
bool enableThreshold;
bool showOutput;
bool firstTime;
uchar threshold;
FrameDifferenceBGS():firstTime(false),enableThreshold(false),showOutput(false),threshold(0)
{
}
void FrameDifferenceBGS::operator()(cv::InputArray _image, cv::OutputArray _fgmask, double learningRate)
{
cv::Mat img_input = _image.getMat();
if(img_input.empty())
return;
if(_fgmask.empty())
_fgmask.create(img_input.size(), CV_8UC1);
if(img_input_prev.empty())
{
img_input.copyTo(img_input_prev);
return;
}
cv::absdiff(img_input_prev, img_input, img_foreground);
if(img_foreground.channels() == 3)
cv::cvtColor(img_foreground, _fgmask, CV_BGR2GRAY);
if(enableThreshold)
cv::threshold(img_foreground, img_foreground, threshold, 255, cv::THRESH_BINARY);
if(showOutput)
cv::imshow("Frame Difference", img_foreground);
img_input.copyTo(img_input_prev);
//img_foreground.copyTo(_fgmask);
firstTime = false;
}
};
int main()
{
cv::VideoCapture cap(0);
FrameDifferenceBGS bgs;
cv::Mat frame,fg;
for(;;)
{
cap >> frame;
bgs(frame,fg,0);
cv::imshow("frame", frame);
cv::imshow("fg", fg);
if(cv::waitKey(1) ==27) exit(0);
}
return 0;
}