I am using background subtraction for detecting moving vehicles in OpenCV.
The moving object is detected and a rectangle is created around the detected object.
I input a video that has moving objects in it.
The issue is:
I don't know how to calculate the moving object's speed. I tried searching forums, Google, and Stack Overflow, but didn't get any idea of how to calculate the speed.
I want to implement the same thing that is implemented in this YouTube video.
Here is my code:
BgDetection.cpp
#include "BgDetection.h"
int BgDetection1();
using namespace cv;
int BgDetection1()
{
cv::Mat frame;
cv::Mat back;
cv::Mat fore;
CvSeq* seq;
cv::VideoCapture cap("D:/Eclipse/bglib/video2.avi");
cap >> frame;
cv::initModule_video();
cv::BackgroundSubtractorMOG2 bg(100, 16, true); // history (int), varThreshold (float, commonly set to 16), shadow detection (bool)
bg.set("nmixtures", 3);
bg(frame, fore, -1); // learning rate = -1 (chosen automatically)
std::vector<std::vector<cv::Point> > contours;
cv::namedWindow("Frame");
cv::namedWindow("Background");
for(;;)
{
cap >> frame;
bg.operator ()(frame,fore);
bg.getBackgroundImage(back);
cv::erode(fore,fore,cv::Mat());
cv::dilate(fore,fore,cv::Mat());
std::vector<cv::Vec4i> hierarchy;
cv::findContours( fore, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
for ( size_t i=0; i<contours.size(); ++i )
{
cv::drawContours( frame, contours, i, Scalar(200,0,0), 1, 8, hierarchy, 0, Point() );
cv::Rect brect = cv::boundingRect(contours[i]);
cv::rectangle(frame, brect, Scalar(255,0,0));
}
//cv::drawContours(frame,contours,-1,cv::Scalar(0,0,255),2);
cv::imshow("Frame",frame);
cv::imshow("Background",back);
if(cv::waitKey(30) >= 0) break;
}
return 0;
}
BgDetection.h
#ifndef BGDETECTION_H_INCLUDED
#define BGDETECTION_H_INCLUDED
#include <iostream>
#include <sys/stat.h>
#include <stdio.h>
#include <conio.h>
#include <string.h>
#include <stdlib.h>
#include <opencv/cv.h>
#include "opencv2/features2d/features2d.hpp"
#include <opencv/highgui.h>
#include "opencv2/opencv.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include <vector>
#pragma comment(lib, "opencv_core244d.lib")
#pragma comment(lib, "opencv_highgui244d.lib")
#pragma comment(lib, "opencv_imgproc244d.lib")
#pragma comment(lib, "opencv_video244.lib")
int BgDetection1();
#endif // BGDETECTION_H_INCLUDED
main.cpp
#include <iostream>
#include "BgDetection.h"
using namespace std;
int main()
{
cout << BgDetection1() << endl;
return 0;
}
Any help appreciated.
Single object
If you are tracking a single rectangle around your moving object, the rectangle has a unique centre in each frame.
The difference between the centre positions could potentially be used to generate instantaneous velocity vectors.
My memory of OpenCV syntax in C++ is a bit rusty, but something along the lines of
// outside t-loop
cap >> frame;
bg.operator ()(frame,fore);
bg.getBackgroundImage(back);
cv::erode(fore,fore,cv::Mat());
cv::dilate(fore,fore,cv::Mat());
std::vector<cv::Vec4i> hierarchy;
cv::findContours( fore, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
int i = 0;
cv::drawContours( frame, contours, i, Scalar(200,0,0), 1, 8, hierarchy, 0, Point() );
cv::Rect oldrect = cv::boundingRect(contours[i]);
cv::rectangle(frame, oldrect, Scalar(255,0,0));
//cv::drawContours(frame,contours,-1,cv::Scalar(0,0,255),2);
cv::imshow("Frame",frame);
cv::imshow("Background",back);
if(cv::waitKey(30) >= 0) break;
// Within t-loop
cv::Rect newrect = cv::boundingRect(contours[i]);
double vx = newrect.x - oldrect.x;
double vy = newrect.y - oldrect.y;
oldrect = newrect;
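To turn that frame-to-frame displacement into an approximate speed, you can use the rectangle centres together with the video frame rate (computed before the oldrect = newrect update). A minimal sketch, assuming the capture reports a valid FPS; PIXELS_PER_METRE is a hypothetical calibration constant you would have to measure for your own scene:
// Sketch: centre-based speed estimate between consecutive frames (needs <cmath>).
const double PIXELS_PER_METRE = 20.0;    // assumption: calibrate for your camera setup
double fps = cap.get(CV_CAP_PROP_FPS);   // frames per second of the source video
if (fps <= 0) fps = 30.0;                // fall back if the container lacks FPS info
cv::Point2d oldCentre(oldrect.x + oldrect.width / 2.0, oldrect.y + oldrect.height / 2.0);
cv::Point2d newCentre(newrect.x + newrect.width / 2.0, newrect.y + newrect.height / 2.0);
double dx = newCentre.x - oldCentre.x;   // pixel displacement per frame
double dy = newCentre.y - oldCentre.y;
double pixelsPerFrame = std::sqrt(dx * dx + dy * dy);
double metresPerSecond = pixelsPerFrame * fps / PIXELS_PER_METRE; // multiply by 3.6 for km/h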
Multiple objects
If you have multiple objects, you could generate a point list for the objects in frame t and t+1 and then do point set matching on the two point sets.
Depending on the tracking complexity, I'd suggest:
a simple nearest neighbour matching if the assignment is essentially trivial (a sketch follows below)
global nearest neighbours (e.g. Jonker-Volgenant, http://www.assignmentproblems.com/LAPJV.htm) for something more difficult
If that still doesn't work, you'll probably have to delve into state estimation (see the Kalman filter for a basic example) and devise a cost function before calling LAPJV.
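For the simple nearest-neighbour case, a greedy sketch could look like the following, assuming the displacements between frames are small compared to the spacing between objects (for anything harder, switch to a global assignment as above):
#include <opencv2/core/core.hpp>
#include <vector>
#include <limits>
#include <utility>
// Greedy nearest-neighbour matching: for each centre in frame t,
// find the closest centre in frame t+1 and pair the indices.
std::vector<std::pair<int,int> > matchNearest(const std::vector<cv::Point2f>& prev,
                                              const std::vector<cv::Point2f>& curr)
{
    std::vector<std::pair<int,int> > matches;
    for (size_t i = 0; i < prev.size(); ++i)
    {
        double best = std::numeric_limits<double>::max();
        int bestJ = -1;
        for (size_t j = 0; j < curr.size(); ++j)
        {
            double dx = curr[j].x - prev[i].x;
            double dy = curr[j].y - prev[i].y;
            double d2 = dx * dx + dy * dy; // squared distance, no need for sqrt
            if (d2 < best) { best = d2; bestJ = (int)j; }
        }
        if (bestJ >= 0) matches.push_back(std::make_pair((int)i, bestJ));
    }
    return matches;
}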
Related
I am trying to implement background subtraction in OpenCV 2.4.10 using MOG2. My aim is to segment the hand using background subtraction. Unfortunately, the first frame that is used as the foreground appears to be stuck during live capture from the webcam. Here is the code that I used for this simple project:
#include "stdafx.h"
#include <opencv2\opencv.hpp>
#include <opencv2\imgproc\imgproc.hpp>
#include <opencv2\video\video.hpp>
#include <opencv2\core\core.hpp>
#include <iostream>
#include <sstream>
#include <string.h>
using namespace cv;
int main()
{
Mat frame, mask, gray;
BackgroundSubtractorMOG2 mog2;
VideoCapture cap(0);
if (cap.isOpened()){
while (true)
{
if (cap.read(frame))
{
imshow("frame", frame);
cvtColor(frame, gray, cv::COLOR_BGR2GRAY); // OpenCV captures frames as BGR, not RGB
imshow("gray", gray);
mog2(gray, mask, 0.0); // learning rate = 0.0, so the background model is never updated
imshow("Background Subtraction", mask);
if (waitKey(30) >= 0)
break;
}
}
}
cap.release();
return 0;
}
Here is the output
This is because your fist happens to be in the very first frame; thus, when you move your hand, you get two difference images: one from the new position of your palm, and the other from the old location of the fist, now occupied by the actual background behind it.
I would suggest that you shouldn't have your hand in the first few frames.
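Alternatively, a small positive learning rate lets the model keep updating, so the ghost of the fist is gradually absorbed into the background instead of sticking around. A minimal sketch; the value 0.01 is an assumption you would need to tune:
// With learningRate > 0 the background model keeps updating, so a region that
// was foreground in the first frame fades out once it stops moving.
mog2(gray, mask, 0.01);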
I'm trying to separate the background (green field and light green towel) from the objects using OpenCV, so I segmented the following image manually:
I bordered the objects in red and coloured in blue the connected components which should not be taken into consideration, as you can see in the bottom right of the image:
After thresholding the R and B channels at 254, I got the following:
Channel Red
Channel Blue
If I fill all the contours of the red channel using
findContours( bordersRed, contoursRedChannel, hierarchyRedChannel, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0) );
for (int index = 0; index < contoursRedChannel.size(); index ++)
{
drawContours( bordersRed, contoursRedChannel, index, colorForMask, CV_FILLED, 8, hierarchyRedChannel, 0, cv::Point() );
}
the bottom right corner will look like this:
But what I need is to ignore the contours that contain only blue points, in order to have something like this:
so I have to combine the red and blue channels to get it, but I don't know how yet. Any advice would be appreciated.
Thanks.
You can do that using floodFill, assuming you know a point inside the shape you want to fill.
Result starting from your "Channel red":
Code:
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;
int main()
{
// Your image
Mat1b img = imread("path_to_image", IMREAD_GRAYSCALE);
// Assume you know a point inside the shape
Point seed(930, 370);
// Apply floodfill
floodFill(img, seed, Scalar(255));
// Show image
imshow("Result", img);
waitKey();
return 0;
}
UPDATE
Once you fill the contours in both masks with drawContours(... CV_FILLED), you can simply XOR the two masks:
Code:
#include <opencv2\opencv.hpp>
#include <vector>
#include <algorithm>
using namespace std;
using namespace cv;
int main()
{
// Load the two masks
Mat1b channel_red_mask = imread("channel_red.png", IMREAD_GRAYSCALE);
Mat1b channel_blue_mask = imread("channel_blue.png", IMREAD_GRAYSCALE);
// Use just the bottom right part
Rect roi(Point(800, 270), Point(channel_red_mask.cols, channel_red_mask.rows));
channel_red_mask = channel_red_mask(roi).clone();
channel_blue_mask = channel_blue_mask(roi).clone();
// Fill all contours, in both masks
{
vector<vector<Point>> contours;
findContours(channel_red_mask.clone(), contours, RETR_LIST, CHAIN_APPROX_SIMPLE);
for (int i = 0; i < contours.size(); ++i)
{
drawContours(channel_red_mask, contours, i, Scalar(255), CV_FILLED);
}
}
{
vector<vector<Point>> contours;
findContours(channel_blue_mask.clone(), contours, RETR_LIST, CHAIN_APPROX_SIMPLE);
for (int i = 0; i < contours.size(); ++i)
{
drawContours(channel_blue_mask, contours, i, Scalar(255), CV_FILLED);
}
}
// XOR the masks
Mat1b xored = channel_red_mask ^ channel_blue_mask;
imshow("XOR", xored);
waitKey();
return 0;
}
The following code for finding contours in an image does not give any compilation errors. However, on running I get the error
"OpenCV: Assertion failed (size.width > 0 && size.height > 0)" in the OpenCV imshow function.
I tried the code with just the imshow function, removing everything after it, and the code runs fine, hence the file location does not seem to be a problem!
Any help would be much appreciated.
Thanks in advance!
#include <opencv\cv.h>
#include <opencv2\highgui\highgui.hpp>
#include <opencv\cvaux.h>
#include <opencv\cxcore.h>
#include <opencv2\imgproc\imgproc.hpp>
#include <iostream>
#include <conio.h>
using namespace cv;
using namespace std;
int main() {
Mat img1;
Mat output;
Mat img = imread("blue.jpg");
cvtColor(img, img1, CV_BGR2GRAY);
threshold(img1, output, 176, 255, CV_THRESH_BINARY);
imshow("hi", output);
vector<vector<Point>> Contours;
vector<Vec4i> hier;
Mat final;
findContours(img1, Contours, hier, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
for (int i = 0; i < Contours.size(); i++)
{
drawContours(final, Contours, i, Scalar(0, 255, 0), 1, 8, hier);
}
imshow("result", final);
waitKey();
}
You are drawing into an uninitialized matrix (final) here:
Mat final;
....
drawContours(final, Contours, i, Scalar(0, 255, 0), 1, 8, hier);
You should initialize final first, like:
Mat final = img.clone();
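If you would rather draw onto a black canvas instead of a copy of the input, a zero-initialized matrix of the same size also works:
// A black 3-channel canvas the same size as the source image,
// so drawContours has allocated pixels to write into.
Mat final = Mat::zeros(img.size(), CV_8UC3);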
I've been trying to set up a program that uses HSV to detect a specific colour and place a rectangle around the biggest area found of said colour. I am having problems when I get to the vectors and the "findContours" function, which stops the program with an error stating "xxx.exe has triggered a breakpoint".
This only happens when I insert the line
findContours(dilatedBlue,contours,hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);
I'm a rookie when it comes to OpenCV and C++, but I'm hoping someone will be able to help. I'm using Windows 7 x64, Visual Studio 2012, and OpenCV 2.4.8.
My full code follows:
#include "stdafx.h"
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat, Scalar)
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
int _tmain(int argc, _TCHAR* argv[])
{
VideoCapture cap(0); // open the default camera
if(!cap.isOpened()) // check if we succeeded
return -1;
// Declare Local Variables
int Quit = 0;
Mat imageDiff;
Mat HSVMatrix[3];
Mat erodedBlue;
Mat dilatedBlue;
//Start of search/shoot routine
while( Quit != 'q' ) // For as long as User wishes to continue...
{
Mat image;
cap >> image; // get a new frame from camera
imshow("Original", image);
//GaussianBlur(image, image, Size(3,3), 1.5, 1.5); // Setting up a Gaussian blur using 5x5 grid
cvtColor(image, imageDiff, CV_BGR2HSV);
split(imageDiff,HSVMatrix);
//Setting up saturation values for colour blue
threshold(HSVMatrix[0],HSVMatrix[0],130,255,THRESH_TOZERO_INV); // High value of colour - Dynamic changing of variable to select mode 130
threshold(HSVMatrix[0],HSVMatrix[0],75,255,THRESH_BINARY); // Low value of colour 75
threshold(HSVMatrix[1],HSVMatrix[1],255,255,THRESH_TOZERO_INV); // High saturation value - Modify for varying levels of light
threshold(HSVMatrix[1],HSVMatrix[1],100,255,THRESH_BINARY); // Low saturation value
HSVMatrix[2]=HSVMatrix[1]&HSVMatrix[0];
imshow("HSVblue", HSVMatrix[2]); // Displays binarised image
erode(HSVMatrix[2], HSVMatrix[2], Mat(), Point(-1, -1), 2, 1, 1);
erode(HSVMatrix[2], erodedBlue, Mat(), Point(-1, -1), 2, 1, 1);
imshow("Eroded-Blue", erodedBlue);
dilate(erodedBlue, dilatedBlue, Mat(), Point(-1, -1), 2, 1, 1);
imshow("Dilated-Blue", dilatedBlue);
vector<vector<Point> > contours; // Temporary variables used to hold contour information
vector<Point> approx;
vector<Vec4i> hierarchy;
vector<Rect> boundRect(contours.size());
findContours(dilatedBlue,contours,hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE);
for (int i = 0; i < contours.size(); i++)
{
approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true) * 0.02, true); // Calculation of contours and their approximate size and sides
}
for (int i = 0; i < contours.size(); i++)
{
if ((approx.size() == 4) && (fabs(contourArea(Mat(approx))) > 100)) // The properties that have to be fulfilled in order for the object to be recognized by the computer
{
Rect outline; // If the properties have been fulfilled and the object has been recognized, display an outline around the object
outline = boundingRect(approx);
rectangle(image,Rect(outline),Scalar(0, 255, 255),5,8,0); // The properties of the outline: the colour[bgr] and thickness
}
}
imshow("Target", image);
}
Quit = cvWaitKey(1); // The program waits for the key "q" to be pressed to exit
}
cap.release();
return 0;
}
I'm trying to play with my webcam and OpenCV.
I followed this tutorial: http://mateuszstankiewicz.eu/?p=189.
But the only result I have is one red border, and I don't understand why. Could anyone help me make it right and fix this?
Here is my code:
#include "mvt_detection.h"
Mvt_detection::Mvt_detection()
{
}
Mvt_detection::~Mvt_detection()
{
}
cv::Mat Mvt_detection::start(cv::Mat frame)
{
cv::Mat back;
cv::Mat fore;
cv::BackgroundSubtractorMOG2 bg(5,3,true) ;
cv::namedWindow("Background");
std::vector<std::vector<cv::Point> > contours;
bg.operator ()(frame,fore);
bg.getBackgroundImage(back);
cv::erode(fore,fore,cv::Mat());
cv::dilate(fore,fore,cv::Mat());
cv::findContours(fore,contours,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_NONE);
cv::drawContours(frame,contours,-1,cv::Scalar(0,0,255),2);
return frame;
}
Here is a screenshot of what our cam returns:
I tried with two other videos from there and there, and the same issue occurs.
Thanks for the help :).
As @Lenjyco said, we fixed the problem.
@Micka had a good idea:
Firstly, the BackgroundSubtractorMOG2 has to be instantiated only ONCE.
We instantiate it in the constructor and play with the history and threshold:
Mvt_detection::Mvt_detection()
{
bg = new cv::BackgroundSubtractorMOG2(10, 16, false);
}
10: the number of frames the background model looks back at to compare.
16: the threshold level (blur).
This way, we are now able to detect motion.
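For completeness, here is a sketch of how the field might look in the header; the exact class layout is an assumption, since the header isn't shown:
// mvt_detection.h (sketch): the subtractor lives with the object, so its
// history survives across calls to start(), which then uses (*bg)(frame, fore).
class Mvt_detection
{
public:
    Mvt_detection();
    ~Mvt_detection();
    cv::Mat start(cv::Mat frame);
private:
    cv::BackgroundSubtractorMOG2* bg; // created once in the constructor
};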
Thank you !
I have used the following code, which is similar to yours, and it is working well. I am also taking the input from my webcam. In your code, I didn't find any imshow() or waitKey() calls. Try to use them. My code follows:
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/video/background_segm.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdio.h>
#include <iostream>
#include <vector>
using namespace std;
using namespace cv;
int main()
{
VideoCapture cap;
bool update_bg_model = true;
cap.open(0);
cv::BackgroundSubtractorMOG2 bg;//(100, 3, 0.3, 5);
bg.set ("nmixtures", 3);
std::vector < std::vector < cv::Point > >contours;
cv::namedWindow ("Frame");
cv::namedWindow ("Background");
Mat frame, fgmask, fgimg, backgroundImage;
for(;;)
{
cap >> frame;
bg.operator()(frame, fgimg);
bg.getBackgroundImage (backgroundImage);
cv::erode (fgimg, fgimg, cv::Mat ());
cv::dilate (fgimg, fgimg, cv::Mat ());
cv::findContours (fgimg, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
cv::drawContours (frame, contours, -1, cv::Scalar (0, 0, 255), 2);
cv::imshow ("Frame", frame);
cv::imshow ("Background", backgroundImage);
char k = (char)waitKey(30);
if( k == 27 ) break;
}
return 0;
}
Problem fixed: putting the BackgroundSubtractorMOG2 in my object's fields and initialising it in the constructor makes it work well.