Kalman filter with acceleration - C++

I am trying to implement Kalman-filter-based mouse tracking (as a first test) using a velocity-acceleration model.
I want to try out this simple model; my state transition equations are:
X(k) = [x(k), y(k)]'   (position)
V(k) = [vx(k), vy(k)]' (velocity)
X(k) = X(k-1) + dt*V(k-1) + 0.5*dt*dt*a(k-1)
V(k) = V(k-1) + dt*a(k-1)
a(k) = a(k-1)
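Stacking the state as s(k) = [x, y, vx, vy, a]' (the ordering used in the code below, with a single acceleration shared by both axes), these equations correspond to the transition matrix:

A = [ 1  0  dt  0   0.5*dt*dt ]
    [ 0  1  0   dt  0.5*dt*dt ]
    [ 0  0  1   0   dt        ]
    [ 0  0  0   1   dt        ]
    [ 0  0  0   0   1         ]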
Using this, I wrote the following piece of code:
#include <iostream>
#include <vector>
#include <cstdio>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/tracking.hpp>
using namespace cv;
using namespace std;
struct mouse_info_struct { int x,y; };
struct mouse_info_struct mouse_info = {-1,-1}, last_mouse;
void on_mouse(int event, int x, int y, int flags, void* param)
{
//if (event == CV_EVENT_LBUTTONUP)
{
last_mouse = mouse_info;
mouse_info.x = x;
mouse_info.y = y;
}
}
void printmat(const cv::Mat &__mat, std::string __str)
{
std::cout << "--------" << __str << "----------\n";
for (int i=0 ; i<__mat.rows ; ++i)
{
for (int j=0 ; j<__mat.cols ; ++j)
std::cout << __mat.at<double>(i,j) << " ";
std::cout << std::endl;
}
std::cout << "-------------------------------------\n";
}
int main (int argc, char * const argv[])
{
int nStates = 5, nMeasurements = 2, nInputs = 1;
Mat img(500, 900, CV_8UC3);
KalmanFilter KF(nStates, nMeasurements, nInputs, CV_64F);
Mat state(nStates, 1, CV_64F); /* (x, y, Vx, Vy, a) */
Mat measurement(nMeasurements,1,CV_64F); measurement.setTo(Scalar(0));
Mat prevMeasurement(nMeasurements,1,CV_64F); prevMeasurement.setTo(Scalar(0));
int key = -1, dt=100, T=1000;
float /*a=100, acclErrMag = 0.05,*/ measurementErrVar = 100, noiseVal=0.001, covNoiseVal=0.9e-4;
namedWindow("Mouse-Kalman");
setMouseCallback("Mouse-Kalman", on_mouse, 0);
//while ( (char)(key=cv::waitKey(100)) != 'q' )
{
/// A
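// NOTE: dt and T are both declared int, so every (dt/T) below is integer
// division -- with dt=100 and T=1000 it evaluates to 0, not 0.1.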
KF.transitionMatrix.at<double>(0,0) = 1;
KF.transitionMatrix.at<double>(0,1) = 0;
KF.transitionMatrix.at<double>(0,2) = (dt/T);
KF.transitionMatrix.at<double>(0,3) = 0;
KF.transitionMatrix.at<double>(0,4) = 0.5*(dt/T)*(dt/T);
KF.transitionMatrix.at<double>(1,0) = 0;
KF.transitionMatrix.at<double>(1,1) = 1;
KF.transitionMatrix.at<double>(1,2) = 0;
KF.transitionMatrix.at<double>(1,3) = (dt/T);
KF.transitionMatrix.at<double>(1,4) = 0.5*(dt/T)*(dt/T);
KF.transitionMatrix.at<double>(2,0) = 0;
KF.transitionMatrix.at<double>(2,1) = 0;
KF.transitionMatrix.at<double>(2,2) = 1;
KF.transitionMatrix.at<double>(2,3) = 0;
KF.transitionMatrix.at<double>(2,4) = (dt/T);
KF.transitionMatrix.at<double>(3,0) = 0;
KF.transitionMatrix.at<double>(3,1) = 0;
KF.transitionMatrix.at<double>(3,2) = 0;
KF.transitionMatrix.at<double>(3,3) = 1;
KF.transitionMatrix.at<double>(3,4) = (dt/T);
KF.transitionMatrix.at<double>(4,0) = 0;
KF.transitionMatrix.at<double>(4,1) = 0;
KF.transitionMatrix.at<double>(4,2) = 0;
KF.transitionMatrix.at<double>(4,3) = 0;
KF.transitionMatrix.at<double>(4,4) = 1;
/// Initial estimate of state variables
KF.statePost = cv::Mat::zeros(nStates, 1,CV_64F);
KF.statePost.at<double>(0) = mouse_info.x;
KF.statePost.at<double>(1) = mouse_info.y;
KF.statePost.at<double>(2) = 0;
KF.statePost.at<double>(3) = 0;
KF.statePost.at<double>(4) = 0;
KF.statePre = KF.statePost;
/// Ex or Q
setIdentity(KF.processNoiseCov, Scalar::all(noiseVal));
/// Initial covariance estimate Sigma_bar(t) or P'(k)
setIdentity(KF.errorCovPre, Scalar::all(1000));
/// Sigma(t) or P(k)
setIdentity(KF.errorCovPost, Scalar::all(1000));
/// B
KF.controlMatrix = cv::Mat(nStates, nInputs,CV_64F);
KF.controlMatrix.at<double>(0,0) = 0;
KF.controlMatrix.at<double>(1,0) = 0;
KF.controlMatrix.at<double>(2,0) = 0;
KF.controlMatrix.at<double>(3,0) = 0;
KF.controlMatrix.at<double>(4,0) = 1;
/// H
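// H = [1 0 0 0 0; 0 1 0 0 0] -- selects x and y from the state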
KF.measurementMatrix = cv::Mat::eye(nMeasurements, nStates, CV_64F);
/// Ez or R
setIdentity(KF.measurementNoiseCov, Scalar::all(measurementErrVar*measurementErrVar));
printmat(KF.controlMatrix, "KF.controlMatrix");
printmat(KF.transitionMatrix, "KF.transitionMatrix");
printmat(KF.statePre,"KF.statePre");
printmat(KF.processNoiseCov, "KF.processNoiseCov");
printmat(KF.measurementMatrix, "KF.measurementMatrix");
printmat(KF.measurementNoiseCov, "KF.measurementNoiseCov");
printmat(KF.errorCovPost,"KF.errorCovPost");
printmat(KF.errorCovPre,"KF.errorCovPre");
printmat(KF.statePost,"KF.statePost");
while (mouse_info.x < 0 || mouse_info.y < 0)
{
imshow("Mouse-Kalman", img);
waitKey(30);
continue;
}
while ( (char)key != 's' )
{
/// MAKE A MEASUREMENT
measurement.at<double>(0) = mouse_info.x;
measurement.at<double>(1) = mouse_info.y;
/// MEASUREMENT UPDATE
Mat estimated = KF.correct(measurement);
/// STATE UPDATE
Mat prediction = KF.predict();
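// note: correct() is called before predict() here; the usual order (used in
// the modified version further below) is predict() first, then correct().
// Also, u below is scaled by 0.0 and never passed to predict(), so no
// control input is actually applied.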
cv::Mat u(nInputs,1,CV_64F);
u.at<double>(0,0) = 0.0 * sqrt(pow((prevMeasurement.at<double>(0) - measurement.at<double>(0)),2)
+ pow((prevMeasurement.at<double>(1) - measurement.at<double>(1)),2));
/// STORE ALL DATA
Point predictPt(prediction.at<double>(0),prediction.at<double>(1));
Point estimatedPt(estimated.at<double>(0),estimated.at<double>(1));
Point measuredPt(measurement.at<double>(0),measurement.at<double>(1));
/// PLOT POINTS
#define drawCross( center, color, d ) \
line( img, Point( center.x - d, center.y - d ), \
Point( center.x + d, center.y + d ), color, 2, CV_AA, 0); \
line( img, Point( center.x + d, center.y - d ), \
Point( center.x - d, center.y + d ), color, 2, CV_AA, 0 )
/// DRAW ALL ON IMAGE
img = Scalar::all(0);
drawCross( predictPt, Scalar(255,255,255), 9 ); //WHITE
drawCross( estimatedPt, Scalar(0,0,255), 6 ); //RED
drawCross( measuredPt, Scalar(0,255,0), 3 ); //GREEN
line( img, estimatedPt, measuredPt, Scalar(100,255,255), 3, CV_AA, 0 );
line( img, estimatedPt, predictPt, Scalar(0,255,255), 3, CV_AA, 0 );
prevMeasurement = measurement;
imshow( "Mouse-Kalman", img );
key=cv::waitKey(10);
}
}
return 0;
}
Here is the output of the code: http://www.youtube.com/watch?v=9_xd4HSz8_g
As you can see, the tracking is very slow. I don't understand what is wrong with the model and why the estimate lags so much. I don't expect there to be any control input.
Can anyone explain this?

I have modified my code and I am posting it for those who want to tweak it and play around some more. The main problem was the choice of covariances: a measurement noise covariance much larger than the process noise covariance makes the filter trust the model far more than the measurements, so the estimate lags behind the mouse.
#include <iostream>
#include <vector>
#include <cstdio>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/tracking.hpp>
using namespace cv;
using namespace std;
struct mouse_info_struct { int x,y; };
struct mouse_info_struct mouse_info = {-1,-1}, last_mouse;
vector<Point> mousev,kalmanv;
int trackbarProcessNoiseCov = 10, trackbarMeasurementNoiseCov = 10, trackbarStateEstimationErrorCov = 10;
float processNoiseCov=10, measurementNoiseCov = 1000, stateEstimationErrorCov = 50;
int trackbarProcessNoiseCovMax=10000, trackbarMeasurementNoiseCovMax = 10000,
trackbarStateEstimationErrorCovMax = 5000;
float processNoiseCovMin=0, measurementNoiseCovMin = 0,
stateEstimationErrorCovMin = 0;
float processNoiseCovMax=100, measurementNoiseCovMax = 5000,
stateEstimationErrorCovMax = 5000;
int nStates = 5, nMeasurements = 2, nInputs = 1;
KalmanFilter KF(nStates, nMeasurements, nInputs, CV_64F);
void on_mouse(int event, int x, int y, int flags, void* param)
{
last_mouse = mouse_info;
mouse_info.x = x;
mouse_info.y = y;
}
void on_trackbarProcessNoiseCov( int, void* )
{
processNoiseCov = processNoiseCovMin +
(trackbarProcessNoiseCov * (processNoiseCovMax-processNoiseCovMin)/trackbarProcessNoiseCovMax);
setIdentity(KF.processNoiseCov, Scalar::all(processNoiseCov));
std::cout << "\nProcess Noise Cov: " << processNoiseCov;
std::cout << "\nMeasurement Noise Cov: " << measurementNoiseCov << std::endl;
}
void on_trackbarMeasurementNoiseCov( int, void* )
{
measurementNoiseCov = measurementNoiseCovMin +
(trackbarMeasurementNoiseCov * (measurementNoiseCovMax-measurementNoiseCovMin)/trackbarMeasurementNoiseCovMax);
setIdentity(KF.measurementNoiseCov, Scalar::all(measurementNoiseCov));
std::cout << "\nProcess Noise Cov: " << processNoiseCov;
std::cout << "\nMeasurement Noise Cov: " << measurementNoiseCov << std::endl;
}
int main (int argc, char * const argv[])
{
Mat img(500, 1000, CV_8UC3);
Mat state(nStates, 1, CV_64F);/* (x, y, Vx, Vy, a) */
Mat measurementNoise(nMeasurements, 1, CV_64F), processNoise(nStates, 1, CV_64F);
Mat measurement(nMeasurements,1,CV_64F); measurement.setTo(Scalar(0.0));
Mat noisyMeasurement(nMeasurements,1,CV_64F); noisyMeasurement.setTo(Scalar(0.0));
Mat prevMeasurement(nMeasurements,1,CV_64F); prevMeasurement.setTo(Scalar(0.0));
Mat prevMeasurement2(nMeasurements,1,CV_64F); prevMeasurement2.setTo(Scalar(0.0));
int key = -1, dt=50, T=1000;
namedWindow("Mouse-Kalman");
setMouseCallback("Mouse-Kalman", on_mouse, 0);
createTrackbar( "Process Noise Cov", "Mouse-Kalman", &trackbarProcessNoiseCov,
trackbarProcessNoiseCovMax, on_trackbarProcessNoiseCov );
createTrackbar( "Measurement Noise Cov", "Mouse-Kalman", &trackbarMeasurementNoiseCov,
trackbarMeasurementNoiseCovMax, on_trackbarMeasurementNoiseCov );
on_trackbarProcessNoiseCov( trackbarProcessNoiseCov, 0 );
on_trackbarMeasurementNoiseCov( trackbarMeasurementNoiseCov, 0 );
//while ( (char)(key=cv::waitKey(100)) != 'q' )
{
/// A (TRANSITION MATRIX INCLUDING VELOCITY AND ACCELERATION MODEL)
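// NOTE: dt and T are still ints here, so (dt/T) remains integer division
// (0 for dt=50, T=1000), as in the first version.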
KF.transitionMatrix.at<double>(0,0) = 1;
KF.transitionMatrix.at<double>(0,1) = 0;
KF.transitionMatrix.at<double>(0,2) = (dt/T);
KF.transitionMatrix.at<double>(0,3) = 0;
KF.transitionMatrix.at<double>(0,4) = 0.5*(dt/T)*(dt/T);
KF.transitionMatrix.at<double>(1,0) = 0;
KF.transitionMatrix.at<double>(1,1) = 1;
KF.transitionMatrix.at<double>(1,2) = 0;
KF.transitionMatrix.at<double>(1,3) = (dt/T);
KF.transitionMatrix.at<double>(1,4) = 0.5*(dt/T)*(dt/T);
KF.transitionMatrix.at<double>(2,0) = 0;
KF.transitionMatrix.at<double>(2,1) = 0;
KF.transitionMatrix.at<double>(2,2) = 1;
KF.transitionMatrix.at<double>(2,3) = 0;
KF.transitionMatrix.at<double>(2,4) = (dt/T);
KF.transitionMatrix.at<double>(3,0) = 0;
KF.transitionMatrix.at<double>(3,1) = 0;
KF.transitionMatrix.at<double>(3,2) = 0;
KF.transitionMatrix.at<double>(3,3) = 1;
KF.transitionMatrix.at<double>(3,4) = (dt/T);
KF.transitionMatrix.at<double>(4,0) = 0;
KF.transitionMatrix.at<double>(4,1) = 0;
KF.transitionMatrix.at<double>(4,2) = 0;
KF.transitionMatrix.at<double>(4,3) = 0;
KF.transitionMatrix.at<double>(4,4) = 1;
/// Initial estimate of state variables
KF.statePost = cv::Mat::zeros(nStates, 1,CV_64F);
KF.statePost.at<double>(0) = mouse_info.x;
KF.statePost.at<double>(1) = mouse_info.y;
KF.statePost.at<double>(2) = 0.1;
KF.statePost.at<double>(3) = 0.1;
KF.statePost.at<double>(4) = 0.1;
KF.statePre = KF.statePost;
state = KF.statePost;
/// Ex or Q (PROCESS NOISE COVARIANCE)
setIdentity(KF.processNoiseCov, Scalar::all(processNoiseCov));
/// Initial covariance estimate Sigma_bar(t) or P'(k)
setIdentity(KF.errorCovPre, Scalar::all(stateEstimationErrorCov));
/// Sigma(t) or P(k) (STATE ESTIMATION ERROR COVARIANCE)
setIdentity(KF.errorCovPost, Scalar::all(stateEstimationErrorCov));
/// B (CONTROL MATRIX)
KF.controlMatrix = cv::Mat(nStates, nInputs,CV_64F);
KF.controlMatrix.at<double>(0,0) = /*0.5*(dt/T)*(dt/T);//*/0;
KF.controlMatrix.at<double>(1,0) = /*0.5*(dt/T)*(dt/T);//*/0;
KF.controlMatrix.at<double>(2,0) = 0;
KF.controlMatrix.at<double>(3,0) = 0;
KF.controlMatrix.at<double>(4,0) = 1;
/// H (MEASUREMENT MATRIX)
KF.measurementMatrix = cv::Mat::eye(nMeasurements, nStates, CV_64F);
/// Ez or R (MEASUREMENT NOISE COVARIANCE)
setIdentity(KF.measurementNoiseCov, Scalar::all(measurementNoiseCov));
while (mouse_info.x < 0 || mouse_info.y < 0)
{
imshow("Mouse-Kalman", img);
waitKey(30);
continue;
}
prevMeasurement.at<double>(0,0) = 0;
prevMeasurement.at<double>(1,0) = 0;
prevMeasurement2 = prevMeasurement;
while ( (char)key != 's' )
{
/// STATE UPDATE
Mat prediction = KF.predict();
/// MAKE A MEASUREMENT
measurement.at<double>(0) = mouse_info.x;
measurement.at<double>(1) = mouse_info.y;
/// MEASUREMENT NOISE SIMULATION
randn( measurementNoise, Scalar(0),
Scalar::all(sqrtf(measurementNoiseCov)));
noisyMeasurement = measurement + measurementNoise;
/// MEASUREMENT UPDATE
Mat estimated = KF.correct(noisyMeasurement);
cv::Mat u(nInputs,1,CV_64F);
u.at<double>(0,0) = 0.0 * sqrtf(pow((prevMeasurement.at<double>(0) - measurement.at<double>(0)),2)
+ pow((prevMeasurement.at<double>(1) - measurement.at<double>(1)),2));
/// STORE ALL DATA
Point noisyPt(noisyMeasurement.at<double>(0),noisyMeasurement.at<double>(1));
Point estimatedPt(estimated.at<double>(0),estimated.at<double>(1));
Point measuredPt(measurement.at<double>(0),measurement.at<double>(1));
/// PLOT POINTS
#define drawCross( center, color, d ) \
line( img, Point( center.x - d, center.y - d ), \
Point( center.x + d, center.y + d ), color, 2, CV_AA, 0); \
line( img, Point( center.x + d, center.y - d ), \
Point( center.x - d, center.y + d ), color, 2, CV_AA, 0 )
/// DRAW ALL ON IMAGE
img = Scalar::all(0);
drawCross( noisyPt, Scalar(255,255,255), 9 ); //WHITE
drawCross( estimatedPt, Scalar(0,0,255), 6 ); //RED
drawCross( measuredPt, Scalar(0,255,0), 3 ); //GREEN
line( img, estimatedPt, measuredPt, Scalar(100,255,255), 3, CV_AA, 0 );
line( img, estimatedPt, noisyPt, Scalar(0,255,255), 3, CV_AA, 0 );
imshow( "Mouse-Kalman", img );
key=cv::waitKey(dt);
prevMeasurement = measurement;
}
}
return 0;
}

Related

How to split image in OpenCV based on lines

I am trying to do text segmentation. The attachment below shows the results of it.
I managed to form lines to divide the image. However, I am stuck on splitting the image according to the lines that I found.
As labeled (red text) in the attached picture, I would like to split the image into 5 different images, and I do not know where I should start. All the methods I found only work for straight lines.
Code - Source:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <ctype.h>
#include <fstream>
#define _USE_MATH_DEFINES
#include <math.h>
#define JC_VORONOI_IMPLEMENTATION
#include "jc_voronoi.h"
struct compPoint
{
cv::Point pointer;
int siteNum, size;
};
int maximumSize;
float average=0;
std::vector<compPoint> generatePoint(cv::Mat image);
void generateVoronoi(std::vector<cv::Point> points, int width, int height);
static inline jcv_point remap(const jcv_point* pt, const jcv_point* min, const jcv_point* max, const jcv_point* scale);
static void draw_line(int x0, int y0, int x1, int y1, unsigned char* image, int width, int height, int nchannels, unsigned char* color);
static void plot(int x, int y, unsigned char* image, int width, int height, int nchannels, unsigned char* color);
float areaDifference(int s1,int s2);
float areaDifference(int s1, int s2)
{
if (s1 > s2)
{
return s1 / s2;
}
else
{
return s2 / s1;
}
}
std::vector<compPoint> generatePoint(cv::Mat image)
{
cv::Mat grayscale, binary;
cv::cvtColor(image, grayscale, cv::COLOR_BGR2GRAY);
cv::threshold(grayscale, binary, 190, 255, 1);
std::vector<std::vector<cv::Point> > contours;
std::vector<cv::Vec4i> hierarchy;
cv::findContours(binary, contours, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_NONE, cv::Point(0, 0));
std::vector<compPoint> extractedPoint;
cv::Mat drawing = cv::Mat::zeros(binary.size(), CV_8UC3);
cv::Scalar color = cv::Scalar(255, 255, 255);
maximumSize = cv::contourArea(contours[0]);
int skip = 0;
for (int i = 0; i < contours.size(); i++)
{
int jumpPoint = contours[i].size() / (contours[i].size() * 0.12);
bool isInner = false;
cv::Vec4i currentHierarchy = hierarchy[i];
if (contours[i].size() <= 20) //Remove small component
continue;
for (int g = 0; g < contours[i].size(); g = g + jumpPoint) //Sample point from connected component
{
compPoint temp;
temp.pointer = contours[i].at(g);
line(drawing, contours[i].at(g), contours[i].at(g), color, 1, 8, 0);
if (currentHierarchy.val[3] != -1)
{
int currentIndex = currentHierarchy.val[3];
while (hierarchy[currentIndex].val[3] != -1)
{
currentIndex = hierarchy[currentIndex].val[3];
}
temp.siteNum = currentIndex;
temp.size = cv::contourArea(contours[currentIndex]);
isInner = true;
}
else
{
temp.siteNum = i;
temp.size = cv::contourArea(contours[i]);
if (cv::contourArea(contours[i])>maximumSize)
{
maximumSize = cv::contourArea(contours[i]);
}
}
extractedPoint.push_back(temp);
}
if (isInner == false)
{
average = average + cv::contourArea(contours[i]);
skip++;
}
}
average = average/skip;
return extractedPoint;
}
static inline jcv_point remap(const jcv_point* pt, const jcv_point* min, const jcv_point* max, const jcv_point* scale)
{
jcv_point p;
p.x = (pt->x - min->x) / (max->x - min->x) * scale->x;
p.y = (pt->y - min->y) / (max->y - min->y) * scale->y;
return p;
}
static void plot(int x, int y, unsigned char* image, int width, int height, int nchannels, unsigned char* color)
{
if (x < 0 || y < 0 || x >(width - 1) || y >(height - 1))
return;
int index = y * width * nchannels + x * nchannels;
for (int i = 0; i < nchannels; ++i)
{
image[index + i] = color[i];
}
}
static void draw_line(int x0, int y0, int x1, int y1, unsigned char* image, int width, int height, int nchannels, unsigned char* color)
{
int dx = abs(x1 - x0), sx = x0<x1 ? 1 : -1;
int dy = -abs(y1 - y0), sy = y0<y1 ? 1 : -1;
int err = dx + dy, e2; // error value e_xy
for (;;)
{ // loop
plot(x0, y0, image, width, height, nchannels, color);
if (x0 == x1 && y0 == y1) break;
e2 = 2 * err;
if (e2 >= dy) { err += dy; x0 += sx; } // e_xy+e_x > 0
if (e2 <= dx) { err += dx; y0 += sy; } // e_xy+e_y < 0
}
}
void generateVoronoi(std::vector<compPoint> points, int width, int height)
{
int size = points.size();
jcv_point* voronoiPoint = (jcv_point*)malloc(sizeof(jcv_point) * (size_t)size);
for (int i = 0; i < size; i++)
{
voronoiPoint[i].x = (float)points[i].pointer.x;
voronoiPoint[i].y = (float)points[i].pointer.y;
voronoiPoint[i].site = points[i].siteNum;
voronoiPoint[i].totalPoint = points[i].size;
}
jcv_rect* rect = 0;
size_t imagesize = (size_t)(width*height * 3);
unsigned char* image = (unsigned char*)malloc(imagesize);
unsigned char* image2 = (unsigned char*)malloc(imagesize);
memset(image, 0, imagesize);
unsigned char color_pt[] = { 255, 255, 255 };
unsigned char color_line[] = { 220, 220, 220 };
jcv_diagram diagram;
jcv_point dimensions;
dimensions.x = (jcv_real)width;
dimensions.y = (jcv_real)height;
memset(&diagram, 0, sizeof(jcv_diagram));
jcv_diagram_generate(size, (const jcv_point*)voronoiPoint, rect, &diagram);
//Edge
const jcv_edge* edge = jcv_diagram_get_edges(&diagram);
std::vector<filtered_edge> filteredEdge;
float min_x = 0.0, min_y = 0.0;
while (edge) //Remove edge from the same connected component
{
jcv_point p0 = edge->pos[0];
jcv_point p1 = edge->pos[1];
if (edge->sites[0]->p.site != edge->sites[1]->p.site)
{
filteredEdge.push_back(jcv_save_edge(edge));
min_x = min_x + abs(edge->sites[0]->p.x - edge->sites[1]->p.x);
min_y = min_y + abs(edge->sites[0]->p.y - edge->sites[1]->p.y);
}
edge = edge->next;
}
min_x = min_x / filteredEdge.size();
min_y = min_y / filteredEdge.size();
std::vector<filtered_edge> selectedEdge;
for (int i = 0; i < filteredEdge.size(); i++)
{
jcv_point p0 = remap(&filteredEdge.at(i).pos[0], &diagram.min, &diagram.max, &dimensions);
jcv_point p1 = remap(&filteredEdge.at(i).pos[1], &diagram.min, &diagram.max, &dimensions);
float site_x = abs(filteredEdge.at(i).sites[0]->p.x - filteredEdge.at(i).sites[1]->p.x);
float site_y = abs(filteredEdge.at(i).sites[0]->p.y - filteredEdge.at(i).sites[1]->p.y);
float x_difference = abs(filteredEdge.at(i).pos[0].x- filteredEdge.at(i).pos[1].x);
float y_difference = abs(filteredEdge.at(i).pos[0].y - filteredEdge.at(i).pos[1].y);
float areaDiff = areaDifference(filteredEdge.at(i).sites[0]->p.totalPoint, filteredEdge.at(i).sites[1]->p.totalPoint);
if (p0.x - p1.x == 0 && p0.y - p1.y == 0.0) //Remove short edges
continue;
if (areaDiff > 20) //Keep edge between small(text) and big(image) component
{
float difference = abs(filteredEdge.at(i).sites[0]->p.totalPoint - filteredEdge.at(i).sites[1]->p.totalPoint);
if (difference > average*4 )
{
unsigned char color_line2[] = { 0, 220, 220 };
selectedEdge.push_back(filteredEdge.at(i));
draw_line((int)p0.x, (int)p0.y, (int)p1.x, (int)p1.y, image, width, height, 3, color_line2);
continue;
}
}
if (x_difference > y_difference) //Remove edge between close component
{
if (site_y > min_y*1.6)
{
unsigned char color_line2[] = { 220, 0, 220 };
selectedEdge.push_back(filteredEdge.at(i));
draw_line((int)p0.x, (int)p0.y, (int)p1.x, (int)p1.y, image, width, height, 3, color_line2);
}
}
else
{
if (site_x > min_x*2.5)
{
unsigned char color_line2[] = { 220, 220, 0 };
selectedEdge.push_back(filteredEdge.at(i));
draw_line((int)p0.x, (int)p0.y, (int)p1.x, (int)p1.y, image, width, height, 3, color_line2);
}
}
}
jcv_diagram_free(&diagram);
for (int i = 0; i < size; ++i)
{
jcv_point p = remap(&voronoiPoint[i], &diagram.min, &diagram.max, &dimensions);
plot((int)p.x, (int)p.y, image, width, height, 3, color_pt);
}
free(voronoiPoint);
cv::Mat segmentedImg = cv::Mat(height, width, CV_8UC3, image);
cv::imshow("Testing", segmentedImg);
cv::waitKey(0);
free(image);
}
int main()
{
cv::Mat image, skewCorrected;
image = cv::imread("C:\\figure5.PNG");
if (!image.data)
{
std::cout << "Error" << std::endl;
system("PAUSE");
return 0;
}
std::vector<compPoint> points = generatePoint(image);
int width = image.size().width, height = image.size().height;
generateVoronoi(points, width, height);
cv::waitKey(0);
}
Input image:
I don't understand many things in your code, so I just appended some lines to do what you want.
1 - Create a Mat of zeros to draw the lines (CV_8U)
Mat dst = cv::Mat(height, width, CV_8U, cvScalar(0.));
2 - Draw the lines (using your points)
line( dst, Point((int)p0.x, (int)p0.y), Point((int)p1.x, (int)p1.y), Scalar( 255, 255, 255 ), 1, 8);
3 - Close the "holes" between the lines (CLOSE morphology operation)
int morph_size = 20; // adjust this value to your image
Mat element = getStructuringElement( MORPH_RECT, Size( 2*morph_size + 1, 2*morph_size+1 ), Point( morph_size, morph_size ) );
// Apply the CLOSE morphology operation
morphologyEx( dst, closed, MORPH_CLOSE, element );
4 - Flood fill to a mask (= "painting" the split areas)
// iterate through the points
for (int i = 0; i < closed.rows; i++ ) {
for (int j = 0; j < closed.cols; j++) {
// if point is not "painted" yet
if (closed.at<uchar>(i, j) == 0) {
// copy Mat before Flood fill
Mat previous_closed = closed.clone();
// Flood fill that seed point ("paint" that area)
floodFill(closed, Point(j, i), 255);
// Get mask with the "painted" area
Mat mask = closed - previous_closed;
/// Copy from segmentedImg using the mask
Mat outputMat;
segmentedImg.copyTo(outputMat, mask);
cv::imshow("Closed lines", closed);
imshow("Splitted Area", outputMat);
waitKey(0);
break;
}
}
}
Area 1:
Area 2:
Area 3:
... And so on for all 5 areas: the loop keeps painting the "black areas" white and creates a Mat from the difference before and after each flood fill.
Full code (your code + these lines):
#include <opencv2/core/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/opencv.hpp>
using namespace cv;
#define JC_VORONOI_IMPLEMENTATION
#include "jc_voronoi.h"
struct compPoint
{
cv::Point pointer;
int siteNum, size;
};
int maximumSize;
float average=0;
std::vector<compPoint> generatePoint(cv::Mat image);
void generateVoronoi(std::vector<cv::Point> points, int width, int height);
static inline jcv_point remap(const jcv_point* pt, const jcv_point* min, const jcv_point* max, const jcv_point* scale);
static void draw_line(int x0, int y0, int x1, int y1, unsigned char* image, int width, int height, int nchannels, unsigned char* color);
static void plot(int x, int y, unsigned char* image, int width, int height, int nchannels, unsigned char* color);
float areaDifference(int s1,int s2);
float areaDifference(int s1, int s2)
{
if (s1 > s2)
{
return s1 / s2;
}
else
{
return s2 / s1;
}
}
std::vector<compPoint> generatePoint(cv::Mat image)
{
cv::Mat grayscale, binary;
cv::cvtColor(image, grayscale, cv::COLOR_BGR2GRAY);
cv::threshold(grayscale, binary, 190, 255, 1);
std::vector<std::vector<cv::Point> > contours;
std::vector<cv::Vec4i> hierarchy;
cv::findContours(binary, contours, hierarchy, cv::RETR_TREE, cv::CHAIN_APPROX_NONE, cv::Point(0, 0));
std::vector<compPoint> extractedPoint;
cv::Mat drawing = cv::Mat::zeros(binary.size(), CV_8UC3);
cv::Scalar color = cv::Scalar(255, 255, 255);
maximumSize = cv::contourArea(contours[0]);
int skip = 0;
for (int i = 0; i < contours.size(); i++)
{
int jumpPoint = contours[i].size() / (contours[i].size() * 0.12);
bool isInner = false;
cv::Vec4i currentHierarchy = hierarchy[i];
if (contours[i].size() <= 20) //Remove small component
continue;
for (int g = 0; g < contours[i].size(); g = g + jumpPoint) //Sample point from connected component
{
compPoint temp;
temp.pointer = contours[i].at(g);
line(drawing, contours[i].at(g), contours[i].at(g), color, 1, 8, 0);
if (currentHierarchy.val[3] != -1)
{
int currentIndex = currentHierarchy.val[3];
while (hierarchy[currentIndex].val[3] != -1)
{
currentIndex = hierarchy[currentIndex].val[3];
}
temp.siteNum = currentIndex;
temp.size = cv::contourArea(contours[currentIndex]);
isInner = true;
}
else
{
temp.siteNum = i;
temp.size = cv::contourArea(contours[i]);
if (cv::contourArea(contours[i])>maximumSize)
{
maximumSize = cv::contourArea(contours[i]);
}
}
extractedPoint.push_back(temp);
}
if (isInner == false)
{
average = average + cv::contourArea(contours[i]);
skip++;
}
}
average = average/skip;
return extractedPoint;
}
static inline jcv_point remap(const jcv_point* pt, const jcv_point* min, const jcv_point* max, const jcv_point* scale)
{
jcv_point p;
p.x = (pt->x - min->x) / (max->x - min->x) * scale->x;
p.y = (pt->y - min->y) / (max->y - min->y) * scale->y;
return p;
}
static void plot(int x, int y, unsigned char* image, int width, int height, int nchannels, unsigned char* color)
{
if (x < 0 || y < 0 || x >(width - 1) || y >(height - 1))
return;
int index = y * width * nchannels + x * nchannels;
for (int i = 0; i < nchannels; ++i)
{
image[index + i] = color[i];
}
}
static void draw_line(int x0, int y0, int x1, int y1, unsigned char* image, int width, int height, int nchannels, unsigned char* color)
{
int dx = abs(x1 - x0), sx = x0<x1 ? 1 : -1;
int dy = -abs(y1 - y0), sy = y0<y1 ? 1 : -1;
int err = dx + dy, e2; // error value e_xy
for (;;)
{ // loop
plot(x0, y0, image, width, height, nchannels, color);
if (x0 == x1 && y0 == y1) break;
e2 = 2 * err;
if (e2 >= dy) { err += dy; x0 += sx; } // e_xy+e_x > 0
if (e2 <= dx) { err += dx; y0 += sy; } // e_xy+e_y < 0
}
}
void generateVoronoi(std::vector<compPoint> points, int width, int height)
{
/// 1 - Create Mat of zeros to draw the lines
Mat dst = cv::Mat(height,width, CV_8U, cvScalar(0.));
int size = points.size();
jcv_point* voronoiPoint = (jcv_point*)malloc(sizeof(jcv_point) * (size_t)size);
for (int i = 0; i < size; i++)
{
voronoiPoint[i].x = (float)points[i].pointer.x;
voronoiPoint[i].y = (float)points[i].pointer.y;
voronoiPoint[i].site = points[i].siteNum;
voronoiPoint[i].totalPoint = points[i].size;
}
jcv_rect* rect = 0;
size_t imagesize = (size_t)(width*height * 3);
unsigned char* image = (unsigned char*)malloc(imagesize);
memset(image, 0, imagesize);
unsigned char color_pt[] = { 255, 255, 255 };
jcv_diagram diagram;
jcv_point dimensions;
dimensions.x = (jcv_real)width;
dimensions.y = (jcv_real)height;
memset(&diagram, 0, sizeof(jcv_diagram));
jcv_diagram_generate(size, (const jcv_point*)voronoiPoint, rect, &diagram);
//Edge
const jcv_edge* edge = jcv_diagram_get_edges(&diagram);
std::vector<filtered_edge> filteredEdge;
float min_x = 0.0, min_y = 0.0;
while (edge) //Remove edge from the same connected component
{
jcv_point p0 = edge->pos[0];
jcv_point p1 = edge->pos[1];
if (edge->sites[0]->p.site != edge->sites[1]->p.site)
{
filteredEdge.push_back(jcv_save_edge(edge));
min_x = min_x + abs(edge->sites[0]->p.x - edge->sites[1]->p.x);
min_y = min_y + abs(edge->sites[0]->p.y - edge->sites[1]->p.y);
}
edge = edge->next;
}
min_x = min_x / filteredEdge.size();
min_y = min_y / filteredEdge.size();
std::vector<filtered_edge> selectedEdge;
for (int i = 0; i < filteredEdge.size(); i++)
{
jcv_point p0 = remap(&filteredEdge.at(i).pos[0], &diagram.min, &diagram.max, &dimensions);
jcv_point p1 = remap(&filteredEdge.at(i).pos[1], &diagram.min, &diagram.max, &dimensions);
float site_x = abs(filteredEdge.at(i).sites[0]->p.x - filteredEdge.at(i).sites[1]->p.x);
float site_y = abs(filteredEdge.at(i).sites[0]->p.y - filteredEdge.at(i).sites[1]->p.y);
float x_difference = abs(filteredEdge.at(i).pos[0].x- filteredEdge.at(i).pos[1].x);
float y_difference = abs(filteredEdge.at(i).pos[0].y - filteredEdge.at(i).pos[1].y);
float areaDiff = areaDifference(filteredEdge.at(i).sites[0]->p.totalPoint, filteredEdge.at(i).sites[1]->p.totalPoint);
if (p0.x - p1.x == 0 && p0.y - p1.y == 0.0) //Remove short edges
continue;
/// 2 - Draw lines
if (areaDiff > 20) //Keep edge between small(text) and big(image) component
{
float difference = abs(filteredEdge.at(i).sites[0]->p.totalPoint - filteredEdge.at(i).sites[1]->p.totalPoint);
if (difference > average*4 )
{
unsigned char color_line2[] = { 0, 220, 220 };
selectedEdge.push_back(filteredEdge.at(i));
draw_line((int)p0.x, (int)p0.y, (int)p1.x, (int)p1.y, image, width, height, 3, color_line2);
line( dst, Point((int)p0.x, (int)p0.y), Point((int)p1.x, (int)p1.y), Scalar( 255, 255, 255 ), 1, 8);
continue;
}
}
if (x_difference > y_difference) //Remove edge between close component
{
if (site_y > min_y*1.6)
{
unsigned char color_line2[] = { 220, 0, 220 };
selectedEdge.push_back(filteredEdge.at(i));
draw_line((int)p0.x, (int)p0.y, (int)p1.x, (int)p1.y, image, width, height, 3, color_line2);
line( dst, Point((int)p0.x, (int)p0.y), Point((int)p1.x, (int)p1.y), Scalar( 255, 255, 255 ), 1, 8);
}
}
else
{
if (site_x > min_x*2.5)
{
unsigned char color_line2[] = { 220, 220, 0 };
selectedEdge.push_back(filteredEdge.at(i));
draw_line((int)p0.x, (int)p0.y, (int)p1.x, (int)p1.y, image, width, height, 3, color_line2);
line( dst, Point((int)p0.x, (int)p0.y), Point((int)p1.x, (int)p1.y), Scalar( 255, 255, 255 ), 1, 8);
}
}
}
jcv_diagram_free(&diagram);
for (int i = 0; i < size; ++i)
{
jcv_point p = remap(&voronoiPoint[i], &diagram.min, &diagram.max, &dimensions);
plot((int)p.x, (int)p.y, image, width, height, 3, color_pt);
}
free(voronoiPoint);
cv::Mat segmentedImg = cv::Mat(height, width, CV_8UC3, image);
cv::imshow("Testing", segmentedImg);
cv::imshow("Lines", dst);
/// New code:
Mat closed = dst.clone();
/// 3 - Close the "holes" between the lines
int morph_size = 20; // adjust this value to your image
Mat element = getStructuringElement( MORPH_RECT, Size( 2*morph_size + 1, 2*morph_size+1 ), Point( morph_size, morph_size ) );
// Apply the CLOSE morphology operation
morphologyEx( dst, closed, MORPH_CLOSE, element );
imshow("Closed lines", closed);
waitKey(0);
/// 4 - Flood fill to a mask
// iterate through the points
for (int i = 0; i < closed.rows; i++ ) {
for (int j = 0; j < closed.cols; j++) {
// if point is not "painted" yet
if (closed.at<uchar>(i, j) == 0) {
// copy Mat before Flood fill
Mat previous_closed = closed.clone();
// Flood fill that seed point ("paint" that area)
floodFill(closed, Point(j, i), 255);
// Get mask with the "painted" area
Mat mask = closed - previous_closed;
/// 5 - Copy from segmentedImg using the mask
Mat outputMat;
segmentedImg.copyTo(outputMat, mask);
cv::imshow("Closed lines", closed);
imshow("Splitted Area", outputMat);
waitKey(0);
break;
}
}
}
free(image);
}
int main()
{
cv::Mat image, skewCorrected;
image = cv::imread("/home/tribta/Downloads/HI2IT.png");
if (!image.data)
{
std::cout << "Error" << std::endl;
system("PAUSE");
return 0;
}
std::vector<compPoint> points = generatePoint(image);
int width = image.size().width, height = image.size().height;
generateVoronoi(points, width, height);
cv::waitKey(0);
}

Gradient Descent for Linear Regression not minimizing perfectly

I am trying to implement the basic gradient descent algorithm on my uniformly distributed training set. Since the data is uniform, the fitted line should be diagonal, but I am getting a line as in the figure below. In the figure, the circles are my data points and the line represents the hypothesis h(x).
I am using OpenCV just for output, nothing else. I am using the equations below (reconstructed from the gradientDescent() function in the code):

h(x) = theta1 + theta2 * x
theta1 := theta1 - (alpha/m) * sum_i( h(x_i) - y_i )
theta2 := theta2 - (alpha/m) * sum_i( (h(x_i) - y_i) * x_i )
#include <iostream>
#include <unistd.h>
#include <cv.h>
#include <highgui.h>
#define WIN_WIDTH 500
#define WIN_HEIGHT 500
#define MAX_POINTS 500
using namespace std;
using namespace cv;
void getPoints(vector<Point> &randPoints, int size)
{
for (int i = 20; i < WIN_HEIGHT; i+=20)
{
for (int j = 20; j < WIN_WIDTH; j+=20)
{
int x = i;
int y = j;
Point pt = Point(x, y);
randPoints.push_back(pt);
}
}
}
void gradientDescent( double &th1, double &th2, double &alpha, vector<Point> &pointVec)
{
int size = pointVec.size();
double sum1 = 0.0, sum2 = 0.0;
for (int i = 0; i < size; i++)
{
sum1 += (th1 + th2 * pointVec[i].x) - pointVec[i].y;
sum2 += ((th1 + th2 * pointVec[i].x) - pointVec[i].y) * pointVec[i].x;
}
th1 = th1 - ((alpha/( double)size) * sum1);
th2 = th2 - ((alpha/( double)size) * sum2);
}
int main(int argc, char**argv)
{
Mat img(WIN_WIDTH, WIN_HEIGHT, CV_8UC3);
img = Scalar(255, 255, 255);
vector<Point> randPoints;
getPoints(randPoints, MAX_POINTS);
int size = randPoints.size();
cout << "Training size = " << randPoints.size() << endl;
for (int i = 0; i < size; i++)
circle(img, randPoints[i], 4, Scalar(255, 0, 0), 1, 8);
double theta1 = 0, theta2 = 0.25, alpha = 0.0000001;
if (argc > 2)
{
theta1 = atof(argv[1]);
theta2 = atof(argv[2]);
}
int countConv = 0, prevY = 0;
cout << "Theta0 = " << theta1 << " Theta1 = " << theta2 << endl;
cout << "Learning rate = " << alpha << endl;
Mat tmpImg(WIN_WIDTH, WIN_HEIGHT, CV_8UC3);
while(1)
{
gradientDescent(theta1, theta2, alpha, randPoints);
int x = WIN_WIDTH+WIN_HEIGHT;
int y = theta1 + (theta2 * x);
int x1 = WIN_WIDTH-200;
int y1 = theta1 + theta2*x1;
img.copyTo(tmpImg);
circle(tmpImg, Point(x1, y1), 4, Scalar(0, 0, 255), -1, 8);
char text[64];
sprintf(text, "(%d, %d)", x1, y1);
putText(tmpImg, text, Point(x1+3, y1+3), FONT_HERSHEY_SCRIPT_SIMPLEX, 0.4, Scalar(0, 255, 0), 1, 8);
line(tmpImg, Point(0, theta1), Point(x, y), Scalar(0, 0, 255));
imshow("Gradient Descent", tmpImg);
waitKey(33);
}
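// note: the while(1) loop above never exits, so the two statements below
// are unreachable as written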
imshow("Gradient Descent", tmpImg);
waitKey(0);
return 0;
}

Adding scale to particle filter

The following code is a particle filter for the mouse; I changed it to track an object by color in a video, and that works.
But now I want to add scale to it; it currently works only with x and y. I tried to add scale but failed. Please help me add the scale of the detected object to the particle filter.
// Module "core"
#include <opencv2/core/core.hpp>
#include <opencv2/video/background_segm.hpp>
// Module "highgui"
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/legacy/legacy.hpp>
// Module "imgproc"
#include <opencv2/imgproc/imgproc.hpp>
#include "opencv2/videostab/videostab.hpp"
// Module "video"
#include <opencv2/video/video.hpp>
// Output
#include <iostream>
// Vector
#include <vector>
#define drawCross( center, color, d ) \
line( frame, cv::Point( center.x - d, center.y - d ), \
cv::Point( center.x + d, center.y + d ), color, 1, CV_AA, 0); \
line( frame, cv::Point( center.x + d, center.y - d ), \
cv::Point( center.x - d, center.y + d ), color, 1, CV_AA, 0 )
#define PLOT_PARTICLES 1
using namespace std;
using namespace cv;
// >>>>> Color to be tracked
#define MIN_H_BLUE 200
#define MAX_H_BLUE 300
// <<<<< Color to be tracked
vector<cv::Point> mouseV, particleV;
int main()
{
// Camera frame
cv::Mat frame;
char code = (char)-1;
cv::namedWindow("mouse particle");
cv::Mat_<float> measurement(2,1);
measurement.setTo(cv::Scalar(0));
int dim = 2;
int nParticles = 300;
float xRange = 650.0;
float yRange = 650.0;
float minRange[] = { 0, 0 };
float maxRange[] = { xRange, yRange };
CvMat LB, UB;
cvInitMatHeader(&LB, 2, 1, CV_32FC1, minRange);
cvInitMatHeader(&UB, 2, 1, CV_32FC1, maxRange);
CvConDensation* condens = cvCreateConDensation(dim, dim, nParticles);
cvConDensInitSampleSet(condens, &LB, &UB);
condens->DynamMatr[0] = 1.0;
condens->DynamMatr[1] = 0.0;
condens->DynamMatr[2] = 0.0;
condens->DynamMatr[3] = 1.0;
// Camera Index
string idx = "a.mp4";
// Camera Capture
cv::VideoCapture cap;
// >>>>> Camera Settings
if (!cap.open(idx))
{
cout << "Webcam not connected.\n" << "Please verify\n";
return EXIT_FAILURE;
}
cap.set(CV_CAP_PROP_FRAME_WIDTH, 1024);
cap.set(CV_CAP_PROP_FRAME_HEIGHT, 768);
// <<<<< Camera Settings
cout << "\nHit 'q' to exit...\n";
char ch = 0;
double ticks = 0;
bool found = false;
int notFoundCount = 0;
// >>>>> Main loop
while (ch != 'q' && ch != 'Q')
{
double precTick = ticks;
ticks = (double) cv::getTickCount();
double dT = (ticks - precTick) / cv::getTickFrequency(); //seconds
// Frame acquisition
cap >> frame;
mouseV.clear();
particleV.clear();
// >>>>> Noise smoothing
cv::Mat blur;
cv::GaussianBlur(frame, blur, cv::Size(5, 5), 3.0, 3.0);
// <<<<< Noise smoothing
// >>>>> HSV conversion
cv::Mat frmHsv;
cv::cvtColor(blur, frmHsv, CV_BGR2HSV);
// <<<<< HSV conversion
// >>>>> Color Thresholding
// Note: change parameters for different colors
cv::Mat rangeRes = cv::Mat::zeros(frame.size(), CV_8UC1);
cv::inRange(frmHsv, cv::Scalar(MIN_H_BLUE / 2, 100, 80),
cv::Scalar(MAX_H_BLUE / 2, 255, 255), rangeRes);
// <<<<< Color Thresholding
// >>>>> Improving the result
cv::erode(rangeRes, rangeRes, cv::Mat(), cv::Point(-1, -1), 2);
cv::dilate(rangeRes, rangeRes, cv::Mat(), cv::Point(-1, -1), 2);
// <<<<< Improving the result
// >>>>> Contours detection
vector<vector<cv::Point> > contours;
cv::findContours(rangeRes, contours, CV_RETR_EXTERNAL,
CV_CHAIN_APPROX_NONE);
// <<<<< Contours detection
// >>>>> Filtering
vector<vector<cv::Point> > balls;
vector<cv::Rect> ballsBox;
for (size_t i = 0; i < contours.size(); i++)
{
cv::Rect bBox;
bBox = cv::boundingRect(contours[i]);
float ratio = (float) bBox.width / (float) bBox.height;
if (ratio > 1.0f)
ratio = 1.0f / ratio;
// Searching for a bBox almost square
// if (ratio > 0.55 && bBox.area() >= 50)
// {
balls.push_back(contours[i]);
ballsBox.push_back(bBox);
measurement(0) = bBox.x;
measurement(1) = bBox.y;
measurement(2) = ballsBox.size();
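// note: measurement was declared as a 2x1 matrix above, so writing
// index (2) here is out of bounds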
//cout << "Balls found:" << bBox.x << endl;
// }
}
/*
cout << "Balls found:" << ballsBox.size() << endl;
*/
cv::Point measPt(measurement(0),measurement(1));
mouseV.push_back(measPt);
for (int i = 0; i < condens->SamplesNum; i++) {
float diffX = (measurement(0) - condens->flSamples[i][0])/xRange;
float diffY = (measurement(1) - condens->flSamples[i][1])/yRange;
condens->flConfidence[i] = 1.0 / (sqrt(diffX * diffX + diffY * diffY));
// plot particles
#ifdef PLOT_PARTICLES
cv::Point partPt(condens->flSamples[i][0], condens->flSamples[i][1]);
drawCross(partPt , cv::Scalar(255,0,255), 2);
#endif
}
cvConDensUpdateByTime(condens);
cv::Point statePt(condens->State[0], condens->State[1]);
particleV.push_back(statePt);
for (int i = 0; i < particleV.size() - 1; i++) {
line(frame, particleV[i], particleV[i+1], cv::Scalar(0,255,0), 1);
}
drawCross( statePt, cv::Scalar(255,255,255), 5 );
drawCross( measPt, cv::Scalar(0,0,255), 5 );
for (size_t i = 0; i < balls.size(); i++)
{
cv::drawContours(frame, balls, i, CV_RGB(20,150,20), 1);
cv::rectangle(frame, ballsBox[i], CV_RGB(0,255,0), 2);
cv::Point center;
center.x = ballsBox[i].x + ballsBox[i].width / 2;
center.y = ballsBox[i].y + ballsBox[i].height / 2;
cv::circle(frame, center, 2, CV_RGB(20,150,20), -1);
stringstream sstr;
sstr << "(" << center.x << "," << center.y << ")";
cv::putText(frame, sstr.str(),
cv::Point(center.x + 3, center.y - 3),
cv::FONT_HERSHEY_SIMPLEX, 0.5, CV_RGB(20,150,20), 2);
}
cv::imshow("mouse particle", frame);
cv::imshow("ssssssss", rangeRes);
ch = cv::waitKey(1);
}
// <<<<< Main loop
return EXIT_SUCCESS;
}
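For what it's worth, here is a minimal sketch of one way to add scale, against the same legacy ConDensation API used above: enlarge the state from (x, y) to (x, y, s), resize the bounds and the measurement to match, and include the scale term in each particle's confidence. Using the bounding-box width as the scale measurement and 300 as its upper bound are assumptions of mine, not something from the original code.

// state is now (x, y, scale)
int dim = 3;
float minRange[] = { 0.0f, 0.0f, 0.0f };
float maxRange[] = { xRange, yRange, 300.0f }; // 300 = assumed max scale
CvMat LB, UB;
cvInitMatHeader(&LB, 3, 1, CV_32FC1, minRange);
cvInitMatHeader(&UB, 3, 1, CV_32FC1, maxRange);
CvConDensation* condens = cvCreateConDensation(dim, dim, nParticles);
cvConDensInitSampleSet(condens, &LB, &UB);
// DynamMatr is dim x dim, row-major; identity = random-walk dynamics
for (int r = 0; r < dim; r++)
    for (int c = 0; c < dim; c++)
        condens->DynamMatr[r * dim + c] = (r == c) ? 1.0f : 0.0f;
// declare measurement as cv::Mat_<float>(3,1) instead of (2,1);
// in the contour loop, e.g.:
//   measurement(0) = bBox.x; measurement(1) = bBox.y; measurement(2) = bBox.width;
// and the confidence update gains a scale term:
//   float diffS = (measurement(2) - condens->flSamples[i][2]) / 300.0f;
//   condens->flConfidence[i] = 1.0f / sqrtf(diffX*diffX + diffY*diffY + diffS*diffS + 1e-6f);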

How to get extra information of blobs with SimpleBlobDetector?

@robot_sherrick answered this question for me; this is a follow-up question to his answer.
cv::SimpleBlobDetector in OpenCV 2.4 looks very exciting, but I am not sure I can make it work for more detailed data extraction.
I have the following concerns:
if this only returns the center of the blob, I can't get an entire labelled Mat, can I?
how can I access the features of the detected blobs like area, convexity, color and so on?
can I display an exact segmentation with this? (like with, say, watershed)
So the code should look something like this:
cv::Mat inputImg = imread(image_file_name, CV_LOAD_IMAGE_COLOR); // Read a file
cv::SimpleBlobDetector::Params params;
params.minDistBetweenBlobs = 10.0; // minimum 10 pixels between blobs
params.filterByArea = true; // filter my blobs by area of blob
params.minArea = 20.0; // min 20 pixels squared
params.maxArea = 500.0; // max 500 pixels squared
SimpleBlobDetector myBlobDetector(params);
std::vector<cv::KeyPoint> myBlobs;
myBlobDetector.detect(inputImg, myBlobs);
If you then want to have these keypoints highlighted on your image:
cv::Mat blobImg;
cv::drawKeypoints(inputImg, myBlobs, blobImg);
cv::imshow("Blobs", blobImg);
To access the info in the keypoints, you then just access each element like so:
for(std::vector<cv::KeyPoint>::iterator blobIterator = myBlobs.begin(); blobIterator != myBlobs.end(); blobIterator++){
std::cout << "size of blob is: " << blobIterator->size << std::endl;
std::cout << "point is at: " << blobIterator->pt.x << " " << blobIterator->pt.y << std::endl;
}
Note: this has not been compiled and may have typos.
Here is a version that will allow you to get the last contours back, via the getContours() method. They will match up by index to the keypoints.
class BetterBlobDetector : public cv::SimpleBlobDetector
{
public:
BetterBlobDetector(const cv::SimpleBlobDetector::Params &parameters = cv::SimpleBlobDetector::Params());
const std::vector < std::vector<cv::Point> > getContours();
protected:
virtual void detectImpl( const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints, const cv::Mat& mask=cv::Mat()) const;
virtual void findBlobs(const cv::Mat &image, const cv::Mat &binaryImage,
std::vector<Center> &centers, std::vector < std::vector<cv::Point> >&contours) const;
};
Then the cpp:
using namespace cv;
BetterBlobDetector::BetterBlobDetector(const SimpleBlobDetector::Params &parameters)
{
}
void BetterBlobDetector::findBlobs(const cv::Mat &image, const cv::Mat &binaryImage,
vector<Center> &centers, std::vector < std::vector<cv::Point> >&curContours) const
{
(void)image;
centers.clear();
curContours.clear();
std::vector < std::vector<cv::Point> >contours;
Mat tmpBinaryImage = binaryImage.clone();
findContours(tmpBinaryImage, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
for (size_t contourIdx = 0; contourIdx < contours.size(); contourIdx++)
{
Center center;
center.confidence = 1;
Moments moms = moments(Mat(contours[contourIdx]));
if (params.filterByArea)
{
double area = moms.m00;
if (area < params.minArea || area >= params.maxArea)
continue;
}
if (params.filterByCircularity)
{
double area = moms.m00;
double perimeter = arcLength(Mat(contours[contourIdx]), true);
double ratio = 4 * CV_PI * area / (perimeter * perimeter);
if (ratio < params.minCircularity || ratio >= params.maxCircularity)
continue;
}
if (params.filterByInertia)
{
double denominator = sqrt(pow(2 * moms.mu11, 2) + pow(moms.mu20 - moms.mu02, 2));
const double eps = 1e-2;
double ratio;
if (denominator > eps)
{
double cosmin = (moms.mu20 - moms.mu02) / denominator;
double sinmin = 2 * moms.mu11 / denominator;
double cosmax = -cosmin;
double sinmax = -sinmin;
double imin = 0.5 * (moms.mu20 + moms.mu02) - 0.5 * (moms.mu20 - moms.mu02) * cosmin - moms.mu11 * sinmin;
double imax = 0.5 * (moms.mu20 + moms.mu02) - 0.5 * (moms.mu20 - moms.mu02) * cosmax - moms.mu11 * sinmax;
ratio = imin / imax;
}
else
{
ratio = 1;
}
if (ratio < params.minInertiaRatio || ratio >= params.maxInertiaRatio)
continue;
center.confidence = ratio * ratio;
}
if (params.filterByConvexity)
{
vector < Point > hull;
convexHull(Mat(contours[contourIdx]), hull);
double area = contourArea(Mat(contours[contourIdx]));
double hullArea = contourArea(Mat(hull));
double ratio = area / hullArea;
if (ratio < params.minConvexity || ratio >= params.maxConvexity)
continue;
}
center.location = Point2d(moms.m10 / moms.m00, moms.m01 / moms.m00);
if (params.filterByColor)
{
if (binaryImage.at<uchar> (cvRound(center.location.y), cvRound(center.location.x)) != params.blobColor)
continue;
}
//compute blob radius
{
vector<double> dists;
for (size_t pointIdx = 0; pointIdx < contours[contourIdx].size(); pointIdx++)
{
Point2d pt = contours[contourIdx][pointIdx];
dists.push_back(norm(center.location - pt));
}
std::sort(dists.begin(), dists.end());
center.radius = (dists[(dists.size() - 1) / 2] + dists[dists.size() / 2]) / 2.;
}
centers.push_back(center);
curContours.push_back(contours[contourIdx]);
}
}
static std::vector < std::vector<cv::Point> > _contours;
const std::vector < std::vector<cv::Point> > BetterBlobDetector::getContours() {
return _contours;
}
void BetterBlobDetector::detectImpl(const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints, const cv::Mat&) const
{
//TODO: support mask
_contours.clear();
keypoints.clear();
Mat grayscaleImage;
if (image.channels() == 3)
cvtColor(image, grayscaleImage, CV_BGR2GRAY);
else
grayscaleImage = image;
vector < vector<Center> > centers;
vector < vector<cv::Point> >contours;
for (double thresh = params.minThreshold; thresh < params.maxThreshold; thresh += params.thresholdStep)
{
Mat binarizedImage;
threshold(grayscaleImage, binarizedImage, thresh, 255, THRESH_BINARY);
vector < Center > curCenters;
vector < vector<cv::Point> >curContours, newContours;
findBlobs(grayscaleImage, binarizedImage, curCenters, curContours);
vector < vector<Center> > newCenters;
for (size_t i = 0; i < curCenters.size(); i++)
{
bool isNew = true;
for (size_t j = 0; j < centers.size(); j++)
{
double dist = norm(centers[j][ centers[j].size() / 2 ].location - curCenters[i].location);
isNew = dist >= params.minDistBetweenBlobs && dist >= centers[j][ centers[j].size() / 2 ].radius && dist >= curCenters[i].radius;
if (!isNew)
{
centers[j].push_back(curCenters[i]);
size_t k = centers[j].size() - 1;
while( k > 0 && centers[j][k].radius < centers[j][k-1].radius )
{
centers[j][k] = centers[j][k-1];
k--;
}
centers[j][k] = curCenters[i];
break;
}
}
if (isNew)
{
newCenters.push_back(vector<Center> (1, curCenters[i]));
newContours.push_back(curContours[i]);
//centers.push_back(vector<Center> (1, curCenters[i]));
}
}
std::copy(newCenters.begin(), newCenters.end(), std::back_inserter(centers));
std::copy(newContours.begin(), newContours.end(), std::back_inserter(contours));
}
for (size_t i = 0; i < centers.size(); i++)
{
if (centers[i].size() < params.minRepeatability)
continue;
Point2d sumPoint(0, 0);
double normalizer = 0;
for (size_t j = 0; j < centers[i].size(); j++)
{
sumPoint += centers[i][j].confidence * centers[i][j].location;
normalizer += centers[i][j].confidence;
}
sumPoint *= (1. / normalizer);
KeyPoint kpt(sumPoint, (float)(centers[i][centers[i].size() / 2].radius));
keypoints.push_back(kpt);
_contours.push_back(contours[i]);
}
}
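To then read out the extra blob information the question asks about, here is a possible usage sketch (my own, not part of the original answer), reusing inputImg from the first snippet:

BetterBlobDetector detector; // default Params
std::vector<cv::KeyPoint> blobs;
detector.detect(inputImg, blobs);
std::vector< std::vector<cv::Point> > contours = detector.getContours();
for (size_t i = 0; i < contours.size(); i++)
{
    double area = cv::contourArea(contours[i]);
    std::vector<cv::Point> hull;
    cv::convexHull(contours[i], hull);
    double convexity = area / cv::contourArea(hull); // 1.0 = perfectly convex
    std::cout << "blob " << i << ": area = " << area
              << ", convexity = " << convexity << std::endl;
}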
//Access SimpleBlobDetector data for video
#include "opencv2/imgproc/imgproc.hpp" //
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <math.h>
#include <vector>
#include <fstream>
#include <string>
#include <sstream>
#include <algorithm>
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/features2d/features2d.hpp"
using namespace cv;
using namespace std;
int main(int argc, char *argv[])
{
const char* fileName ="C:/Users/DAGLI/Desktop/videos/new/m3.avi";
VideoCapture cap(fileName); //
if(!cap.isOpened()) //
{
cout << "Couldn't open Video " << fileName << "\n";
return -1;
}
for(;;) // infinite loop over the video frames
{
Mat frame,labelImg;
cap >> frame;
if(frame.empty()) break;
//imshow("main",frame);
Mat frame_gray;
cvtColor(frame,frame_gray,CV_RGB2GRAY);
//////////////////////////////////////////////////////////////////////////
// convert binary_image
Mat binaryx;
threshold(frame_gray,binaryx,120,255,CV_THRESH_BINARY);
Mat src, gray, thresh, binary;
Mat out;
vector<KeyPoint> keyPoints;
SimpleBlobDetector::Params params;
params.minThreshold = 120;
params.maxThreshold = 255;
params.thresholdStep = 100;
params.minArea = 20;
params.minConvexity = 0.3;
params.minInertiaRatio = 0.01;
params.maxArea = 1000;
params.maxConvexity = 10;
params.filterByColor = false;
params.filterByCircularity = false;
src = binaryx.clone();
SimpleBlobDetector blobDetector( params );
blobDetector.create("SimpleBlob");
blobDetector.detect( src, keyPoints );
drawKeypoints( src, keyPoints, out, CV_RGB(255,0,0), DrawMatchesFlags::DEFAULT);
cv::Mat blobImg;
cv::drawKeypoints(frame, keyPoints, blobImg);
cv::imshow("Blobs", blobImg);
for(int i=0; i<keyPoints.size(); i++){
//circle(out, keyPoints[i].pt, 20, cvScalar(255,0,0), 10);
//cout<<keyPoints[i].response<<endl;
//cout<<keyPoints[i].angle<<endl;
//cout<<keyPoints[i].size()<<endl;
cout<<keyPoints[i].pt.x<<endl;
cout<<keyPoints[i].pt.y<<endl;
}
imshow( "out", out );
if ((cvWaitKey(40)&0xff)==27) break; // break when Esc is pressed
}
system("pause");
}

C++ algorithm for running a command after detecting a square

I have OpenCV code for detecting squares. Now I want the code to run another command after a square is detected.
Here is the code:
#include "cv.h"
#include "cxcore.h"
#include "highgui.h"
#include "math.h"
#include <iostream>
#include <stdio.h>
#include <string.h>
#include <sstream>
using namespace std;
double angle( CvPoint* pt1, CvPoint* pt2, CvPoint* pt0 )
{
double dx1 = pt1->x - pt0->x;
double dy1 = pt1->y - pt0->y;
double dx2 = pt2->x - pt0->x;
double dy2 = pt2->y - pt0->y;
return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
} // angle
IplImage* findSquares4( IplImage* img, CvMemStorage* storage )
{
double s = 0, t = 0;
int sdetect =0, sqdetect = 0,sqt = 0;
CvSeq* contours;
CvSeq* result;
CvSeq* squares = cvCreateSeq( 0, sizeof( CvSeq), sizeof( CvPoint), storage );
IplImage* cny = cvCreateImage(cvGetSize(img), 8, 1);
cvCanny(img, cny, 5, 100, 3);
cvNamedWindow("canny",CV_WINDOW_AUTOSIZE);
cvShowImage("canny",cny);
cvFindContours( cny, storage, &contours, sizeof( CvContour), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint( 0, 0) );
while( contours )
{
result = cvApproxPoly( contours,
sizeof( CvContour),
storage,
CV_POLY_APPROX_DP,
cvContourPerimeter( contours)*0.02, 0 );
if( result->total == 4 &&
fabs( cvContourArea( result, CV_WHOLE_SEQ)) > 1000 &&
cvCheckContourConvexity( result) )
{
s = 0;
for( int i = 2; i < 5; i++ )
{
{
t = fabs( angle(
( CvPoint*)cvGetSeqElem( result, i ),
( CvPoint*)cvGetSeqElem( result, i-2 ),
( CvPoint*)cvGetSeqElem( result, i-1 )));
s = s > t ? s : t;
cout << "s = " << s<< endl;
cout << "t = " << t<< endl;
}
} // for
if( s < 0.3 )
for( int i = 0; i < 4; i++ )
cvSeqPush( squares,
( CvPoint*)cvGetSeqElem( result, i ));
} // if
contours = contours->h_next;
} // while
if ((squares->total/4) == 1)
{
sdetect = 1;
} // if
else
{
sdetect = 2;
sleep(0.5); // note: sleep() takes whole seconds, so sleep(0.5) truncates to sleep(0)
} // else
if (sqdetect != sdetect)
{
sqdetect=sdetect;
switch(sqdetect)
{
case 0 : system(NULL) ; break;
case 2 : cout<< "no "<< endl; break;
case 1 : system("./ambil1.sh"); break;
} // switch
} // if
sdetect = 0;
cout<<"Persegi : "<< squares->total/4 <<endl;
cvReleaseMemStorage(&storage);
cvClearSeq(squares);
} // findSquares4 (note: declared as returning IplImage* but never returns a value)
void drawSquares(IplImage *img, CvSeq* squares )
{
CvFont font;
cvInitFont( &font, CV_FONT_HERSHEY_SIMPLEX, 0.4f, 0.4f, 0,1, 8 );
int i,j,sdetect,sqdetect = 0;
CvSeqReader reader;
cvStartReadSeq( squares, &reader, 0 );
for( i = 0; i < squares->total; i += 4 )
{
j++;
CvPoint pt[4], *rect = pt;
int count = 4;
// read 4 vertices
memcpy( pt, reader.ptr, squares->elem_size );
CV_NEXT_SEQ_ELEM( squares->elem_size, reader );
memcpy( pt + 1, reader.ptr, squares->elem_size );
CV_NEXT_SEQ_ELEM( squares->elem_size, reader );
memcpy( pt + 2, reader.ptr, squares->elem_size );
CV_NEXT_SEQ_ELEM( squares->elem_size, reader );
memcpy( pt + 3, reader.ptr, squares->elem_size );
CV_NEXT_SEQ_ELEM( squares->elem_size, reader );
cvPutText( img, "SQUARE", pt[i], &font, CV_RGB(20,255,0));
cvPolyLine( img, &rect, &count, 1, 1, CV_RGB(200,0,0), 4, CV_AA, 0 );
} // for
cvClearSeq(squares);
} // drawSquares
But what I get is that the program becomes laggy. So what is the right way to structure the condition for executing system("./ambil1.sh")?
The program waits until the subordinate command finishes. During this time it will not respond.
If that's what you need, system is adequate.
If you want to fire and forget, you can either use the fork/exec pair, or just use the shell & operator to run stuff in the background.
If you need to fire, change your program's behaviour, then change it back when the subordinate command finishes, you almost have to use the fork/exec/SIGCHLD/waitpid combination.
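For the fire-and-forget case, a minimal sketch (POSIX; runDetached is a name I made up), which keeps the detection loop responsive while the script runs:

#include <unistd.h>
#include <signal.h>

void runDetached(const char* cmd)
{
    signal(SIGCHLD, SIG_IGN); // reap children automatically, no zombies
    pid_t pid = fork();
    if (pid == 0) // child: replace ourselves with the shell command
    {
        execl("/bin/sh", "sh", "-c", cmd, (char*)NULL);
        _exit(127); // reached only if exec fails
    }
    // parent (pid > 0, or -1 on fork error) returns immediately
}

// inside the switch:
// case 1 : runDetached("./ambil1.sh"); break;

system("./ambil1.sh &") achieves much the same through the shell, at the cost of spawning an extra shell process.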