I have the code below for extracting the orientation of an object in an image. I am new to OpenCV and C++, but I need to get this working.
My question is: how do I extract and write out the angle and axis information in this code?
#include "pch.h"
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
using namespace std;
using namespace cv;
// Function declarations
void drawAxis(Mat&, Point, Point, Scalar, const float);
double getOrientation(const vector<Point> &, Mat&);
void drawAxis(Mat& img, Point p, Point q, Scalar colour, const float scale = 0.2)
{
double angle = atan2((double)p.y - q.y, (double)p.x - q.x); // angle in radians
double hypotenuse = sqrt((double)(p.y - q.y) * (p.y - q.y) + (p.x - q.x) * (p.x - q.x));
// Here we lengthen the arrow by a factor of scale
q.x = (int)(p.x - scale * hypotenuse * cos(angle));
q.y = (int)(p.y - scale * hypotenuse * sin(angle));
line(img, p, q, colour, 1, LINE_AA);
// create the arrow hooks
p.x = (int)(q.x + 9 * cos(angle + CV_PI / 4));
p.y = (int)(q.y + 9 * sin(angle + CV_PI / 4));
line(img, p, q, colour, 1, LINE_AA);
p.x = (int)(q.x + 9 * cos(angle - CV_PI / 4));
p.y = (int)(q.y + 9 * sin(angle - CV_PI / 4));
line(img, p, q, colour, 1, LINE_AA);
}
double getOrientation(const vector<Point> &pts, Mat &img)
{
//Construct a buffer used by the pca analysis
int sz = static_cast<int>(pts.size());
Mat data_pts = Mat(sz, 2, CV_64F);
for (int i = 0; i < data_pts.rows; i++)
{
data_pts.at<double>(i, 0) = pts[i].x;
data_pts.at<double>(i, 1) = pts[i].y;
}
//Perform PCA analysis
PCA pca_analysis(data_pts, Mat(), PCA::DATA_AS_ROW);
//Store the center of the object
Point cntr = Point(static_cast<int>(pca_analysis.mean.at<double>(0, 0)),
static_cast<int>(pca_analysis.mean.at<double>(0, 1)));
//Store the eigenvalues and eigenvectors
vector<Point2d> eigen_vecs(2);
vector<double> eigen_val(2);
for (int i = 0; i < 2; i++)
{
eigen_vecs[i] = Point2d(pca_analysis.eigenvectors.at<double>(i, 0),
pca_analysis.eigenvectors.at<double>(i, 1));
eigen_val[i] = pca_analysis.eigenvalues.at<double>(i);
}
// Draw the principal components
circle(img, cntr, 3, Scalar(255, 0, 255), 2);
Point p1 = cntr + 0.02 * Point(static_cast<int>(eigen_vecs[0].x * eigen_val[0]), static_cast<int>(eigen_vecs[0].y * eigen_val[0]));
Point p2 = cntr - 0.02 * Point(static_cast<int>(eigen_vecs[1].x * eigen_val[1]), static_cast<int>(eigen_vecs[1].y * eigen_val[1]));
drawAxis(img, cntr, p1, Scalar(0, 255, 0), 1);
drawAxis(img, cntr, p2, Scalar(255, 255, 0), 5);
double angle = atan2(eigen_vecs[0].y, eigen_vecs[0].x); // orientation in radians
return angle;
}
int main(int argc, char** argv)
{
// Load image
CommandLineParser parser(argc, argv, "{#input | joint2.bmp | input image}");
parser.about("This program demonstrates how to use OpenCV PCA to extract the orientation of an object.\n");
parser.printMessage();
Mat src = imread(parser.get<String>("#input"));
// Check if image is loaded successfully
if (src.empty())
{
cout << "Problem loading image!!!" << endl;
return EXIT_FAILURE;
}
imshow("src", src);
// Convert image to grayscale
Mat gray;
cvtColor(src, gray, COLOR_BGR2GRAY);
// Convert image to binary
Mat bw;
threshold(gray, bw, 200, 255, THRESH_BINARY | THRESH_OTSU);
// Find all the contours in the thresholded image
vector<vector<Point> > contours;
findContours(bw, contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);
for (size_t i = 0; i < contours.size(); i++)
{
// Calculate the area of each contour
double area = contourArea(contours[i]);
// Ignore contours that are too small or too large
if (area < 1e2 || 1e5 < area) continue;
// Draw each contour only for visualisation purposes
drawContours(src, contours, static_cast<int>(i), Scalar(0, 0, 255), 2);
// Find the orientation of each shape
getOrientation(contours[i], src);
}
imshow("output", src);
waitKey();
return 0;
}
Here is the image of the object:
And here is the result:
As you can see, it finds the orientation correctly, but I need the angle, and a label for each axis, to be written out.
I will be very grateful if someone knows how to do it!
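For anyone wanting the angle printed on the image itself rather than to the console, a minimal sketch (my own addition, not part of the original code) is to overlay it with cv::putText just before getOrientation returns:

// Sketch: overlay the orientation (in degrees) next to the object's center.
// Assumes it is inserted just before "return angle;" in getOrientation(),
// where both "angle" and "cntr" are already defined.
string label = format("%.1f deg", angle * 180 / CV_PI);
putText(img, label, cntr + Point(15, 15), FONT_HERSHEY_COMPLEX_SMALL, 1, Scalar(0, 255, 255));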
EDIT: I have figured out how to find the information about the center, area, and the angle.
/// Get the moments
vector<Moments> mu(contours.size());
for (size_t i = 0; i < contours.size(); i++)
{
mu[i] = moments(contours[i]);
}
/// Get the mass centers
vector<Point2f> mc(contours.size());
for (size_t i = 0; i < contours.size(); i++)
{
//add 1e-5 to avoid division by zero
mc[i] = Point2f(static_cast<float>(mu[i].m10 / (mu[i].m00 + 1e-5)),
static_cast<float>(mu[i].m01 / (mu[i].m00 + 1e-5)));
}
imshow("output", src);
cout << "\t Info: Area and angle \n";
for (size_t i = 0; i < contours.size(); i++)
{
cout << " * Contour[" << i << "] - Center: "<< mc[i]
<< " - Area: " << contourArea(contours[i]) << " - Angle: " << getOrientation(contours[i],src)*180/CV_PI << endl;
}
But I still don't know how to denote which arrow is which axis in the image.
So I have figured out everything I need (almost).
Here is the final code:
#include "pch.h"
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
using namespace std;
using namespace cv;
// Function declarations
void drawAxis(Mat&, Point, Point, Scalar, const float);
double getOrientation(const vector<Point> &, Mat&);
string s = "";
void drawAxis(Mat& img, Point p, Point q, Scalar colour, const float scale = 0.2)
{
double angle = atan2((double)p.y - q.y, (double)p.x - q.x); // angle in radians
double hypotenuse = sqrt((double)(p.y - q.y) * (p.y - q.y) + (p.x - q.x) * (p.x - q.x));
// Here we lengthen the arrow by a factor of scale
q.x = (int)(p.x - scale * hypotenuse * cos(angle));
q.y = (int)(p.y - scale * hypotenuse * sin(angle));
line(img, p, q, colour, 1, LINE_AA);
// create the arrow hooks
p.x = (int)(q.x + 9 * cos(angle + CV_PI / 4));
p.y = (int)(q.y + 9 * sin(angle + CV_PI / 4));
line(img, p, q, colour, 1, LINE_AA);
p.x = (int)(q.x + 9 * cos(angle - CV_PI / 4));
p.y = (int)(q.y + 9 * sin(angle - CV_PI / 4));
line(img, p, q, colour, 1, LINE_AA);
}
double getOrientation(const vector<Point> &pts, Mat &img)
{
//Construct a buffer used by the pca analysis
int sz = static_cast<int>(pts.size());
Mat data_pts = Mat(sz, 2, CV_64F);
for (int i = 0; i < data_pts.rows; i++)
{
data_pts.at<double>(i, 0) = pts[i].x;
data_pts.at<double>(i, 1) = pts[i].y;
}
//Perform PCA analysis
PCA pca_analysis(data_pts, Mat(), PCA::DATA_AS_ROW);
//Store the center of the object
Point cntr = Point(static_cast<int>(pca_analysis.mean.at<double>(0, 0)),
static_cast<int>(pca_analysis.mean.at<double>(0, 1)));
//Store the eigenvalues and eigenvectors
vector<Point2d> eigen_vecs(2);
vector<double> eigen_val(2);
for (int i = 0; i < 2; i++)
{
eigen_vecs[i] = Point2d(pca_analysis.eigenvectors.at<double>(i, 0),
pca_analysis.eigenvectors.at<double>(i, 1));
eigen_val[i] = pca_analysis.eigenvalues.at<double>(i);
}
// Draw the principal components
circle(img, cntr, 3, Scalar(255, 0, 255), 2);
Point p1 = cntr + 0.01 * Point(static_cast<int>(eigen_vecs[0].x * eigen_val[0]), static_cast<int>(eigen_vecs[0].y * eigen_val[0]));
Point p2 = cntr - 0.005 * Point(static_cast<int>(eigen_vecs[1].x * eigen_val[1]), static_cast<int>(eigen_vecs[1].y * eigen_val[1]));
drawAxis(img, cntr, p1, Scalar(0, 255, 0), 1);
putText(img, s = "Y-axis", p1, cv::FONT_HERSHEY_COMPLEX_SMALL, 1, cv::Scalar(255, 0, 100));
drawAxis(img, cntr, p2, Scalar(255, 255, 0), 5);
putText(img, s = "X-axis", p2/1.1 , cv::FONT_HERSHEY_COMPLEX_SMALL, 1, cv::Scalar(255, 0, 255));
double angle = atan2(eigen_vecs[0].y, eigen_vecs[0].x); // orientation in radians
return angle;
}
int main(int argc, char** argv)
{
// Load image
CommandLineParser parser(argc, argv, "{#input | circle3.bmp | input image}");
parser.about("This program demonstrates how to use OpenCV PCA to extract the orientation of an object.\n");
parser.printMessage();
Mat src = imread(parser.get<String>("#input"));
// Check if image is loaded successfully
if (src.empty())
{
cout << "Problem loading image!!!" << endl;
return EXIT_FAILURE;
}
imshow("src", src);
// Convert image to grayscale
Mat gray;
cvtColor(src, gray, COLOR_BGR2GRAY);
// Convert image to binary
Mat bw;
threshold(gray, bw, 70, 255, THRESH_BINARY | THRESH_OTSU);
// Find all the contours in the thresholded image
vector<vector<Point> > contours;
findContours(bw, contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);
for (size_t i = 0; i < contours.size(); i++)
{
// Calculate the area of each contour
double area = contourArea(contours[i]);
// Ignore contours that are too small or too large
if (area < 1e2 || 1e5 < area) continue;
// Draw each contour only for visualisation purposes
drawContours(src, contours, static_cast<int>(i), Scalar(0, 0, 255), 2);
// Find the orientation of each shape
getOrientation(contours[i], src);
}
/// Get the moments
vector<Moments> mu(contours.size());
for (size_t i = 0; i < contours.size(); i++)
{
mu[i] = moments(contours[i]);
}
/// Get the mass centers
vector<Point2f> mc(contours.size());
for (size_t i = 0; i < contours.size(); i++)
{
//add 1e-5 to avoid division by zero
mc[i] = Point2f(static_cast<float>(mu[i].m10 / (mu[i].m00 + 1e-5)),
static_cast<float>(mu[i].m01 / (mu[i].m00 + 1e-5)));
}
// Label each contour with its index at its mass center
// (moved out of the loop above so every mc[i] is computed before it is read)
for (size_t i = 0; i < contours.size(); i++)
{
std::stringstream ss; ss << i;
putText(src, ss.str(), mc[i] + Point2f(10, -10), cv::FONT_HERSHEY_COMPLEX_SMALL, 1, cv::Scalar(255, 0, 255));
}
imshow("output", src);
cout << "\t Info: Area and angle \n";
for (size_t i = 0; i < contours.size(); i++)
{
cout << " * Contour[" << i << "] - Center: "<< mc[i]
<< " - Area: " << contourArea(contours[i]) << " - Angle X: " << getOrientation(contours[i],src)*180/CV_PI << endl;
}
waitKey();
return 0;
}
The only thing I still want to know is how to draw the coordinate system in the corner of the image, because I don't understand the angle results.
The results of the final code: https://imgur.com/l7t9bns
And relevant information: https://imgur.com/OuE79rR
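For the coordinate system question, here is a minimal sketch (my own, assuming the standard image frame drawn in the top-left corner is what is wanted), which also explains the numbers: atan2 returns an angle in (-pi, pi] measured from the positive x axis, and in OpenCV's image frame x grows to the right while y grows downward, so positive angles point "down-right".

// Sketch: draw the image coordinate frame in the top-left corner.
// Call drawImageFrame(src) before imshow("output", src).
void drawImageFrame(Mat &img)
{
    Point origin(30, 30);
    arrowedLine(img, origin, origin + Point(50, 0), Scalar(0, 0, 255), 2); // x axis (right)
    arrowedLine(img, origin, origin + Point(0, 50), Scalar(0, 255, 0), 2); // y axis (down)
    putText(img, "X", origin + Point(55, 5), FONT_HERSHEY_COMPLEX_SMALL, 1, Scalar(0, 0, 255));
    putText(img, "Y", origin + Point(-8, 70), FONT_HERSHEY_COMPLEX_SMALL, 1, Scalar(0, 255, 0));
}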
I am using this code to model the Karhunen–Loève theorem, but it doesn't work. Could someone help me model this theorem using C++?
I am doing a research project on weather prediction, and I need to extract clouds from visible satellite images. If anyone knows about this, please help me. I use the Visual Studio IDE, Qt, and the OpenCV libraries.
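For context (my own summary, not part of the original post): PCA is the discrete Karhunen–Loève transform, so the code below already computes the KL basis. With sample mean $\mu$ and covariance matrix $C$, the KL expansion of a sample $x$ is

$$x \approx \mu + \sum_{j=1}^{k} \langle x - \mu,\ \varphi_j \rangle\, \varphi_j, \qquad C\,\varphi_j = \lambda_j \varphi_j,$$

where cv::PCA stores $\mu$ in pca_analysis.mean, the eigenvectors $\varphi_j$ as the rows of pca_analysis.eigenvectors, and the eigenvalues $\lambda_j$ in pca_analysis.eigenvalues.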
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
// Function declarations
void drawAxis(Mat&, Point, Point, Scalar, const float);
double getOrientation(const vector<Point> &, Mat&);
void drawAxis(Mat& img, Point p, Point q, Scalar colour, const float scale = 0.2)
{
double angle;
double hypotenuse;
angle = atan2( (double) p.y - q.y, (double) p.x - q.x ); // angle in radians
hypotenuse = sqrt( (double) (p.y - q.y) * (p.y - q.y) + (p.x - q.x) * (p.x - q.x));
// double degrees = angle * 180 / CV_PI; // convert radians to degrees (0-180 range)
// cout << "Degrees: " << abs(degrees - 180) << endl; // angle in 0-360 degrees range
// Here we lengthen the arrow by a factor of scale
q.x = (int) (p.x - scale * hypotenuse * cos(angle));
q.y = (int) (p.y - scale * hypotenuse * sin(angle));
line(img, p, q, colour, 1, CV_AA);
// create the arrow hooks
p.x = (int) (q.x + 9 * cos(angle + CV_PI / 4));
p.y = (int) (q.y + 9 * sin(angle + CV_PI / 4));
line(img, p, q, colour, 1, CV_AA);
p.x = (int) (q.x + 9 * cos(angle - CV_PI / 4));
p.y = (int) (q.y + 9 * sin(angle - CV_PI / 4));
line(img, p, q, colour, 1, CV_AA);
}
double getOrientation(const vector<Point> &pts, Mat &img)
{
//Construct a buffer used by the pca analysis
int sz = static_cast<int>(pts.size());
Mat data_pts = Mat(sz, 2, CV_64FC1);
for (int i = 0; i < data_pts.rows; ++i)
{
data_pts.at<double>(i, 0) = pts[i].x;
data_pts.at<double>(i, 1) = pts[i].y;
}
//Perform PCA analysis
PCA pca_analysis(data_pts, Mat(), CV_PCA_DATA_AS_ROW);
//Store the center of the object
Point cntr = Point(static_cast<int>(pca_analysis.mean.at<double>(0, 0)),
static_cast<int>(pca_analysis.mean.at<double>(0, 1)));
//Store the eigenvalues and eigenvectors
vector<Point2d> eigen_vecs(2);
vector<double> eigen_val(2);
for (int i = 0; i < 2; ++i)
{
eigen_vecs[i] = Point2d(pca_analysis.eigenvectors.at<double>(i, 0),
pca_analysis.eigenvectors.at<double>(i, 1));
eigen_val[i] = pca_analysis.eigenvalues.at<double>(0, i);
}
// Draw the principal components
circle(img, cntr, 3, Scalar(255, 0, 255), 2);
Point p1 = cntr + 0.02 * Point(static_cast<int>(eigen_vecs[0].x * eigen_val[0]), static_cast<int>(eigen_vecs[0].y * eigen_val[0]));
Point p2 = cntr - 0.02 * Point(static_cast<int>(eigen_vecs[1].x * eigen_val[1]), static_cast<int>(eigen_vecs[1].y * eigen_val[1]));
drawAxis(img, cntr, p1, Scalar(0, 255, 0), 1);
drawAxis(img, cntr, p2, Scalar(255, 255, 0), 5);
double angle = atan2(eigen_vecs[0].y, eigen_vecs[0].x); // orientation in radians
return angle;
}
int main(int, char** argv)
{
// Load image
// Mat src = imread("pca_test1.jpg");
Mat src = imread(argv[1]);
// Check if image is loaded successfully
if(!src.data || src.empty())
{
cout << "Problem loading image!!!" << endl;
return EXIT_FAILURE;
}
imshow("src", src);
// Convert image to grayscale
Mat gray;
cvtColor(src, gray, COLOR_BGR2GRAY);
// Convert image to binary
Mat bw;
threshold(gray, bw, 50, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
// Find all the contours in the thresholded image
vector<Vec4i> hierarchy;
vector<vector<Point> > contours;
findContours(bw, contours, hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
for (size_t i = 0; i < contours.size(); ++i)
{
// Calculate the area of each contour
double area = contourArea(contours[i]);
// Ignore contours that are too small or too large
if (area < 1e2 || 1e5 < area) continue;
// Draw each contour only for visualisation purposes
drawContours(src, contours, static_cast<int>(i), Scalar(0, 0, 255), 2, 8, hierarchy, 0);
// Find the orientation of each shape
getOrientation(contours[i], src);
}
imshow("output", src);
waitKey(0);
return 0;
}
I am trying to run a sample PCA code from OpenCV, from this link:
PCA example.
But when I run it, it crashes. I debugged it and saw that it breaks inside the for loop below, which is in the getOrientation function:
for (int i = 0; i < 2; ++i)
{
eigen_vecs[i] = Point2d(pca_analysis.eigenvectors.at<double>(i, 0),
pca_analysis.eigenvectors.at<double>(i, 1));
eigen_val[i] = pca_analysis.eigenvalues.at<double>(0, i);
}
I searched Stack Overflow before asking; there were questions with similar titles, but not the same error. Any help? Thanks.
Here is the sample code:
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
// Function declarations
void drawAxis(Mat&, Point, Point, Scalar, const float);
double getOrientation(const vector<Point> &, Mat&);
void drawAxis(Mat& img, Point p, Point q, Scalar colour, const float scale = 0.2)
{
double angle;
double hypotenuse;
angle = atan2((double)p.y - q.y, (double)p.x - q.x); // angle in radians
hypotenuse = sqrt((double)(p.y - q.y) * (p.y - q.y) + (p.x - q.x) * (p.x - q.x));
// double degrees = angle * 180 / CV_PI; // convert radians to degrees (0-180 range)
// cout << "Degrees: " << abs(degrees - 180) << endl; // angle in 0-360 degrees range
// Here we lengthen the arrow by a factor of scale
q.x = (int)(p.x - scale * hypotenuse * cos(angle));
q.y = (int)(p.y - scale * hypotenuse * sin(angle));
line(img, p, q, colour, 1, CV_AA);
// create the arrow hooks
p.x = (int)(q.x + 9 * cos(angle + CV_PI / 4));
p.y = (int)(q.y + 9 * sin(angle + CV_PI / 4));
line(img, p, q, colour, 1, CV_AA);
p.x = (int)(q.x + 9 * cos(angle - CV_PI / 4));
p.y = (int)(q.y + 9 * sin(angle - CV_PI / 4));
line(img, p, q, colour, 1, CV_AA);
}
double getOrientation(const vector<Point> &pts, Mat &img)
{
//Construct a buffer used by the pca analysis
int sz = static_cast<int>(pts.size());
Mat data_pts = Mat(sz, 2, CV_64FC1);
for (int i = 0; i < data_pts.rows; ++i)
{
data_pts.at<double>(i, 0) = pts[i].x;
data_pts.at<double>(i, 1) = pts[i].y;
}
//Perform PCA analysis
PCA pca_analysis(data_pts, Mat(), CV_PCA_DATA_AS_ROW);
//Store the center of the object
Point cntr = Point(static_cast<int>(pca_analysis.mean.at<double>(0, 0)),
static_cast<int>(pca_analysis.mean.at<double>(0, 1)));
//Store the eigenvalues and eigenvectors
vector<Point2d> eigen_vecs(2);
vector<double> eigen_val(2);
for (int i = 0; i < 2; ++i)
{
eigen_vecs[i] = Point2d(pca_analysis.eigenvectors.at<double>(i, 0),
pca_analysis.eigenvectors.at<double>(i, 1));
eigen_val[i] = pca_analysis.eigenvalues.at<double>(0, i);
}
// Draw the principal components
circle(img, cntr, 3, Scalar(255, 0, 255), 2);
Point p1 = cntr + 0.02 * Point(static_cast<int>(eigen_vecs[0].x * eigen_val[0]), static_cast<int>(eigen_vecs[0].y * eigen_val[0]));
Point p2 = cntr - 0.02 * Point(static_cast<int>(eigen_vecs[1].x * eigen_val[1]), static_cast<int>(eigen_vecs[1].y * eigen_val[1]));
drawAxis(img, cntr, p1, Scalar(0, 255, 0), 1);
drawAxis(img, cntr, p2, Scalar(255, 255, 0), 5);
double angle = atan2(eigen_vecs[0].y, eigen_vecs[0].x); // orientation in radians
return angle;
}
int main(int, char** argv)
{
// Load image
Mat src = imread("C:/Users/aydin/Desktop/c++/pictures/pca_test1.jpg");
//Mat src = imread(argv[1]);
// Check if image is loaded successfully
if (!src.data || src.empty())
{
cout << "Problem loading image!!!" << endl;
return EXIT_FAILURE;
}
imshow("src", src);
// Convert image to grayscale
Mat gray;
cvtColor(src, gray, COLOR_BGR2GRAY);
// Convert image to binary
Mat bw;
threshold(gray, bw, 50, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);
// Find all the contours in the thresholded image
vector<Vec4i> hierarchy;
vector<vector<Point> > contours;
findContours(bw, contours, hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
for (size_t i = 0; i < contours.size(); ++i)
{
// Calculate the area of each contour
double area = contourArea(contours[i]);
// Ignore contours that are too small or too large
if (area < 1e2 || 1e5 < area) continue;
// Draw each contour only for visualisation purposes
drawContours(src, contours, static_cast<int>(i), Scalar(0, 0, 255), 2, 8, hierarchy, 0);
// Find the orientation of each shape
//getOrientation(contours[i], src);
}
imshow("output", src);
waitKey(0);
return 0;
}
No, it is not about the environment. I don't know how you made it work, but afterwards I saw the same error here, and the poster provided corrected code, saying the original had bugs. However, comparing that code with the original one, I saw that in the for loop I mentioned as problematic,
for (int i = 0; i < 2; ++i)
{
eigen_vecs[i] = Point2d(pca_analysis.eigenvectors.at<double>(i, 0),
pca_analysis.eigenvectors.at<double>(i, 1));
eigen_val[i] = pca_analysis.eigenvalues.at<double>(0,i);
}
he changed the last parenthesis from (0, i) to (i). I did the same and it worked, but I don't understand why. Can you tell me why?
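For the record, my understanding of why this works: cv::PCA stores the eigenvalues as an N x 1 column vector, one row per principal component. at<double>(0, i) reads row 0, column i, which for i = 1 indexes past the single column and is out of bounds (undefined behaviour, hence the crash on some builds). at<double>(i) is shorthand for at<double>(i, 0) and reads the correct element. The corrected loop:

for (int i = 0; i < 2; ++i)
{
    eigen_vecs[i] = Point2d(pca_analysis.eigenvectors.at<double>(i, 0),
                            pca_analysis.eigenvectors.at<double>(i, 1));
    eigen_val[i] = pca_analysis.eigenvalues.at<double>(i); // row i, column 0
}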
I need to binarize images with text. It works very well, but in some cases the output is empty (a white image).
The code:
/*
* Compile
* # g++ txtbin.cpp -o txtbin `pkg-config opencv --cflags --libs`
*
* Run
* # ./txtbin input.jpg output.png
*/
#include "string"
#include "fstream"
#include "/usr/include/opencv2/opencv.hpp"
#include "/usr/include/boost/tuple/tuple.hpp"
using namespace std;
using namespace cv;
using namespace boost;
void CalcBlockMeanVariance(Mat& Img, Mat& Res, float blockSide=21, float contrast=0.01){
/*
* blockSide: set greater for larger fonts in image
* contrast: set smaller for lower contrast image
*/
Mat I;
Img.convertTo(I, CV_32FC1);
Res = Mat::zeros(Img.rows / blockSide, Img.cols / blockSide, CV_32FC1);
Mat inpaintmask;
Mat patch;
Mat smallImg;
Scalar m, s;
for(int i = 0; i < Img.rows - blockSide; i += blockSide){
for(int j = 0; j < Img.cols - blockSide; j += blockSide){
patch = I(Range(i, i + blockSide + 1), Range(j, j + blockSide + 1));
meanStdDev(patch, m, s);
if(s[0] > contrast){
Res.at<float>(i / blockSide, j / blockSide) = m[0];
}
else{
Res.at<float>(i / blockSide, j / blockSide) = 0;
}
}
}
resize(I, smallImg, Res.size());
threshold(Res, inpaintmask, 0.02, 1.0, THRESH_BINARY);
Mat inpainted;
smallImg.convertTo(smallImg, CV_8UC1, 255);
inpaintmask.convertTo(inpaintmask, CV_8UC1);
inpaint(smallImg, inpaintmask, inpainted, 5, INPAINT_TELEA);
resize(inpainted, Res, Img.size());
Res.convertTo(Res, CV_32FC1, 1.0 / 255.0);
}
tuple<int, int, int, int> detect_text_box(string input, Mat& res, bool draw_contours=false){
Mat large = imread(input);
bool test_output = false;
int
top = large.rows,
bottom = 0,
left = large.cols,
right = 0;
int
rect_bottom,
rect_right;
Mat rgb;
// downsample and use it for processing
pyrDown(large, rgb);
Mat small;
cvtColor(rgb, small, CV_BGR2GRAY);
// morphological gradient
Mat grad;
Mat morphKernel = getStructuringElement(MORPH_ELLIPSE, Size(3, 3));
morphologyEx(small, grad, MORPH_GRADIENT, morphKernel);
// binarize
Mat bw;
threshold(grad, bw, 0.0, 255.0, THRESH_BINARY | THRESH_OTSU);
// connect horizontally oriented regions
Mat connected;
morphKernel = getStructuringElement(MORPH_RECT, Size(9, 1));
morphologyEx(bw, connected, MORPH_CLOSE, morphKernel);
// find contours
Mat mask = Mat::zeros(bw.size(), CV_8UC1);
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours(connected, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
// filter contours
for(int idx = 0; idx >= 0; idx = hierarchy[idx][0]){
Rect rect = boundingRect(contours[idx]);
Mat maskROI(mask, rect);
maskROI = Scalar(0, 0, 0);
// fill the contour
drawContours(mask, contours, idx, Scalar(255, 255, 255), CV_FILLED);
// ratio of non-zero pixels in the filled region
double r = (double)countNonZero(maskROI) / (rect.width * rect.height);
// assume at least 45% of the area is filled if it contains text
if (r > 0.45 &&
(rect.height > 8 && rect.width > 8) // constraints on region size
// these two conditions alone are not very robust. better to use something
//like the number of significant peaks in a horizontal projection as a third condition
){
if(draw_contours){
rectangle(res, Rect(rect.x * 2, rect.y * 2, rect.width * 2, rect.height * 2), Scalar(0, 255, 0), 2);
}
if(test_output){
rectangle(rgb, rect, Scalar(0, 255, 0), 2);
}
if(rect.y < top){
top = rect.y;
}
rect_bottom = rect.y + rect.height;
if(rect_bottom > bottom){
bottom = rect_bottom;
}
if(rect.x < left){
left = rect.x;
}
rect_right = rect.x + rect.width;
if(rect_right > right){
right = rect_right;
}
}
}
if(draw_contours){
rectangle(res, Point(left * 2, top * 2), Point(right * 2, bottom * 2), Scalar(0, 0, 255), 2);
}
if(test_output){
rectangle(rgb, Point(left, top), Point(right, bottom), Scalar(0, 0, 255), 2);
imwrite(string("test_text_contours.jpg"), rgb);
}
return make_tuple(left * 2, top * 2, (right - left) * 2, (bottom - top) * 2);
}
int main(int argc, char* argv[]){
string input;
string output = "output.png";
int
width = 0,
height = 0;
bool
crop = false,
draw = false;
float margin = 0;
// Return error if arguments are missing
if(argc < 3){
cerr << "\nUsage: txtbin input [options] output\n\n"
"Options:\n"
"\t-w <number> -- set max width (keeps aspect ratio)\n"
"\t-h <number> -- set max height (keeps aspect ratio)\n"
"\t-c -- crop text content contour\n"
"\t-m <number> -- add margins (number in %)\n"
"\t-d -- draw text content contours (debugging)\n" << endl;
return 1;
}
// Parse arguments
for(int i = 1; i < argc; i++){
if(i == 1){
input = string(argv[i]);
// Return error if input file is invalid
ifstream stream(input.c_str());
if(!stream.good()){
cerr << "Error: Input file is invalid!" << endl;
return 1;
}
}
else if(string(argv[i]) == "-w"){
width = atoi(argv[++i]);
}
else if(string(argv[i]) == "-h"){
height = atoi(argv[++i]);
}
else if(string(argv[i]) == "-c"){
crop = true;
}
else if(string(argv[i]) == "-m"){
margin = atoi(argv[++i]);
}
else if(string(argv[i]) == "-d"){
draw = true;
}
else if(i == argc - 1){
output = string(argv[i]);
}
}
Mat Img = imread(input, CV_LOAD_IMAGE_GRAYSCALE);
Mat res;
Img.convertTo(Img, CV_32FC1, 1.0 / 255.0);
CalcBlockMeanVariance(Img, res);
res = 1.0 - res;
res = Img + res;
threshold(res, res, 0.85, 1, THRESH_BINARY);
int
txt_x,
txt_y,
txt_width,
txt_height;
if(crop || draw){
tie(txt_x, txt_y, txt_width, txt_height) = detect_text_box(input, res, draw);
}
if(crop){
//res = res(Rect(txt_x, txt_y, txt_width, txt_height)).clone();
res = res(Rect(txt_x, txt_y, txt_width, txt_height));
}
if(margin){
int border = res.cols * margin / 100;
copyMakeBorder(res, res, border, border, border, border, BORDER_CONSTANT, Scalar(255, 255, 255));
}
float
width_input = res.cols,
height_input = res.rows;
bool resized = false;
// Downscale image
if(width > 0 && width_input > width){
float scale = width_input / width;
width_input /= scale;
height_input /= scale;
resized = true;
}
if(height > 0 && height_input > height){
float scale = height_input / height;
width_input /= scale;
height_input /= scale;
resized = true;
}
if(resized){
resize(res, res, Size(round(width_input), round(height_input)));
}
imwrite(output, res * 255);
return 0;
}
Ok :)
Set blockSide smaller (7, for instance) and it will give you the result image shown below. It depends on the font size: smaller fonts need a smaller block size, otherwise the text will be filtered out and you get an empty image.
#include <iostream>
#include <vector>
#include <stdio.h>
#include <stdarg.h>
#include "/usr/include/opencv2/opencv.hpp"
#include "fstream"
#include "iostream"
using namespace std;
using namespace cv;
void CalcBlockMeanVariance(Mat& Img,Mat& Res,float blockSide=9) // blockSide - the parameter (set greater for larger font on image)
{
Mat I;
Img.convertTo(I,CV_32FC1);
Res=Mat::zeros(Img.rows/blockSide,Img.cols/blockSide,CV_32FC1);
Mat inpaintmask;
Mat patch;
Mat smallImg;
Scalar m,s;
for(int i=0;i<Img.rows-blockSide;i+=blockSide)
{
for (int j=0;j<Img.cols-blockSide;j+=blockSide)
{
patch=I(Range(i,i+blockSide+1),Range(j,j+blockSide+1));
cv::meanStdDev(patch,m,s);
if(s[0]>0.01) // Thresholding parameter (set smaller for lower contrast image)
{
Res.at<float>(i/blockSide,j/blockSide)=m[0];
}else
{
Res.at<float>(i/blockSide,j/blockSide)=0;
}
}
}
cv::resize(I,smallImg,Res.size());
cv::threshold(Res,inpaintmask,0.02,1.0,cv::THRESH_BINARY);
Mat inpainted;
smallImg.convertTo(smallImg,CV_8UC1,255);
inpaintmask.convertTo(inpaintmask,CV_8UC1);
inpaint(smallImg, inpaintmask, inpainted, 5, INPAINT_TELEA);
cv::resize(inpainted,Res,Img.size());
Res.convertTo(Res,CV_32FC1,1.0/255.0);
}
int main( int argc, char** argv )
{
namedWindow("Img");
namedWindow("Edges");
//Mat Img=imread("D:\\ImagesForTest\\BookPage.JPG",0);
Mat Img=imread("test2.jpg",0);
Mat res;
Img.convertTo(Img,CV_32FC1,1.0/255.0);
CalcBlockMeanVariance(Img,res);
res=1.0-res;
res=Img+res;
imshow("Img",Img);
cv::threshold(res,res,0.85,1,cv::THRESH_BINARY);
cv::resize(res,res,cv::Size(res.cols/2,res.rows/2));
imwrite("result.jpg",res*255);
imshow("Edges",res);
waitKey(0);
return 0;
}
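To apply this advice to the earlier txtbin code, the only change needed is at the call site, for example:

// Smaller fonts need a smaller block size (7 instead of the default 21).
CalcBlockMeanVariance(Img, res, 7);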
I am trying to implement Kalman filter based mouse tracking (as a first test) using a velocity-acceleration model.
I want to try out this simple model; my state transition equations are:
X(k) = [x(k), y(k)]' (Position)
V(k) = [vx(k), vy(k)]' (Velocity)
X(k) = X(k-1) + dt*V(k-1) + 0.5*dt*dt*a(k-1)
V(k) = V(k-1) + dt*a(k-1)
a(k) = a(k-1)
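In matrix form (my own restatement, matching the transitionMatrix filled in below; note that in the code dt and T are declared int, so the expression (dt/T) is integer division and silently evaluates to 0 — casting, e.g. (double)dt/T, gives the intended fraction):

$$
\begin{bmatrix} x_k \\ y_k \\ v_{x,k} \\ v_{y,k} \\ a_k \end{bmatrix}
=
\begin{bmatrix}
1 & 0 & \Delta t & 0 & \tfrac{1}{2}\Delta t^{2} \\
0 & 1 & 0 & \Delta t & \tfrac{1}{2}\Delta t^{2} \\
0 & 0 & 1 & 0 & \Delta t \\
0 & 0 & 0 & 1 & \Delta t \\
0 & 0 & 0 & 0 & 1
\end{bmatrix}
\begin{bmatrix} x_{k-1} \\ y_{k-1} \\ v_{x,k-1} \\ v_{y,k-1} \\ a_{k-1} \end{bmatrix}
$$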
Using this, I have written the following piece of code:
#include <iostream>
#include <vector>
#include <cstdio>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/tracking.hpp>
using namespace cv;
using namespace std;
struct mouse_info_struct { int x,y; };
struct mouse_info_struct mouse_info = {-1,-1}, last_mouse;
void on_mouse(int event, int x, int y, int flags, void* param)
{
//if (event == CV_EVENT_LBUTTONUP)
{
last_mouse = mouse_info;
mouse_info.x = x;
mouse_info.y = y;
}
}
void printmat(const cv::Mat &__mat, std::string __str)
{
std::cout << "--------" << __str << "----------\n";
for (int i=0 ; i<__mat.rows ; ++i)
{
for (int j=0 ; j<__mat.cols ; ++j)
std::cout << __mat.at<double>(i,j) << " ";
std::cout << std::endl;
}
std::cout << "-------------------------------------\n";
}
int main (int argc, char * const argv[])
{
int nStates = 5, nMeasurements = 2, nInputs = 1;
Mat img(500, 900, CV_8UC3);
KalmanFilter KF(nStates, nMeasurements, nInputs, CV_64F);
Mat state(nStates, 1, CV_64F); /* (x, y, Vx, Vy, a) */
Mat measurement(nMeasurements,1,CV_64F); measurement.setTo(Scalar(0));
Mat prevMeasurement(nMeasurements,1,CV_64F); prevMeasurement.setTo(Scalar(0));
int key = -1, dt=100, T=1000;
float /*a=100, acclErrMag = 0.05,*/ measurementErrVar = 100, noiseVal=0.001, covNoiseVal=0.9e-4;
namedWindow("Mouse-Kalman");
setMouseCallback("Mouse-Kalman", on_mouse, 0);
//while ( (char)(key=cv::waitKey(100)) != 'q' )
{
/// A
KF.transitionMatrix.at<double>(0,0) = 1;
KF.transitionMatrix.at<double>(0,1) = 0;
KF.transitionMatrix.at<double>(0,2) = (dt/T);
KF.transitionMatrix.at<double>(0,3) = 0;
KF.transitionMatrix.at<double>(0,4) = 0.5*(dt/T)*(dt/T);
KF.transitionMatrix.at<double>(1,0) = 0;
KF.transitionMatrix.at<double>(1,1) = 1;
KF.transitionMatrix.at<double>(1,2) = 0;
KF.transitionMatrix.at<double>(1,3) = (dt/T);
KF.transitionMatrix.at<double>(1,4) = 0.5*(dt/T)*(dt/T);
KF.transitionMatrix.at<double>(2,0) = 0;
KF.transitionMatrix.at<double>(2,1) = 0;
KF.transitionMatrix.at<double>(2,2) = 1;
KF.transitionMatrix.at<double>(2,3) = 0;
KF.transitionMatrix.at<double>(2,4) = (dt/T);
KF.transitionMatrix.at<double>(3,0) = 0;
KF.transitionMatrix.at<double>(3,1) = 0;
KF.transitionMatrix.at<double>(3,2) = 0;
KF.transitionMatrix.at<double>(3,3) = 1;
KF.transitionMatrix.at<double>(3,4) = (dt/T);
KF.transitionMatrix.at<double>(4,0) = 0;
KF.transitionMatrix.at<double>(4,1) = 0;
KF.transitionMatrix.at<double>(4,2) = 0;
KF.transitionMatrix.at<double>(4,3) = 0;
KF.transitionMatrix.at<double>(4,4) = 1;
/// Initial estimate of state variables
KF.statePost = cv::Mat::zeros(nStates, 1,CV_64F);
KF.statePost.at<double>(0) = mouse_info.x;
KF.statePost.at<double>(1) = mouse_info.y;
KF.statePost.at<double>(2) = 0;
KF.statePost.at<double>(3) = 0;
KF.statePost.at<double>(4) = 0;
KF.statePre = KF.statePost;
/// Ex or Q
setIdentity(KF.processNoiseCov, Scalar::all(noiseVal));
/// Initial covariance estimate Sigma_bar(t) or P'(k)
setIdentity(KF.errorCovPre, Scalar::all(1000));
/// Sigma(t) or P(k)
setIdentity(KF.errorCovPost, Scalar::all(1000));
/// B
KF.controlMatrix = cv::Mat(nStates, nInputs,CV_64F);
KF.controlMatrix.at<double>(0,0) = 0;
KF.controlMatrix.at<double>(1,0) = 0;
KF.controlMatrix.at<double>(2,0) = 0;
KF.controlMatrix.at<double>(3,0) = 0;
KF.controlMatrix.at<double>(4,0) = 1;
/// H
KF.measurementMatrix = cv::Mat::eye(nMeasurements, nStates, CV_64F);
/// Ez or R
setIdentity(KF.measurementNoiseCov, Scalar::all(measurementErrVar*measurementErrVar));
printmat(KF.controlMatrix, "KF.controlMatrix");
printmat(KF.transitionMatrix, "KF.transitionMatrix");
printmat(KF.statePre,"KF.statePre");
printmat(KF.processNoiseCov, "KF.processNoiseCov");
printmat(KF.measurementMatrix, "KF.measurementMatrix");
printmat(KF.measurementNoiseCov, "KF.measurementNoiseCov");
printmat(KF.errorCovPost,"KF.errorCovPost");
printmat(KF.errorCovPre,"KF.errorCovPre");
printmat(KF.statePost,"KF.statePost");
while (mouse_info.x < 0 || mouse_info.y < 0)
{
imshow("Mouse-Kalman", img);
waitKey(30);
continue;
}
while ( (char)key != 's' )
{
/// MAKE A MEASUREMENT
measurement.at<double>(0) = mouse_info.x;
measurement.at<double>(1) = mouse_info.y;
/// MEASUREMENT UPDATE
Mat estimated = KF.correct(measurement);
/// STATE UPDATE
Mat prediction = KF.predict();
cv::Mat u(nInputs,1,CV_64F);
u.at<double>(0,0) = 0.0 * sqrt(pow((prevMeasurement.at<double>(0) - measurement.at<double>(0)),2)
+ pow((prevMeasurement.at<double>(1) - measurement.at<double>(1)),2));
/// STORE ALL DATA
Point predictPt(prediction.at<double>(0),prediction.at<double>(1));
Point estimatedPt(estimated.at<double>(0),estimated.at<double>(1));
Point measuredPt(measurement.at<double>(0),measurement.at<double>(1));
/// PLOT POINTS
#define drawCross( center, color, d ) \
line( img, Point( center.x - d, center.y - d ), \
Point( center.x + d, center.y + d ), color, 2, CV_AA, 0); \
line( img, Point( center.x + d, center.y - d ), \
Point( center.x - d, center.y + d ), color, 2, CV_AA, 0 )
/// DRAW ALL ON IMAGE
img = Scalar::all(0);
drawCross( predictPt, Scalar(255,255,255), 9 ); //WHITE
drawCross( estimatedPt, Scalar(0,0,255), 6 ); //RED
drawCross( measuredPt, Scalar(0,255,0), 3 ); //GREEN
line( img, estimatedPt, measuredPt, Scalar(100,255,255), 3, CV_AA, 0 );
line( img, estimatedPt, predictPt, Scalar(0,255,255), 3, CV_AA, 0 );
prevMeasurement = measurement;
imshow( "Mouse-Kalman", img );
key=cv::waitKey(10);
}
}
return 0;
}
Here is the output of the code: http://www.youtube.com/watch?v=9_xd4HSz8_g
As you can see, the tracking is very, very slow. I don't understand what is wrong with the model or why the estimation is so slow. I don't expect there to be any control input.
Can anyone explain this?
I have modified my code and I am posting it for those who want to tweak it and play around further. The main problem was the choice of covariances.
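A one-line intuition for why the covariances dominate the behaviour (my own summary): the Kalman gain is

$$K = P H^{\top} \left( H P H^{\top} + R \right)^{-1},$$

so when the measurement noise covariance $R$ is large relative to the predicted state covariance $P$ (which the process noise $Q$ inflates each step), $K$ shrinks toward zero and the corrected estimate barely moves toward new measurements — exactly the slow, laggy tracking seen in the video. Lowering $R$ or raising $Q$ makes the filter more responsive at the cost of less smoothing.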
#include <iostream>
#include <vector>
#include <cstdio>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/video/tracking.hpp>
using namespace cv;
using namespace std;
struct mouse_info_struct { int x,y; };
struct mouse_info_struct mouse_info = {-1,-1}, last_mouse;
vector<Point> mousev,kalmanv;
int trackbarProcessNoiseCov = 10, trackbarMeasurementNoiseCov = 10, trackbarStateEstimationErrorCov = 10;
float processNoiseCov=10, measurementNoiseCov = 1000, stateEstimationErrorCov = 50;
int trackbarProcessNoiseCovMax=10000, trackbarMeasurementNoiseCovMax = 10000,
trackbarStateEstimationErrorCovMax = 5000;
float processNoiseCovMin=0, measurementNoiseCovMin = 0,
stateEstimationErrorCovMin = 0;
float processNoiseCovMax=100, measurementNoiseCovMax = 5000,
stateEstimationErrorCovMax = 5000;
int nStates = 5, nMeasurements = 2, nInputs = 1;
KalmanFilter KF(nStates, nMeasurements, nInputs, CV_64F);
void on_mouse(int event, int x, int y, int flags, void* param)
{
last_mouse = mouse_info;
mouse_info.x = x;
mouse_info.y = y;
}
void on_trackbarProcessNoiseCov( int, void* )
{
processNoiseCov = processNoiseCovMin +
(trackbarProcessNoiseCov * (processNoiseCovMax-processNoiseCovMin)/trackbarProcessNoiseCovMax);
setIdentity(KF.processNoiseCov, Scalar::all(processNoiseCov));
std::cout << "\nProcess Noise Cov: " << processNoiseCov;
std::cout << "\nMeasurement Noise Cov: " << measurementNoiseCov << std::endl;
}
void on_trackbarMeasurementNoiseCov( int, void* )
{
measurementNoiseCov = measurementNoiseCovMin +
(trackbarMeasurementNoiseCov * (measurementNoiseCovMax-measurementNoiseCovMin)/trackbarMeasurementNoiseCovMax);
setIdentity(KF.measurementNoiseCov, Scalar::all(measurementNoiseCov));
std::cout << "\nProcess Noise Cov: " << processNoiseCov;
std::cout << "\nMeasurement Noise Cov: " << measurementNoiseCov << std::endl;
}
int main (int argc, char * const argv[])
{
Mat img(500, 1000, CV_8UC3);
Mat state(nStates, 1, CV_64F);/* (x, y, Vx, Vy, a) */
Mat measurementNoise(nMeasurements, 1, CV_64F), processNoise(nStates, 1, CV_64F);
Mat measurement(nMeasurements,1,CV_64F); measurement.setTo(Scalar(0.0));
Mat noisyMeasurement(nMeasurements,1,CV_64F); noisyMeasurement.setTo(Scalar(0.0));
Mat prevMeasurement(nMeasurements,1,CV_64F); prevMeasurement.setTo(Scalar(0.0));
Mat prevMeasurement2(nMeasurements,1,CV_64F); prevMeasurement2.setTo(Scalar(0.0));
int key = -1, dt=50, T=1000;
namedWindow("Mouse-Kalman");
setMouseCallback("Mouse-Kalman", on_mouse, 0);
createTrackbar( "Process Noise Cov", "Mouse-Kalman", &trackbarProcessNoiseCov,
trackbarProcessNoiseCovMax, on_trackbarProcessNoiseCov );
createTrackbar( "Measurement Noise Cov", "Mouse-Kalman", &trackbarMeasurementNoiseCov,
trackbarMeasurementNoiseCovMax, on_trackbarMeasurementNoiseCov );
on_trackbarProcessNoiseCov( trackbarProcessNoiseCov, 0 );
on_trackbarMeasurementNoiseCov( trackbarMeasurementNoiseCov, 0 );
//while ( (char)(key=cv::waitKey(100)) != 'q' )
{
/// A (TRANSITION MATRIX INCLUDING VELOCITY AND ACCELERATION MODEL)
KF.transitionMatrix.at<double>(0,0) = 1;
KF.transitionMatrix.at<double>(0,1) = 0;
KF.transitionMatrix.at<double>(0,2) = (dt/T);
KF.transitionMatrix.at<double>(0,3) = 0;
KF.transitionMatrix.at<double>(0,4) = 0.5*(dt/T)*(dt/T);
KF.transitionMatrix.at<double>(1,0) = 0;
KF.transitionMatrix.at<double>(1,1) = 1;
KF.transitionMatrix.at<double>(1,2) = 0;
KF.transitionMatrix.at<double>(1,3) = (dt/T);
KF.transitionMatrix.at<double>(1,4) = 0.5*(dt/T)*(dt/T);
KF.transitionMatrix.at<double>(2,0) = 0;
KF.transitionMatrix.at<double>(2,1) = 0;
KF.transitionMatrix.at<double>(2,2) = 1;
KF.transitionMatrix.at<double>(2,3) = 0;
KF.transitionMatrix.at<double>(2,4) = (dt/T);
KF.transitionMatrix.at<double>(3,0) = 0;
KF.transitionMatrix.at<double>(3,1) = 0;
KF.transitionMatrix.at<double>(3,2) = 0;
KF.transitionMatrix.at<double>(3,3) = 1;
KF.transitionMatrix.at<double>(3,4) = (dt/T);
KF.transitionMatrix.at<double>(4,0) = 0;
KF.transitionMatrix.at<double>(4,1) = 0;
KF.transitionMatrix.at<double>(4,2) = 0;
KF.transitionMatrix.at<double>(4,3) = 0;
KF.transitionMatrix.at<double>(4,4) = 1;
/// Initial estimate of state variables
KF.statePost = cv::Mat::zeros(nStates, 1,CV_64F);
KF.statePost.at<double>(0) = mouse_info.x;
KF.statePost.at<double>(1) = mouse_info.y;
KF.statePost.at<double>(2) = 0.1;
KF.statePost.at<double>(3) = 0.1;
KF.statePost.at<double>(4) = 0.1;
KF.statePre = KF.statePost;
state = KF.statePost;
/// Ex or Q (PROCESS NOISE COVARIANCE)
setIdentity(KF.processNoiseCov, Scalar::all(processNoiseCov));
/// Initial covariance estimate Sigma_bar(t) or P'(k)
setIdentity(KF.errorCovPre, Scalar::all(stateEstimationErrorCov));
/// Sigma(t) or P(k) (STATE ESTIMATION ERROR COVARIANCE)
setIdentity(KF.errorCovPost, Scalar::all(stateEstimationErrorCov));
/// B (CONTROL MATRIX)
KF.controlMatrix = cv::Mat(nStates, nInputs,CV_64F);
KF.controlMatrix.at<double>(0,0) = /*0.5*(dt/T)*(dt/T);//*/0;
KF.controlMatrix.at<double>(1,0) = /*0.5*(dt/T)*(dt/T);//*/0;
KF.controlMatrix.at<double>(2,0) = 0;
KF.controlMatrix.at<double>(3,0) = 0;
KF.controlMatrix.at<double>(4,0) = 1;
/// H (MEASUREMENT MATRIX)
KF.measurementMatrix = cv::Mat::eye(nMeasurements, nStates, CV_64F);
/// Ez or R (MEASUREMENT NOISE COVARIANCE)
setIdentity(KF.measurementNoiseCov, Scalar::all(measurementNoiseCov));
while (mouse_info.x < 0 || mouse_info.y < 0)
{
imshow("Mouse-Kalman", img);
waitKey(30);
continue;
}
prevMeasurement.at<double>(0,0) = 0;
prevMeasurement.at<double>(1,0) = 0;
prevMeasurement2 = prevMeasurement;
while ( (char)key != 's' )
{
/// STATE UPDATE
Mat prediction = KF.predict();
/// MAKE A MEASUREMENT
measurement.at<double>(0) = mouse_info.x;
measurement.at<double>(1) = mouse_info.y;
/// MEASUREMENT NOISE SIMULATION
randn( measurementNoise, Scalar(0),
Scalar::all(sqrtf(measurementNoiseCov)));
noisyMeasurement = measurement + measurementNoise;
/// MEASUREMENT UPDATE
Mat estimated = KF.correct(noisyMeasurement);
cv::Mat u(nInputs,1,CV_64F);
u.at<double>(0,0) = 0.0 * sqrtf(pow((prevMeasurement.at<double>(0) - measurement.at<double>(0)),2)
+ pow((prevMeasurement.at<double>(1) - measurement.at<double>(1)),2));
/// STORE ALL DATA
Point noisyPt(noisyMeasurement.at<double>(0),noisyMeasurement.at<double>(1));
Point estimatedPt(estimated.at<double>(0),estimated.at<double>(1));
Point measuredPt(measurement.at<double>(0),measurement.at<double>(1));
/// PLOT POINTS
#define drawCross( center, color, d ) \
line( img, Point( center.x - d, center.y - d ), \
Point( center.x + d, center.y + d ), color, 2, CV_AA, 0); \
line( img, Point( center.x + d, center.y - d ), \
Point( center.x - d, center.y + d ), color, 2, CV_AA, 0 )
/// DRAW ALL ON IMAGE
img = Scalar::all(0);
drawCross( noisyPt, Scalar(255,255,255), 9 ); //WHITE
drawCross( estimatedPt, Scalar(0,0,255), 6 ); //RED
drawCross( measuredPt, Scalar(0,255,0), 3 ); //GREEN
line( img, estimatedPt, measuredPt, Scalar(100,255,255), 3, CV_AA, 0 );
line( img, estimatedPt, noisyPt, Scalar(0,255,255), 3, CV_AA, 0 );
imshow( "Mouse-Kalman", img );
key=cv::waitKey(dt);
prevMeasurement = measurement;
}
}
return 0;
}