Here I have code that extracts the orientation of an object in an image. I am new to OpenCV and C++, but I need to get this working.
My question is: how do I extract and write out the angle and axis information in this code?
#include "pch.h"
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
using namespace std;
using namespace cv;
// Function declarations
void drawAxis(Mat&, Point, Point, Scalar, const float);
double getOrientation(const vector<Point> &, Mat&);
void drawAxis(Mat& img, Point p, Point q, Scalar colour, const float scale = 0.2)
{
double angle = atan2((double)p.y - q.y, (double)p.x - q.x); // angle in radians
double hypotenuse = sqrt((double)(p.y - q.y) * (p.y - q.y) + (p.x - q.x) * (p.x - q.x));
// Here we lengthen the arrow by a factor of scale
q.x = (int)(p.x - scale * hypotenuse * cos(angle));
q.y = (int)(p.y - scale * hypotenuse * sin(angle));
line(img, p, q, colour, 1, LINE_AA);
// create the arrow hooks
p.x = (int)(q.x + 9 * cos(angle + CV_PI / 4));
p.y = (int)(q.y + 9 * sin(angle + CV_PI / 4));
line(img, p, q, colour, 1, LINE_AA);
p.x = (int)(q.x + 9 * cos(angle - CV_PI / 4));
p.y = (int)(q.y + 9 * sin(angle - CV_PI / 4));
line(img, p, q, colour, 1, LINE_AA);
}
double getOrientation(const vector<Point> &pts, Mat &img)
{
//Construct a buffer used by the pca analysis
int sz = static_cast<int>(pts.size());
Mat data_pts = Mat(sz, 2, CV_64F);
for (int i = 0; i < data_pts.rows; i++)
{
data_pts.at<double>(i, 0) = pts[i].x;
data_pts.at<double>(i, 1) = pts[i].y;
}
//Perform PCA analysis
PCA pca_analysis(data_pts, Mat(), PCA::DATA_AS_ROW);
//Store the center of the object
Point cntr = Point(static_cast<int>(pca_analysis.mean.at<double>(0, 0)),
static_cast<int>(pca_analysis.mean.at<double>(0, 1)));
//Store the eigenvalues and eigenvectors
vector<Point2d> eigen_vecs(2);
vector<double> eigen_val(2);
for (int i = 0; i < 2; i++)
{
eigen_vecs[i] = Point2d(pca_analysis.eigenvectors.at<double>(i, 0),
pca_analysis.eigenvectors.at<double>(i, 1));
eigen_val[i] = pca_analysis.eigenvalues.at<double>(i);
}
// Draw the principal components
circle(img, cntr, 3, Scalar(255, 0, 255), 2);
Point p1 = cntr + 0.02 * Point(static_cast<int>(eigen_vecs[0].x * eigen_val[0]), static_cast<int>(eigen_vecs[0].y * eigen_val[0]));
Point p2 = cntr - 0.02 * Point(static_cast<int>(eigen_vecs[1].x * eigen_val[1]), static_cast<int>(eigen_vecs[1].y * eigen_val[1]));
drawAxis(img, cntr, p1, Scalar(0, 255, 0), 1);
drawAxis(img, cntr, p2, Scalar(255, 255, 0), 5);
double angle = atan2(eigen_vecs[0].y, eigen_vecs[0].x); // orientation in radians
return angle;
}
int main(int argc, char** argv)
{
// Load image
CommandLineParser parser(argc, argv, "{#input | joint2.bmp | input image}");
parser.about("This program demonstrates how to use OpenCV PCA to extract the orientation of an object.\n");
parser.printMessage();
Mat src = imread(parser.get<String>("#input"));
// Check if image is loaded successfully
if (src.empty())
{
cout << "Problem loading image!!!" << endl;
return EXIT_FAILURE;
}
imshow("src", src);
// Convert image to grayscale
Mat gray;
cvtColor(src, gray, COLOR_BGR2GRAY);
// Convert image to binary
Mat bw;
threshold(gray, bw, 200, 255, THRESH_BINARY | THRESH_OTSU);
// Find all the contours in the thresholded image
vector<vector<Point> > contours;
findContours(bw, contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);
for (size_t i = 0; i < contours.size(); i++)
{
// Calculate the area of each contour
double area = contourArea(contours[i]);
// Ignore contours that are too small or too large
if (area < 1e2 || 1e5 < area) continue;
// Draw each contour only for visualisation purposes
drawContours(src, contours, static_cast<int>(i), Scalar(0, 0, 255), 2);
// Find the orientation of each shape
getOrientation(contours[i], src);
}
imshow("output", src);
waitKey();
return 0;
}
Here is the image of the object:
And here is the result:
As you can see, it finds the orientation correctly, but I need the angle, and an indication of which arrow is which axis, to be written out.
I will be very grateful if someone knows how to do it!
EDIT: I have figured out how to find the information about the center, area, and the angle.
/// Get the moments
vector<Moments> mu(contours.size());
for (size_t i = 0; i < contours.size(); i++)
{
mu[i] = moments(contours[i]);
}
/// Get the mass centers
vector<Point2f> mc(contours.size());
for (size_t i = 0; i < contours.size(); i++)
{
//add 1e-5 to avoid division by zero
mc[i] = Point2f(static_cast<float>(mu[i].m10 / (mu[i].m00 + 1e-5)),
static_cast<float>(mu[i].m01 / (mu[i].m00 + 1e-5)));
}
imshow("output", src);
cout << "\t Info: Area and angle \n";
for (size_t i = 0; i < contours.size(); i++)
{
cout << " * Contour[" << i << "] - Center: "<< mc[i]
<< " - Area: " << contourArea(contours[i]) << " - Angle: " << getOrientation(contours[i],src)*180/CV_PI << endl;
}
But I still don't know how to denote which arrow is which axis in the image.
So I have figured out everything I need (almost).
Here is the final code:
#include "pch.h"
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
using namespace std;
using namespace cv;
// Function declarations
void drawAxis(Mat&, Point, Point, Scalar, const float);
double getOrientation(const vector<Point> &, Mat&);
void drawAxis(Mat& img, Point p, Point q, Scalar colour, const float scale = 0.2)
{
double angle = atan2((double)p.y - q.y, (double)p.x - q.x); // angle in radians
double hypotenuse = sqrt((double)(p.y - q.y) * (p.y - q.y) + (p.x - q.x) * (p.x - q.x));
// Here we lengthen the arrow by a factor of scale
q.x = (int)(p.x - scale * hypotenuse * cos(angle));
q.y = (int)(p.y - scale * hypotenuse * sin(angle));
line(img, p, q, colour, 1, LINE_AA);
// create the arrow hooks
p.x = (int)(q.x + 9 * cos(angle + CV_PI / 4));
p.y = (int)(q.y + 9 * sin(angle + CV_PI / 4));
line(img, p, q, colour, 1, LINE_AA);
p.x = (int)(q.x + 9 * cos(angle - CV_PI / 4));
p.y = (int)(q.y + 9 * sin(angle - CV_PI / 4));
line(img, p, q, colour, 1, LINE_AA);
}
double getOrientation(const vector<Point> &pts, Mat &img)
{
//Construct a buffer used by the pca analysis
int sz = static_cast<int>(pts.size());
Mat data_pts = Mat(sz, 2, CV_64F);
for (int i = 0; i < data_pts.rows; i++)
{
data_pts.at<double>(i, 0) = pts[i].x;
data_pts.at<double>(i, 1) = pts[i].y;
}
//Perform PCA analysis
PCA pca_analysis(data_pts, Mat(), PCA::DATA_AS_ROW);
//Store the center of the object
Point cntr = Point(static_cast<int>(pca_analysis.mean.at<double>(0, 0)),
static_cast<int>(pca_analysis.mean.at<double>(0, 1)));
//Store the eigenvalues and eigenvectors
vector<Point2d> eigen_vecs(2);
vector<double> eigen_val(2);
for (int i = 0; i < 2; i++)
{
eigen_vecs[i] = Point2d(pca_analysis.eigenvectors.at<double>(i, 0),
pca_analysis.eigenvectors.at<double>(i, 1));
eigen_val[i] = pca_analysis.eigenvalues.at<double>(i);
}
// Draw the principal components
circle(img, cntr, 3, Scalar(255, 0, 255), 2);
Point p1 = cntr + 0.01 * Point(static_cast<int>(eigen_vecs[0].x * eigen_val[0]), static_cast<int>(eigen_vecs[0].y * eigen_val[0]));
Point p2 = cntr - 0.005 * Point(static_cast<int>(eigen_vecs[1].x * eigen_val[1]), static_cast<int>(eigen_vecs[1].y * eigen_val[1]));
drawAxis(img, cntr, p1, Scalar(0, 255, 0), 1);
putText(img, "Y-axis", p1, cv::FONT_HERSHEY_COMPLEX_SMALL, 1, cv::Scalar(255, 0, 100));
drawAxis(img, cntr, p2, Scalar(255, 255, 0), 5);
putText(img, "X-axis", p2 / 1.1, cv::FONT_HERSHEY_COMPLEX_SMALL, 1, cv::Scalar(255, 0, 255));
double angle = atan2(eigen_vecs[0].y, eigen_vecs[0].x); // orientation in radians
return angle;
}
int main(int argc, char** argv)
{
// Load image
CommandLineParser parser(argc, argv, "{#input | circle3.bmp | input image}");
parser.about("This program demonstrates how to use OpenCV PCA to extract the orientation of an object.\n");
parser.printMessage();
Mat src = imread(parser.get<String>("#input"));
// Check if image is loaded successfully
if (src.empty())
{
cout << "Problem loading image!!!" << endl;
return EXIT_FAILURE;
}
imshow("src", src);
// Convert image to grayscale
Mat gray;
cvtColor(src, gray, COLOR_BGR2GRAY);
// Convert image to binary
Mat bw;
threshold(gray, bw, 70, 255, THRESH_BINARY | THRESH_OTSU);
// Find all the contours in the thresholded image
vector<vector<Point> > contours;
findContours(bw, contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);
for (size_t i = 0; i < contours.size(); i++)
{
// Calculate the area of each contour
double area = contourArea(contours[i]);
// Ignore contours that are too small or too large
if (area < 1e2 || 1e5 < area) continue;
// Draw each contour only for visualisation purposes
drawContours(src, contours, static_cast<int>(i), Scalar(0, 0, 255), 2);
// Find the orientation of each shape
getOrientation(contours[i], src);
}
/// Get the moments
vector<Moments> mu(contours.size());
for (size_t i = 0; i < contours.size(); i++)
{
mu[i] = moments(contours[i]);
}
/// Get the mass centers
vector<Point2f> mc(contours.size());
for (size_t i = 0; i < contours.size(); i++)
{
//add 1e-5 to avoid division by zero
mc[i] = Point2f(static_cast<float>(mu[i].m10 / (mu[i].m00 + 1e-5)),
static_cast<float>(mu[i].m01 / (mu[i].m00 + 1e-5)));
}
// Label each contour with its index at its mass center
// (this loop must run after all mass centers have been computed,
// so it cannot be nested inside the loop above)
for (size_t i = 0; i < contours.size(); i++)
{
std::stringstream ss; ss << i;
putText(src, ss.str(), mc[i] + Point2f(10, -10), cv::FONT_HERSHEY_COMPLEX_SMALL, 1, cv::Scalar(255, 0, 255));
}
imshow("output", src);
cout << "\t Info: Area and angle \n";
for (size_t i = 0; i < contours.size(); i++)
{
cout << " * Contour[" << i << "] - Center: "<< mc[i]
<< " - Area: " << contourArea(contours[i]) << " - Angle X: " << getOrientation(contours[i],src)*180/CV_PI << endl;
}
waitKey();
return 0;
}
The only thing I still want to know is how to draw the coordinate system in the corner of the image, because I don't understand the angle results.
The results of the final code: https://imgur.com/l7t9bns
And relevant information: https://imgur.com/OuE79rR
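For reference, OpenCV image coordinates have their origin at the top-left corner, with x growing to the right and y growing downward, so a positive atan2 angle appears clockwise on screen. A minimal sketch of drawing such a reference system in the corner, assuming this convention (drawReferenceAxes is a hypothetical helper, not part of the code above):
// Draw the image coordinate axes in the top-left corner:
// x grows to the right, y grows downward (OpenCV convention)
void drawReferenceAxes(Mat& img)
{
Point origin(30, 30);
arrowedLine(img, origin, origin + Point(50, 0), Scalar(0, 0, 255), 2); // x-axis
arrowedLine(img, origin, origin + Point(0, 50), Scalar(255, 0, 0), 2); // y-axis
putText(img, "x", origin + Point(55, 5), FONT_HERSHEY_COMPLEX_SMALL, 1, Scalar(0, 0, 255));
putText(img, "y", origin + Point(-8, 70), FONT_HERSHEY_COMPLEX_SMALL, 1, Scalar(255, 0, 0));
}
Calling drawReferenceAxes(src) just before imshow("output", src) would overlay the reference system.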
I have been tasked with writing my own Sobel method, rather than using the cv::Sobel found in OpenCV.
I tried implementing one I found at Programming Techniques.
However, when I run the program, cv::Mat throws an error. Does anyone have any idea why?
Sobel method:
int sobelCorrelation(Mat InputArray, int x, int y, String xory)
{
if (xory == "x") {
return InputArray.at<uchar>(y - 1, x - 1) +
2 * InputArray.at<uchar>(y, x - 1) +
InputArray.at<uchar>(y + 1, x - 1) -
InputArray.at<uchar>(y - 1, x + 1) -
2 * InputArray.at<uchar>(y, x + 1) -
InputArray.at<uchar>(y + 1, x + 1);
}
else if (xory == "y")
{
return InputArray.at<uchar>(y - 1, x - 1) +
2 * InputArray.at<uchar>(y - 1, x) +
InputArray.at<uchar>(y - 1, x + 1) -
InputArray.at<uchar>(y + 1, x - 1) -
2 * InputArray.at<uchar>(y + 1, x) -
InputArray.at<uchar>(y + 1, x + 1);
}
else
{
return 0;
}
}
Calling and processing it in another function:
void imageOutput(Mat image, String path) {
image = imread(path, 0);
Mat dst;
dst = image.clone();
int sum, gx, gy;
if (image.data && !image.empty()){
for (int y = 0; y < image.rows; y++)
for (int x = 0; x < image.cols; x++)
dst.at<uchar>(y, x) = 0.0;
for (int y = 1; y < image.rows - 1; ++y) {
for (int x = 1; x < image.cols - 1; ++x){
gx = sobelCorrelation(image, x, y, "x");
gy = sobelCorrelation(image, x, y, "y");
sum = absVal(gx) + absVal(gy);
if (sum > 255)
sum = 255;
else if (sum < 0)
sum = 0;
dst.at<uchar>(x, y) = sum;
}
}
namedWindow("Original");
imshow("Original", image);
namedWindow("Diagonal Edges");
imshow("Diagonal Edges", dst);
}
waitKey(0);
}
Main:
int main(int argc, char* argv[]) {
Mat image;
imageOutput(image, "C:/Dropbox/2-falling-toast-ted-kinsman.jpg");
return 0;
}
The absVal method:
int absVal(int v)
{
// branchless absolute value: -v when v < 0, v when v > 0, 0 when v == 0
return v*((v < 0)*(-1) + (v > 0));
}
When run, it throws this error:
Unhandled exception at 0x00007FFC9365A1C8 in Miniproject01.exe: Microsoft C++ exception: cv::Exception at memory location 0x000000A780A4F110.
and points to here:
template<typename _Tp> inline
_Tp& Mat::at(int i0, int i1)
{
CV_DbgAssert( dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] &&
(unsigned)(i1 * DataType<_Tp>::channels) < (unsigned)(size.p[1] * channels()) &&
CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1());
return ((_Tp*)(data + step.p[0] * i0))[i1];
}
If anyone has any advice or ideas about what I am doing wrong, it would be greatly appreciated!
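For what it's worth, the failing check is the bounds assertion in Mat::at, and one store in the processing loop can go out of range: Mat::at takes its indices in (row, col) order, so dst.at<uchar>(x, y) indexes row x, which exceeds dst.rows whenever the image is wider than it is tall. A minimal fix for that line would be:
// Mat::at is (row, col): index with (y, x), matching the reads in sobelCorrelation
dst.at<uchar>(y, x) = sum;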
This code snippet demonstrates how to compute Sobel 3x3 derivatives by convolving the image with the Sobel kernels. You can easily extend it to different kernel sizes by giving the kernel radius as input to my_sobel and creating the appropriate kernel.
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;
void my_sobel(const Mat1b& src, Mat1s& dst, int direction)
{
Mat1s kernel;
int radius = 0;
// Create the kernel
if (direction == 0)
{
// Sobel 3x3 X kernel
kernel = (Mat1s(3,3) << -1, 0, +1, -2, 0, +2, -1, 0, +1);
radius = 1;
}
else
{
// Sobel 3x3 Y kernel
kernel = (Mat1s(3, 3) << -1, -2, -1, 0, 0, 0, +1, +2, +1);
radius = 1;
}
// Handle border issues
Mat1b _src;
copyMakeBorder(src, _src, radius, radius, radius, radius, BORDER_REFLECT101);
// Create output matrix
dst.create(src.rows, src.cols);
// Convolution loop
// Iterate on image
for (int r = radius; r < _src.rows - radius; ++r)
{
for (int c = radius; c < _src.cols - radius; ++c)
{
short s = 0;
// Iterate on kernel
for (int i = -radius; i <= radius; ++i)
{
for (int j = -radius; j <= radius; ++j)
{
s += _src(r + i, c + j) * kernel(i + radius, j + radius);
}
}
dst(r - radius, c - radius) = s;
}
}
}
int main(void)
{
Mat1b img = imread("path_to_image", IMREAD_GRAYSCALE);
// Compute custom Sobel 3x3 derivatives
Mat1s sx, sy;
my_sobel(img, sx, 0);
my_sobel(img, sy, 1);
// Edges L1 norm
Mat1b edges_L1;
absdiff(sx, sy, edges_L1);
// Check results against OpenCV
Mat1s cvsx,cvsy;
Sobel(img, cvsx, CV_16S, 1, 0);
Sobel(img, cvsy, CV_16S, 0, 1);
Mat1b cvedges_L1;
absdiff(cvsx, cvsy, cvedges_L1);
Mat diff_L1;
absdiff(edges_L1, cvedges_L1, diff_L1);
cout << "Number of different pixels: " << countNonZero(diff_L1) << endl;
return 0;
}
If I were you, I would almost always avoid using for loops (if possible); unnecessary for loops tend to slow down the execution. Instead, reuse library functions wherever possible. For example, the code below uses filter2D to give the 2D correlation result:
Mat kern = (Mat_<float>(3,3)<<-1,0,1,-2,0,2,-1,0,1);
Mat dest;
cv::filter2D(src,dest,src.type(),kern);
If you would like to get convolution results instead, you need to flip the kernel 'kern' before filtering:
cv::flip(kern,kern, -1);
If you would like to squeeze out more performance, you can use separable filters via 'sepFilter2D'.
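For instance, a minimal sketch using the separable form of the Sobel x-kernel above (the outer product of [1; 2; 1] and [-1 0 1]), reusing the same src:
// Sobel x is separable: a [-1 0 1] row derivative and a [1; 2; 1] column smoothing
Mat kx = (Mat_<float>(1, 3) << -1, 0, 1); // applied along rows
Mat ky = (Mat_<float>(3, 1) << 1, 2, 1);  // applied along columns
Mat dest2;
cv::sepFilter2D(src, dest2, src.type(), kx, ky);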
Thanks for the post.
I was able to generate a gradient map using the above kernels, and using the OpenCV filter2D code taken from
Using custom kernel in opencv 2DFilter - causing crash ... convolution how?
to convolve the image with the kernels. The code that I used is:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char** argv) {
//Loading the source image
Mat src;
//src = imread("1.png");
src = cv::imread("E:\\Gray_Image.bmp", 0);
//Output image of the same size and the same number of channels as src.
Mat dst1,dst2,grad;
//Mat dst = src.clone(); //didn't help...
//desired depth of the destination image
//negative so dst will be the same as src.depth()
int ddepth = -1;
//the convolution kernel, a single-channel floating point matrix:
//Mat kernel = imread("kernel.png");
Mat kernel_x = (Mat_<float>(3, 3) << -1, 0, 1, -2, 0, 2, -1, 0, 1);
Mat kernel_y = (Mat_<float>(3, 3) << -1, -2, -1, 0, 0, 0, 1, 2, 1);
kernel_x.convertTo(kernel_x, CV_32F); kernel_y.convertTo(kernel_y, CV_32F); //<<not working
//normalize(kernel, kernel, 1.0, 0.0, 4, -1, noArray()); //doesn't help
//cout << kernel.size() << endl; // ... gives 11, 11
//however, the example from tutorial that does work:
//kernel = Mat::ones( 11, 11, CV_32F )/ (float)(11*11);
//default value (-1,-1) here means that the anchor is at the kernel center.
Point anchor = Point(-1, -1);
//value added to the filtered pixels before storing them in dst.
double delta = 0;
//alright, let's do this...
filter2D(src, dst1, ddepth, kernel_x, anchor, delta, BORDER_DEFAULT);
filter2D(src, dst2, ddepth, kernel_y, anchor, delta, BORDER_DEFAULT);
imshow("Source", src); //<< unhandled exception here
//imshow("Kernel1", kernel_x); imshow("Kernel2", kernel_y);
imshow("Destination1", dst1);
imshow("Destination2", dst2);
addWeighted(dst1, 0.5, dst2, 0.5, 0, grad);
imshow("Destination3", grad);
waitKey(1000000);
return 0;
}
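One caveat worth noting about the code above: with ddepth = -1 the outputs stay 8-bit, so negative filter responses are clipped to zero before addWeighted ever sees them. The OpenCV Sobel tutorial avoids this by filtering into 16-bit and taking absolute values first; roughly (a sketch, not the code above):
// Keep signed responses in 16-bit, then fold to absolute 8-bit values before blending
Mat dst1s, dst2s, abs_dst1, abs_dst2;
filter2D(src, dst1s, CV_16S, kernel_x, anchor, delta, BORDER_DEFAULT);
filter2D(src, dst2s, CV_16S, kernel_y, anchor, delta, BORDER_DEFAULT);
convertScaleAbs(dst1s, abs_dst1); // |dst1s|, saturated to 8-bit
convertScaleAbs(dst2s, abs_dst2);
addWeighted(abs_dst1, 0.5, abs_dst2, 0.5, 0, grad);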
I am trying to implement the basic gradient descent algorithm on my uniformly distributed training set. As the data is uniform, the fitted line should be diagonal, but I am getting a line as in the figure below. In the figure, the circles are my data points and the line represents the hypothesis h(x).
I am using OpenCV just for output, nothing else. I am using the following update rule, with h(x) = θ1 + θ2·x over m training points (as implemented in gradientDescent below):
θ1 := θ1 − (α/m) · Σi (h(xi) − yi)
θ2 := θ2 − (α/m) · Σi (h(xi) − yi) · xi
#include <iostream>
#include <unistd.h>
#include <cv.h>
#include <highgui.h>
#define WIN_WIDTH 500
#define WIN_HEIGHT 500
#define MAX_POINTS 500
using namespace std;
using namespace cv;
void getPoints(vector<Point> &randPoints, int size)
{
for (int i = 20; i < WIN_HEIGHT; i+=20)
{
for (int j = 20; j < WIN_WIDTH; j+=20)
{
int x = i;
int y = j;
Point pt = Point(x, y);
randPoints.push_back(pt);
}
}
}
void gradientDescent( double &th1, double &th2, double &alpha, vector<Point> &pointVec)
{
int size = pointVec.size();
double sum1 = 0.0, sum2 = 0.0;
for (int i = 0; i < size; i++)
{
sum1 += (th1 + th2 * pointVec[i].x) - pointVec[i].y;
sum2 += ((th1 + th2 * pointVec[i].x) - pointVec[i].y) * pointVec[i].x;
}
th1 = th1 - ((alpha/( double)size) * sum1);
th2 = th2 - ((alpha/( double)size) * sum2);
}
int main(int argc, char**argv)
{
Mat img(WIN_WIDTH, WIN_HEIGHT, CV_8UC3);
img = Scalar(255, 255, 255);
vector<Point> randPoints;
getPoints(randPoints, MAX_POINTS);
int size = randPoints.size();
cout << "Training size = " << randPoints.size() << endl;
for (int i = 0; i < size; i++)
circle(img, randPoints[i], 4, Scalar(255, 0, 0), 1, 8);
double theta1 = 0, theta2 = 0.25, alpha = 0.0000001;
if (argc > 2)
{
theta1 = atof(argv[1]);
theta2 = atof(argv[2]);
}
int countConv = 0, prevY = 0;
cout << "Theta0 = " << theta1 << " Theta1 = " << theta2 << endl;
cout << "Learning rate = " << alpha << endl;
Mat tmpImg(WIN_WIDTH, WIN_HEIGHT, CV_8UC3);
while(1)
{
gradientDescent(theta1, theta2, alpha, randPoints);
int x = WIN_WIDTH+WIN_HEIGHT;
int y = theta1 + (theta2 * x);
int x1 = WIN_WIDTH-200;
int y1 = theta1 + theta2*x1;
img.copyTo(tmpImg);
circle(tmpImg, Point(x1, y1), 4, Scalar(0, 0, 255), -1, 8);
char text[64];
sprintf(text, "(%d, %d)", x1, y1);
putText(tmpImg, text, Point(x1+3, y1+3), FONT_HERSHEY_SCRIPT_SIMPLEX, 0.4, Scalar(0, 255, 0), 1, 8);
line(tmpImg, Point(0, theta1), Point(x, y), Scalar(0, 0, 255));
imshow("Gradient Descent", tmpImg);
waitKey(33);
}
imshow("Gradient Descent", tmpImg);
waitKey(0);
return 0;
}
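As an aside, the while(1) loop never exits, so the final imshow and waitKey(0) are unreachable. A hedged sketch of a simple stopping criterion inside the loop (the tolerance 1e-9 is an arbitrary illustrative value):
// Remember the previous parameters, update, and stop once they barely move
double prevTh1 = theta1, prevTh2 = theta2;
gradientDescent(theta1, theta2, alpha, randPoints);
if (fabs(theta1 - prevTh1) < 1e-9 && fabs(theta2 - prevTh2) < 1e-9)
break; // converged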
The following code is a particle filter for the mouse, and I changed it to track an object in video by colour; this works.
But now I want to add scale to it; currently it only works with x and y. I tried to add scale to it, but I failed. Please help me add the scale of the detected object to the particle filter.
// Module "core"
#include <opencv2/core/core.hpp>
#include <opencv2/video/background_segm.hpp>
// Module "highgui"
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/legacy/legacy.hpp>
// Module "imgproc"
#include <opencv2/imgproc/imgproc.hpp>
#include "opencv2/videostab/videostab.hpp"
// Module "video"
#include <opencv2/video/video.hpp>
// Output
#include <iostream>
// Vector
#include <vector>
#define drawCross( center, color, d ) \
line( frame, cv::Point( center.x - d, center.y - d ), \
cv::Point( center.x + d, center.y + d ), color, 1, CV_AA, 0); \
line( frame, cv::Point( center.x + d, center.y - d ), \
cv::Point( center.x - d, center.y + d ), color, 1, CV_AA, 0 )
#define PLOT_PARTICLES 1
using namespace std;
using namespace cv;
// >>>>> Color to be tracked
#define MIN_H_BLUE 200
#define MAX_H_BLUE 300
// <<<<< Color to be tracked
vector<cv::Point> mouseV, particleV;
int main()
{
// Camera frame
cv::Mat frame;
char code = (char)-1;
cv::namedWindow("mouse particle");
cv::Mat_<float> measurement(2,1);
measurement.setTo(cv::Scalar(0));
int dim = 2;
int nParticles = 300;
float xRange = 650.0;
float yRange = 650.0;
float minRange[] = { 0, 0 };
float maxRange[] = { xRange, yRange };
CvMat LB, UB;
cvInitMatHeader(&LB, 2, 1, CV_32FC1, minRange);
cvInitMatHeader(&UB, 2, 1, CV_32FC1, maxRange);
CvConDensation* condens = cvCreateConDensation(dim, dim, nParticles);
cvConDensInitSampleSet(condens, &LB, &UB);
condens->DynamMatr[0] = 1.0;
condens->DynamMatr[1] = 0.0;
condens->DynamMatr[2] = 0.0;
condens->DynamMatr[3] = 1.0;
// Camera Index
string idx = "a.mp4";
// Camera Capture
cv::VideoCapture cap;
// >>>>> Camera Settings
if (!cap.open(idx))
{
cout << "Webcam not connected.\n" << "Please verify\n";
return EXIT_FAILURE;
}
cap.set(CV_CAP_PROP_FRAME_WIDTH, 1024);
cap.set(CV_CAP_PROP_FRAME_HEIGHT, 768);
// <<<<< Camera Settings
cout << "\nHit 'q' to exit...\n";
char ch = 0;
double ticks = 0;
bool found = false;
int notFoundCount = 0;
// >>>>> Main loop
while (ch != 'q' && ch != 'Q')
{
double precTick = ticks;
ticks = (double) cv::getTickCount();
double dT = (ticks - precTick) / cv::getTickFrequency(); //seconds
// Frame acquisition
cap >> frame;
mouseV.clear();
particleV.clear();
// >>>>> Noise smoothing
cv::Mat blur;
cv::GaussianBlur(frame, blur, cv::Size(5, 5), 3.0, 3.0);
// <<<<< Noise smoothing
// >>>>> HSV conversion
cv::Mat frmHsv;
cv::cvtColor(blur, frmHsv, CV_BGR2HSV);
// <<<<< HSV conversion
// >>>>> Color Thresholding
// Note: change parameters for different colors
cv::Mat rangeRes = cv::Mat::zeros(frame.size(), CV_8UC1);
cv::inRange(frmHsv, cv::Scalar(MIN_H_BLUE / 2, 100, 80),
cv::Scalar(MAX_H_BLUE / 2, 255, 255), rangeRes);
// <<<<< Color Thresholding
// >>>>> Improving the result
cv::erode(rangeRes, rangeRes, cv::Mat(), cv::Point(-1, -1), 2);
cv::dilate(rangeRes, rangeRes, cv::Mat(), cv::Point(-1, -1), 2);
// <<<<< Improving the result
// >>>>> Contours detection
vector<vector<cv::Point> > contours;
cv::findContours(rangeRes, contours, CV_RETR_EXTERNAL,
CV_CHAIN_APPROX_NONE);
// <<<<< Contours detection
// >>>>> Filtering
vector<vector<cv::Point> > balls;
vector<cv::Rect> ballsBox;
for (size_t i = 0; i < contours.size(); i++)
{
cv::Rect bBox;
bBox = cv::boundingRect(contours[i]);
float ratio = (float) bBox.width / (float) bBox.height;
if (ratio > 1.0f)
ratio = 1.0f / ratio;
// Searching for a bBox almost square
// if (ratio > 0.55 && bBox.area() >= 50)
// {
balls.push_back(contours[i]);
ballsBox.push_back(bBox);
measurement(0) = bBox.x;
measurement(1) = bBox.y;
measurement(2) = ballsBox.size();
//cout << "Balls found:" << bBox.x << endl;
// }
}
/*
cout << "Balls found:" << ballsBox.size() << endl;
*/
cv::Point measPt(measurement(0),measurement(1));
mouseV.push_back(measPt);
for (int i = 0; i < condens->SamplesNum; i++) {
float diffX = (measurement(0) - condens->flSamples[i][0])/xRange;
float diffY = (measurement(1) - condens->flSamples[i][1])/yRange;
condens->flConfidence[i] = 1.0 / (sqrt(diffX * diffX + diffY * diffY));
// plot particles
#ifdef PLOT_PARTICLES
cv::Point partPt(condens->flSamples[i][0], condens->flSamples[i][1]);
drawCross(partPt , cv::Scalar(255,0,255), 2);
#endif
}
cvConDensUpdateByTime(condens);
cv::Point statePt(condens->State[0], condens->State[1]);
particleV.push_back(statePt);
for (int i = 0; i < particleV.size() - 1; i++) {
line(frame, particleV[i], particleV[i+1], cv::Scalar(0,255,0), 1);
}
drawCross( statePt, cv::Scalar(255,255,255), 5 );
drawCross( measPt, cv::Scalar(0,0,255), 5 );
for (size_t i = 0; i < balls.size(); i++)
{
cv::drawContours(frame, balls, i, CV_RGB(20,150,20), 1);
cv::rectangle(frame, ballsBox[i], CV_RGB(0,255,0), 2);
cv::Point center;
center.x = ballsBox[i].x + ballsBox[i].width / 2;
center.y = ballsBox[i].y + ballsBox[i].height / 2;
cv::circle(frame, center, 2, CV_RGB(20,150,20), -1);
stringstream sstr;
sstr << "(" << center.x << "," << center.y << ")";
cv::putText(frame, sstr.str(),
cv::Point(center.x + 3, center.y - 3),
cv::FONT_HERSHEY_SIMPLEX, 0.5, CV_RGB(20,150,20), 2);
}
cv::imshow("mouse particle", frame);
cv::imshow("ssssssss", rangeRes);
ch = cv::waitKey(1);
}
// <<<<< Main loop
return EXIT_SUCCESS;
}
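For what it's worth, a hedged sketch of what extending the state to (x, y, scale) might look like with the legacy ConDensation API; the bounds and the choice of bounding-box height as the scale measurement are illustrative assumptions, untested:
// Hypothetical 3-state setup: x, y, and scale s (here measured as bBox.height)
int dim3 = 3;
float minRange3[] = { 0, 0, 10.0f }; // assumed lower bounds for x, y, s
float maxRange3[] = { xRange, yRange, 300.0f }; // assumed upper bounds for x, y, s
CvMat LB3, UB3;
cvInitMatHeader(&LB3, 3, 1, CV_32FC1, minRange3);
cvInitMatHeader(&UB3, 3, 1, CV_32FC1, maxRange3);
CvConDensation* condens3 = cvCreateConDensation(dim3, dim3, nParticles);
cvConDensInitSampleSet(condens3, &LB3, &UB3);
// 3x3 identity dynamics, stored row-major in DynamMatr
float dyn[9] = { 1, 0, 0,
0, 1, 0,
0, 0, 1 };
for (int k = 0; k < 9; ++k)
condens3->DynamMatr[k] = dyn[k];
The measurement would then also need a third component, and the confidence update a matching scale term, e.g.:
// float diffS = (bBox.height - condens3->flSamples[i][2]) / (maxRange3[2] - minRange3[2]);
// condens3->flConfidence[i] = 1.0f / sqrt(diffX*diffX + diffY*diffY + diffS*diffS);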
I'm developing an application for iOS. I'm using the camera matrix according to the book Mastering OpenCV.
In my scenario I have a well-known box. I know its real dimensions and I know its corner pixels exactly. Using this information I calculate the camera rotation and the translation vector.
From these parameters I'm able to calculate the camera position.
I'm checking my calculation by projecting the 3D world coordinate back to the image and I get very accurate results.
The world origin in my case is the middle of the bottom line of the box.
The box is open on one side. The image is taken in that direction, so I can see the content of the box.
Now, I have an object in the box. I know the image coordinates (2D) of this object's corners very well. I also know the real height of the corners (the real Y, with Y ≠ 0). How do I calculate the world X and Z of the corners of the object?
Here is my code:
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <ctype.h>
using namespace cv;
using namespace std;
Point2f point;
vector<vector<Point2f>> objectPoints(1);
vector<vector<Point2f>> boxPoints(1);
Point3f calc3DPointOutOf2DwithYknown(double u, double v, float worldY, double fx, double fy, double cx, double cy, Mat tvec, Mat rotMat)
{
Point3f tmpPoint;
// This is the function I need to complete
return tmpPoint;
}
int main( int argc, char** argv )
{
///////// Loading image
Mat sourceImage = imread("/Users/Ilan/Xcode/LK Test/LK Test/images/box_center640X480.jpg");
namedWindow( "Source", 1 );
///// Setting box corners /////
point = Point2f((float)102,(float)367.5); //640X480
boxPoints[0].push_back(point);
circle( sourceImage, boxPoints[0][0], 3, Scalar(0,255,0), -1, 8);
point = Point2f((float)83,(float)90.5); //640X480
boxPoints[0].push_back(point);
circle( sourceImage, boxPoints[0][1], 3, Scalar(0,255,0), -1, 8);
point = Point2f((float)520,(float)82.5); //640X480
boxPoints[0].push_back(point);
circle( sourceImage, boxPoints[0][2], 3, Scalar(0,255,0), -1, 8);
point = Point2f((float)510.5,(float)361); //640X480
boxPoints[0].push_back(point);
circle( sourceImage, boxPoints[0][3], 3, Scalar(0,255,0), -1, 8);
///// Setting object corners /////
point = Point2f((float)403.5,(float)250); //640X480
objectPoints[0].push_back(point);
circle( sourceImage, objectPoints[0][0], 3, Scalar(0,255,0), -1, 8);
point = Point2f((float)426.5,(float)251.5); //640X480
objectPoints[0].push_back(point);
circle( sourceImage, objectPoints[0][1], 3, Scalar(0,255,0), -1, 8);
imshow("Source", sourceImage);
vector<vector<Point3f>> worldBoxPoints(1);
Point3f tmpPoint;
tmpPoint = Point3f((float)-100,(float)0,(float)0);
worldBoxPoints[0].push_back(tmpPoint);
tmpPoint = Point3f((float)-100,(float)-150,(float)0);
worldBoxPoints[0].push_back(tmpPoint);
tmpPoint = Point3f((float)100,(float)-150,(float)0);
worldBoxPoints[0].push_back(tmpPoint);
tmpPoint = Point3f((float)100,(float)0,(float)0);
worldBoxPoints[0].push_back(tmpPoint);
std::cout << "There are " << boxPoints[0].size() << " roomPoints and " << worldBoxPoints[0].size() << " worldRoomPoints." << std::endl;
cv::Mat cameraMatrix1(3,3,cv::DataType<double>::type);
cv::setIdentity(cameraMatrix1);
cv::Mat distCoeffs1(4,1,cv::DataType<double>::type);
distCoeffs1.at<double>(0) = 0;
distCoeffs1.at<double>(1) = 0;
distCoeffs1.at<double>(2) = 0;
distCoeffs1.at<double>(3) = 0;
//Taken from Mastring OpenCV
double fx = 6.24860291e+02 * ((float)(sourceImage.cols)/352.);
double fy = 6.24860291e+02 * ((float)(sourceImage.rows)/288.);
double cx = (float)(sourceImage.cols)/2.;
double cy = (float)(sourceImage.rows)/2.;
cameraMatrix1.at<double>(0, 0) = fx;
cameraMatrix1.at<double>(1, 1) = fy;
cameraMatrix1.at<double>(0, 2) = cx;
cameraMatrix1.at<double>(1, 2) = cy;
std::cout << "After calib cameraMatrix --- 1: " << cameraMatrix1 << std::endl;
std::cout << "After calib distCoeffs: --- 1" << distCoeffs1 << std::endl;
cv::Mat rvec1(3,1,cv::DataType<double>::type);
cv::Mat tvec1(3,1,cv::DataType<double>::type);
cv::solvePnP(worldBoxPoints[0], boxPoints[0], cameraMatrix1, distCoeffs1, rvec1, tvec1);
std::cout << "rvec --- 1: " << rvec1 << std::endl;
std::cout << "tvec --- 1: " << tvec1 << std::endl;
cv::Mat rvecM1(3,3,cv::DataType<double>::type);
cv::Rodrigues(rvec1,rvecM1);
std::cout << "cameraRotation --- 1 : " << rvecM1 << std::endl;
std::cout << "cameraPosition --- 1 : " << (rvecM1.t())*((-1.0)*tvec1) << std::endl;
std::vector<cv::Point2f> projectedPoints1;
cv::projectPoints(worldBoxPoints[0], rvec1, tvec1, cameraMatrix1, distCoeffs1, projectedPoints1);
for(unsigned int i = 0; i < projectedPoints1.size(); ++i)
{
std::cout << "box point --- 1: " << boxPoints[0][i] << " Projected to --- 1: " << projectedPoints1[i] << std::endl;
}
vector<vector<Point3f>> worldObjectPoints(1);
tmpPoint = calc3DPointOutOf2DwithYknown(objectPoints[0][0].x, objectPoints[0][0].y, /*the real Y of the object*/ -40.0, fx, fy, cx, cy, tvec1, rvecM1);
worldObjectPoints[0].push_back(tmpPoint);
tmpPoint = calc3DPointOutOf2DwithYknown(objectPoints[0][1].x, objectPoints[0][1].y, /*the real Y of the object*/ -40.0, fx, fy, cx, cy, tvec1, rvecM1);
worldObjectPoints[0].push_back(tmpPoint);
cv::projectPoints(worldObjectPoints[0], rvec1, tvec1, cameraMatrix1, distCoeffs1, projectedPoints1);
for(unsigned int i = 0; i < projectedPoints1.size(); ++i)
{
std::cout << "object point --- 1: " << objectPoints[0][i] << " Projected to --- 1: " << projectedPoints1[i] << std::endl;
}
waitKey(0);
return 0;
}
So, I want to implement the calc3DPointOutOf2DwithYknown function. Of course, the parameters are according to what I understand now; if I need other parameters, I'll use them.
Thank you so much,
Ilan
I succeeded in solving it by myself. In case it helps anyone, here is the code:
Point3f calc3DPointOutOf2DwithYknown(double u, double v, float worldY, double fx, double fy, double cx, double cy, Mat tvec, Mat rotMat)
{
Point3f tmpPoint;
// Rotation matrix entries, row by row
float r1 = rotMat.at<double>(0,0);
float r2 = rotMat.at<double>(0,1);
float r3 = rotMat.at<double>(0,2);
float r4 = rotMat.at<double>(1,0);
float r5 = rotMat.at<double>(1,1);
float r6 = rotMat.at<double>(1,2);
float r7 = rotMat.at<double>(2,0);
float r8 = rotMat.at<double>(2,1);
float r9 = rotMat.at<double>(2,2);
// Translation vector entries
float t1 = tvec.at<double>(0,0);
float t2 = tvec.at<double>(1,0);
float t3 = tvec.at<double>(2,0);
// Normalized image coordinates of the pixel (u, v)
float xt = (u/fx) - (cx/fx);
float yt = (v/fy) - (cy/fy);
// With worldY known, the two projection equations are linear in worldX and worldZ.
// K1..K3 collect the x-equation terms so that worldX = (K1 + K2*worldZ)/K3
float K1 = xt*r8*worldY + xt*t3 - r2*worldY - t1;
float K2 = xt*r9 - r3;
float K3 = r1 - xt*r7;
// Substitute worldX into the y-equation and solve for worldZ
float worldZ = (yt*r7*K1 + yt*K3*r8*worldY + yt*K3*t3 - r4*K1 - K3*r5*worldY - K3*t2)/
(r4*K2 + K3*r6 - yt*r7*K2 - yt*K3*r9);
// Back-substitute to get worldX
float worldX = (K1 + worldZ*K2)/K3;
tmpPoint = Point3f(worldX, worldY, worldZ);
return tmpPoint;
}
}
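For reference, the algebra this implements, reconstructed from the code assuming the standard pinhole model:
x_t = (u - c_x)/f_x,  y_t = (v - c_y)/f_y
(X_c, Y_c, Z_c)^T = R (X, Y, Z)^T + t,  with  x_t = X_c/Z_c,  y_t = Y_c/Z_c.
Expanding the x_t equation with the known Y gives X·(r1 - x_t·r7) = (x_t·r8·Y + x_t·t3 - r2·Y - t1) + Z·(x_t·r9 - r3), i.e. X = (K1 + K2·Z)/K3 with K1, K2, K3 as in the code; substituting that into the y_t equation leaves a single linear equation in Z, whose solution is the long worldZ expression above.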