I'm developing an application for iOS. I'm using the camera matrix according to the book Mastering OpenCV.
In my scenario I have a well-known box. I know its real dimensions and I know exactly the pixel coordinates of its corners. Using this information I calculate the camera rotation and the translation vector.
From these parameters I'm able to calculate the camera position.
I check my calculation by projecting the 3D world coordinates back onto the image, and I get very accurate results.
The world origin in my case is the middle of the bottom line of the box.
The box is open on one side. The image is taken from that direction, so I can see the contents of the box.
Now, I have an object in the box. I know the image coordinates (2D) of the corners of this object very precisely. I also know the real height of those corners (the real Y, with Y != 0). How do I calculate the world X and Z of the corners of the object?
Here is my code:
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <ctype.h>
using namespace cv;
using namespace std;
Point2f point;
vector<vector<Point2f>> objectPoints(1);
vector<vector<Point2f>> boxPoints(1);
Point3f calc3DPointOutOf2DwithYknown(double u, double v, float worldY, double fx, double fy, double cx, double cy, Mat tvec, Mat rotMat)
{
Point3f tmpPoint;
// This is the function I need to complete
return tmpPoint;
}
int main( int argc, char** argv )
{
///////// Loading image
Mat sourceImage = imread("/Users/Ilan/Xcode/LK Test/LK Test/images/box_center640X480.jpg");
namedWindow( "Source", 1 );
///// Setting box corners /////
point = Point2f((float)102,(float)367.5); //640X480
boxPoints[0].push_back(point);
circle( sourceImage, boxPoints[0][0], 3, Scalar(0,255,0), -1, 8);
point = Point2f((float)83,(float)90.5); //640X480
boxPoints[0].push_back(point);
circle( sourceImage, boxPoints[0][1], 3, Scalar(0,255,0), -1, 8);
point = Point2f((float)520,(float)82.5); //640X480
boxPoints[0].push_back(point);
circle( sourceImage, boxPoints[0][2], 3, Scalar(0,255,0), -1, 8);
point = Point2f((float)510.5,(float)361); //640X480
boxPoints[0].push_back(point);
circle( sourceImage, boxPoints[0][3], 3, Scalar(0,255,0), -1, 8);
///// Setting object corners /////
point = Point2f((float)403.5,(float)250); //640X480
objectPoints[0].push_back(point);
circle( sourceImage, objectPoints[0][0], 3, Scalar(0,255,0), -1, 8);
point = Point2f((float)426.5,(float)251.5); //640X480
objectPoints[0].push_back(point);
circle( sourceImage, objectPoints[0][1], 3, Scalar(0,255,0), -1, 8);
imshow("Source", sourceImage);
vector<vector<Point3f>> worldBoxPoints(1);
Point3f tmpPoint;
tmpPoint = Point3f((float)-100,(float)0,(float)0);
worldBoxPoints[0].push_back(tmpPoint);
tmpPoint = Point3f((float)-100,(float)-150,(float)0);
worldBoxPoints[0].push_back(tmpPoint);
tmpPoint = Point3f((float)100,(float)-150,(float)0);
worldBoxPoints[0].push_back(tmpPoint);
tmpPoint = Point3f((float)100,(float)0,(float)0);
worldBoxPoints[0].push_back(tmpPoint);
std::cout << "There are " << boxPoints[0].size() << " roomPoints and " << worldBoxPoints[0].size() << " worldRoomPoints." << std::endl;
cv::Mat cameraMatrix1(3,3,cv::DataType<double>::type);
cv::setIdentity(cameraMatrix1);
cv::Mat distCoeffs1(4,1,cv::DataType<double>::type);
distCoeffs1.at<double>(0) = 0;
distCoeffs1.at<double>(1) = 0;
distCoeffs1.at<double>(2) = 0;
distCoeffs1.at<double>(3) = 0;
// Taken from Mastering OpenCV
double fx = 6.24860291e+02 * ((float)(sourceImage.cols)/352.);
double fy = 6.24860291e+02 * ((float)(sourceImage.rows)/288.);
double cx = (float)(sourceImage.cols)/2.;
double cy = (float)(sourceImage.rows)/2.;
cameraMatrix1.at<double>(0, 0) = fx;
cameraMatrix1.at<double>(1, 1) = fy;
cameraMatrix1.at<double>(0, 2) = cx;
cameraMatrix1.at<double>(1, 2) = cy;
std::cout << "After calib cameraMatrix --- 1: " << cameraMatrix1 << std::endl;
std::cout << "After calib distCoeffs: --- 1" << distCoeffs1 << std::endl;
cv::Mat rvec1(3,1,cv::DataType<double>::type);
cv::Mat tvec1(3,1,cv::DataType<double>::type);
cv::solvePnP(worldBoxPoints[0], boxPoints[0], cameraMatrix1, distCoeffs1, rvec1, tvec1);
std::cout << "rvec --- 1: " << rvec1 << std::endl;
std::cout << "tvec --- 1: " << tvec1 << std::endl;
cv::Mat rvecM1(3,3,cv::DataType<double>::type);
cv::Rodrigues(rvec1,rvecM1);
std::cout << "cameraRotation --- 1 : " << rvecM1 << std::endl;
std::cout << "cameraPosition --- 1 : " << (rvecM1.t())*((-1.0)*tvec1) << std::endl;
std::vector<cv::Point2f> projectedPoints1;
cv::projectPoints(worldBoxPoints[0], rvec1, tvec1, cameraMatrix1, distCoeffs1, projectedPoints1);
for(unsigned int i = 0; i < projectedPoints1.size(); ++i)
{
std::cout << "box point --- 1: " << boxPoints[0][i] << " Projected to --- 1: " << projectedPoints1[i] << std::endl;
}
vector<vector<Point3f>> worldObjectPoints(1);
tmpPoint = calc3DPointOutOf2DwithYknown(objectPoints[0][0].x, objectPoints[0][0].y, /*the real Y of the object*/ -40.0, fx, fy, cx, cy, tvec1, rvecM1);
worldObjectPoints[0].push_back(tmpPoint);
tmpPoint = calc3DPointOutOf2DwithYknown(objectPoints[0][1].x, objectPoints[0][1].y, /*the real Y of the object*/ -40.0, fx, fy, cx, cy, tvec1, rvecM1);
worldObjectPoints[0].push_back(tmpPoint);
cv::projectPoints(worldObjectPoints[0], rvec1, tvec1, cameraMatrix1, distCoeffs1, projectedPoints1);
for(unsigned int i = 0; i < projectedPoints1.size(); ++i)
{
std::cout << "object point --- 1: " << objectPoints[0][i] << " Projected to --- 1: " << projectedPoints1[i] << std::endl;
}
waitKey(0);
return 0;
}
So, I want to implement the calc3DPointOutOf2DwithYknown function. The parameters reflect my current understanding; if I need other parameters, I'll use others.
Thank you so much,
Ilan
I succeeded in solving it myself. In case it helps anyone, here's the code:
Point3f calc3DPointOutOf2DwithYknown(double u, double v, float worldY, double fx, double fy, double cx, double cy, Mat tvec, Mat rotMat)
{
Point3f tmpPoint;
float r1 = rotMat.at<double>(0,0);
float r2 = rotMat.at<double>(0,1);
float r3 = rotMat.at<double>(0,2);
float r4 = rotMat.at<double>(1,0);
float r5 = rotMat.at<double>(1,1);
float r6 = rotMat.at<double>(1,2);
float r7 = rotMat.at<double>(2,0);
float r8 = rotMat.at<double>(2,1);
float r9 = rotMat.at<double>(2,2);
float t1 = tvec.at<double>(0,0);
float t2 = tvec.at<double>(1,0);
float t3 = tvec.at<double>(2,0);
float xt = (u/fx) - (cx/fx);
float yt = (v/fy) - (cy/fy);
float K1 = xt*r8*worldY + xt*t3 - r2*worldY - t1;
float K2 = xt*r9 - r3;
float K3 = r1 - xt*r7;
float worldZ = (yt*r7*K1 + yt*K3*r8*worldY + yt*K3*t3 - r4*K1 - K3*r5*worldY - K3*t2)/
(r4*K2 + K3*r6 - yt*r7*K2 - yt*K3*r9);
float worldX = (K1 + worldZ*K2)/K3;
tmpPoint = Point3f(worldX, worldY, worldZ);
return tmpPoint;
}
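For anyone reading the solution, the math behind it is just the pinhole model written with the same symbols as the code. With xt = (u - cx)/fx and yt = (v - cy)/fy, projecting a world point (X, Y, Z) gives
xt = (r1*X + r2*Y + r3*Z + t1) / (r7*X + r8*Y + r9*Z + t3)
yt = (r4*X + r5*Y + r6*Z + t2) / (r7*X + r8*Y + r9*Z + t3)
Since Y (worldY) is known, these are two equations in the two unknowns X and Z. Rearranging the first equation gives X*K3 = K1 + K2*Z, with K1, K2 and K3 exactly as defined in the code, so X = (K1 + K2*Z)/K3; substituting that into the second equation and solving for Z yields the worldZ expression used above.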
Related
I have tried to write code with OpenCV, based on the algorithm described in the Trucco & Verri book, p. 159, in order to rectify stereo images. The purpose of the rectification is to use the images for producing a DSM. The code runs without errors, but the results are wrong.
Can anyone in the group help me fix the problem?
The input image and the current (incorrect) output were attached as figures. The code:
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include <iomanip>
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
int main()
{
Mat input, output;
input = imread("E:\\Gray_Image.bmp", 1);
double alpha = -0.0426, beta = -0.1871, gamma = -179.8781,
dx =1, dy = 1, dz = 16750., f = 16750.;
double w = (double)input.cols;
double h = (double)input.rows;
// the camera intrinsic matrix is
Mat CAM = (Mat_<double>(3, 3) <<
f, 0, w / 2,
0, f, h / 2 - 20,
0, 0, 1 );
// Rotation matrices around the X, Y, and Z axis
Mat RX = (Mat_<double>(3, 3) <<
1, 0, 0,
0, cos(alpha), sin(alpha),
0, -sin(alpha), cos(alpha));
Mat RY = (Mat_<double>(3, 3) <<
cos(beta), 0, -sin(beta),
0, 1, 0,
sin(beta), 0, cos(beta));
Mat RZ = (Mat_<double>(3, 3) <<
cos(gamma), sin(gamma), 0,
-sin(gamma), cos(gamma), 0,
0, 0, 1);
// Composed rotation matrix with (RX, RY, RZ)
Mat R = RX * RY * RZ;
// cout << R << endl;
Mat Te1 = (Mat_<double>(3, 1) <<
dx,
dy,
dz);
Mat Te2 = (Mat_<double>(3, 1) <<
-dy,
dx,
0);
Mat Te3 = (Mat_<double>(3, 1) <<
-(dx * dz),
-(dy * dz),
-(dx*dx + dy*dy));
Mat e1 = Te1 * (1.0 / (sqrt(dx*dx + dy*dy + dz*dz)));// * T;
transpose(e1, Te1);
Mat e2 = Te2 * (1.0 / (sqrt(dx*dx + dy*dy)));// * T;
transpose(e2, Te2);
Mat e3 = Te3 * (1.0 / (sqrt((dx*dx + dy*dy)*(dx*dx + dy*dy + dz*dz))));// * T;
transpose(e3, Te3);
Mat Re(3, 3, CV_64F); // CV_64F so copying the double row vectors below does not reallocate the rows
// add the above row matrix into one
Te1.copyTo(Re.row(0));
Te2.copyTo(Re.row(1));
Te3.copyTo(Re.row(2));
cout << CAM << endl;
cout << Re << endl;
warpPerspective(input, output, Re, input.size(), INTER_LANCZOS4);
imshow("Input", input);
imshow("Output", output);
////////////////////////////
waitKey(0);
return 0;
}
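A side note on the code above, offered as a guess rather than a verified fix: warpPerspective is given the bare 3x3 rotation Re, while a pure camera rotation is normally applied to pixel coordinates through a homography of the form K * Rrect * K^-1; also, cos and sin expect radians, and alpha, beta, gamma look like degrees. A minimal sketch of that variant, assuming Re is the rectifying rotation for this image and that both Re and CAM are CV_64F:
// Hypothetical alternative to the warpPerspective call above (not the original code):
// apply the rectifying rotation as a pixel homography instead of the raw rotation matrix.
Mat H = CAM * Re * CAM.inv(); // homography induced by a pure rotation about the camera centre
warpPerspective(input, output, H, input.size(), INTER_LANCZOS4);
// If the Euler-angle rotation R is also needed (e.g. for the second image), convert the
// angles from degrees to radians before building RX, RY, RZ, e.g. double a = alpha * CV_PI / 180.0;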
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include "opencv2/imgcodecs.hpp"
#include <opencv2/highgui.hpp>
#include <opencv2/ml.hpp>
#define NTRAINING_SAMPLES 100 // Number of training samples per class
#define FRAC_LINEAR_SEP 0.5f // Fraction of samples which compose the linear separable part
using namespace cv;
using namespace cv::ml;
using namespace std;
static void help()
{
cout<< "\n--------------------------------------------------------------------------" << endl
<< "This program shows Support Vector Machines for Non-Linearly Separable Data. " << endl
<< "Usage:" << endl
<< "./non_linear_svms" << endl
<< "--------------------------------------------------------------------------" << endl
<< endl;
}
int main()
{
help();
// Data for visual representation
const int WIDTH = 512, HEIGHT = 512;
Mat I = Mat::zeros(HEIGHT, WIDTH, CV_8UC3);
//--------------------- 1. Set up training data randomly ---------------------------------------
Mat trainData(2*NTRAINING_SAMPLES, 2, CV_32FC1);
Mat labels (2*NTRAINING_SAMPLES, 1, CV_32SC1);
RNG rng(100); // Random value generation class
// Set up the linearly separable part of the training data
int nLinearSamples = (int) (FRAC_LINEAR_SEP * NTRAINING_SAMPLES);
// Generate random points for the class 1
Mat trainClass = trainData.rowRange(0, nLinearSamples);
// The x coordinate of the points is in [0, 0.4)
Mat c = trainClass.colRange(0, 1);
rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(0.4 * WIDTH));
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1,2);
rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
// Generate random points for the class 2
trainClass = trainData.rowRange(2*NTRAINING_SAMPLES-nLinearSamples, 2*NTRAINING_SAMPLES);
// The x coordinate of the points is in [0.6, 1]
c = trainClass.colRange(0 , 1);
rng.fill(c, RNG::UNIFORM, Scalar(0.6*WIDTH), Scalar(WIDTH));
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1,2);
rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
//------------------ Set up the non-linearly separable part of the training data ---------------
// Generate random points for the classes 1 and 2
trainClass = trainData.rowRange( nLinearSamples, 2*NTRAINING_SAMPLES-nLinearSamples);
// The x coordinate of the points is in [0.4, 0.6)
c = trainClass.colRange(0,1);
rng.fill(c, RNG::UNIFORM, Scalar(0.4*WIDTH), Scalar(0.6*WIDTH));
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1,2);
rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
//------------------------- Set up the labels for the classes ---------------------------------
labels.rowRange( 0, NTRAINING_SAMPLES).setTo(1); // Class 1
labels.rowRange(NTRAINING_SAMPLES, 2*NTRAINING_SAMPLES).setTo(2); // Class 2
//------------------------ 2. Set up the support vector machines parameters --------------------
//------------------------ 3. Train the svm ----------------------------------------------------
cout << "Starting training process" << endl;
Ptr<SVM> svm = SVM::create();
svm->setType(SVM::C_SVC);
//svm->setC(0.1);
vector<float> weights;
weights.push_back( 1 );
weights.push_back( 1 );
Mat w(weights);
svm->setClassWeights(w);
svm->setKernel(SVM::INTER);
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, (int)1e7, 1e-6));
// svm->train(trainData, ROW_SAMPLE, labels);
_InputArray tr_data1(trainData);
_InputArray lab(labels);
Ptr<TrainData> trainData_ptr = TrainData::create(tr_data1 , ROW_SAMPLE , lab);
svm->trainAuto(trainData_ptr);
cout << "Finished training process" << endl;
//------------------------ 4. Show the decision regions ----------------------------------------
Vec3b green(0,100,0), blue (100,0,0);
for (int i = 0; i < I.rows; ++i)
for (int j = 0; j < I.cols; ++j)
{
Mat sampleMat = (Mat_<float>(1,2) << i, j);
float response = svm->predict(sampleMat);
if (response == 1) I.at<Vec3b>(j, i) = green;
else if (response == 2) I.at<Vec3b>(j, i) = blue;
}
//----------------------- 5. Show the training data --------------------------------------------
int thick = -1;
int lineType = 8;
float px, py;
// Class 1
for (int i = 0; i < NTRAINING_SAMPLES; ++i)
{
px = trainData.at<float>(i,0);
py = trainData.at<float>(i,1);
circle(I, Point( (int) px, (int) py ), 3, Scalar(0, 255, 0), thick, lineType);
}
// Class 2
for (int i = NTRAINING_SAMPLES; i <2*NTRAINING_SAMPLES; ++i)
{
px = trainData.at<float>(i,0);
py = trainData.at<float>(i,1);
circle(I, Point( (int) px, (int) py ), 3, Scalar(255, 0, 0), thick, lineType);
}
//------------------------- 6. Show support vectors --------------------------------------------
thick = 2;
lineType = 8;
Mat sv = svm->getSupportVectors();
for (int i = 0; i < sv.rows; ++i)
{
const float* v = sv.ptr<float>(i);
circle( I, Point( (int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thick, lineType);
}
cout << endl << " C: "<< svm->getC() <<endl ;
imwrite("result.png", I); // save the Image
imshow("SVM for Non-Linear Training Data", I); // show it to the user
waitKey(0);
}
Currently, running this code, which uses the SVM::trainAuto() method, takes hours to finish!
So is there a way to make it run on the GPU or to multi-thread it?
The above is just a demo example, but I want to train my SVM on image datasets where I have 4096 features per image, so I was planning to use trainAuto to optimize the SVM_C and SVM_NU parameters, assuming it does that. If not, is there a way I can optimize those parameters?
Thanks in advance.
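As far as I can tell trainAuto() has no GPU variant in the ml module, but its runtime drops a lot if the number of cross-validation folds and the parameter grids it searches are reduced. A rough sketch is below; the fold count and the C-grid values are arbitrary choices for illustration, not recommendations:
// Sketch: same training data as above, but 5 folds instead of the default 10 and a coarse,
// explicit C grid; the remaining grids are left at their defaults.
Ptr<TrainData> td = TrainData::create(trainData, ROW_SAMPLE, labels);
svm->trainAuto(td,
               5,                                  // kFold
               ParamGrid(0.01, 100, 10),           // C is tried at 0.01, 0.1, 1, 10
               SVM::getDefaultGrid(SVM::GAMMA),
               SVM::getDefaultGrid(SVM::P),
               SVM::getDefaultGrid(SVM::NU),
               SVM::getDefaultGrid(SVM::COEF),
               SVM::getDefaultGrid(SVM::DEGREE));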
I am trying to implement the basic gradient descent algorithm on my uniformly distributed training set. As the data is uniform, the partition line should be diagonal, but I am getting a line as in the figure below. In the figure the circles are my data points and the line represents the fitted hypothesis h(x).
I am using OpenCV just for output, nothing else. I am using the update rule below (this is exactly what the gradientDescent function in the code computes, with h(x) = theta1 + theta2 * x and m the number of training points):
theta1 := theta1 - (alpha / m) * sum_i (h(x_i) - y_i)
theta2 := theta2 - (alpha / m) * sum_i (h(x_i) - y_i) * x_i
#include <iostream>
#include <unistd.h>
#include <cv.h>
#include <highgui.h>
#define WIN_WIDTH 500
#define WIN_HEIGHT 500
#define MAX_POINTS 500
using namespace std;
using namespace cv;
void getPoints(vector<Point> &randPoints, int size)
{
for (int i = 20; i < WIN_HEIGHT; i+=20)
{
for (int j = 20; j < WIN_WIDTH; j+=20)
{
int x = i;
int y = j;
Point pt = Point(x, y);
randPoints.push_back(pt);
}
}
}
void gradientDescent( double &th1, double &th2, double &alpha, vector<Point> &pointVec)
{
int size = pointVec.size();
double sum1 = 0.0, sum2 = 0.0;
for (int i = 0; i < size; i++)
{
sum1 += (th1 + th2 * pointVec[i].x) - pointVec[i].y;
sum2 += ((th1 + th2 * pointVec[i].x) - pointVec[i].y) * pointVec[i].x;
}
th1 = th1 - ((alpha/( double)size) * sum1);
th2 = th2 - ((alpha/( double)size) * sum2);
}
int main(int argc, char**argv)
{
Mat img(WIN_WIDTH, WIN_HEIGHT, CV_8UC3);
img = Scalar(255, 255, 255);
vector<Point> randPoints;
getPoints(randPoints, MAX_POINTS);
int size = randPoints.size();
cout << "Training size = " << randPoints.size() << endl;
for (int i = 0; i < size; i++)
circle(img, randPoints[i], 4, Scalar(255, 0, 0), 1, 8);
double theta1 = 0, theta2 = 0.25, alpha = 0.0000001;
if (argc > 2)
{
theta1 = atof(argv[1]);
theta2 = atof(argv[2]);
}
int countConv = 0, prevY = 0;
cout << "Theta0 = " << theta1 << " Theta1 = " << theta2 << endl;
cout << "Learning rate = " << alpha << endl;
Mat tmpImg(WIN_WIDTH, WIN_HEIGHT, CV_8UC3);
while(1)
{
gradientDescent(theta1, theta2, alpha, randPoints);
int x = WIN_WIDTH+WIN_HEIGHT;
int y = theta1 + (theta2 * x);
int x1 = WIN_WIDTH-200;
int y1 = theta1 + theta2*x1;
img.copyTo(tmpImg);
circle(tmpImg, Point(x1, y1), 4, Scalar(0, 0, 255), -1, 8);
char text[64];
sprintf(text, "(%d, %d)", x1, y1);
putText(tmpImg, text, Point(x1+3, y1+3), FONT_HERSHEY_SCRIPT_SIMPLEX, 0.4, Scalar(0, 255, 0), 1, 8);
line(tmpImg, Point(0, theta1), Point(x, y), Scalar(0, 0, 255));
imshow("Gradient Descent", tmpImg);
waitKey(33);
}
imshow("Gradient Descent", tmpImg);
waitKey(0);
return 0;
}
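One small note on the loop above: while(1) never exits, so the imshow/waitKey(0) after it is never reached. A simple stopping rule based on how much the parameters change per iteration, with an arbitrarily chosen tolerance, could look like this:
double eps = 1e-9;                    // arbitrary stopping tolerance
double prevTheta1, prevTheta2;
do
{
    prevTheta1 = theta1;
    prevTheta2 = theta2;
    gradientDescent(theta1, theta2, alpha, randPoints);
    // ... drawing code unchanged ...
} while (fabs(theta1 - prevTheta1) > eps || fabs(theta2 - prevTheta2) > eps);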
The following code is a particle filter originally driven by the mouse; I changed it to track a colored object in a video, and this works.
But now I want to add scale to it; currently it only works with x and y. I tried to add scale but failed. Please help me add the scale of the detected object to the particle filter (a sketch of one possible extension follows the code below).
// Module "core"
#include <opencv2/core/core.hpp>
#include <opencv2/video/background_segm.hpp>
// Module "highgui"
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/legacy/legacy.hpp>
// Module "imgproc"
#include <opencv2/imgproc/imgproc.hpp>
#include "opencv2/videostab/videostab.hpp"
// Module "video"
#include <opencv2/video/video.hpp>
// Output
#include <iostream>
// Vector
#include <vector>
#define drawCross( center, color, d ) \
line( frame, cv::Point( center.x - d, center.y - d ), \
cv::Point( center.x + d, center.y + d ), color, 1, CV_AA, 0); \
line( frame, cv::Point( center.x + d, center.y - d ), \
cv::Point( center.x - d, center.y + d ), color, 1, CV_AA, 0 )
#define PLOT_PARTICLES 1
using namespace std;
using namespace cv;
// >>>>> Color to be tracked
#define MIN_H_BLUE 200
#define MAX_H_BLUE 300
// <<<<< Color to be tracked
vector<cv::Point> mouseV, particleV;
int main()
{
// Camera frame
cv::Mat frame;
char code = (char)-1;
cv::namedWindow("mouse particle");
cv::Mat_<float> measurement(2,1);
measurement.setTo(cv::Scalar(0));
int dim = 2;
int nParticles = 300;
float xRange = 650.0;
float yRange = 650.0;
float minRange[] = { 0, 0 };
float maxRange[] = { xRange, yRange };
CvMat LB, UB;
cvInitMatHeader(&LB, 2, 1, CV_32FC1, minRange);
cvInitMatHeader(&UB, 2, 1, CV_32FC1, maxRange);
CvConDensation* condens = cvCreateConDensation(dim, dim, nParticles);
cvConDensInitSampleSet(condens, &LB, &UB);
condens->DynamMatr[0] = 1.0;
condens->DynamMatr[1] = 0.0;
condens->DynamMatr[2] = 0.0;
condens->DynamMatr[3] = 1.0;
// Camera Index
string idx = "a.mp4";
// Camera Capture
cv::VideoCapture cap;
// >>>>> Camera Settings
if (!cap.open(idx))
{
cout << "Webcam not connected.\n" << "Please verify\n";
return EXIT_FAILURE;
}
cap.set(CV_CAP_PROP_FRAME_WIDTH, 1024);
cap.set(CV_CAP_PROP_FRAME_HEIGHT, 768);
// <<<<< Camera Settings
cout << "\nHit 'q' to exit...\n";
char ch = 0;
double ticks = 0;
bool found = false;
int notFoundCount = 0;
// >>>>> Main loop
while (ch != 'q' && ch != 'Q')
{
double precTick = ticks;
ticks = (double) cv::getTickCount();
double dT = (ticks - precTick) / cv::getTickFrequency(); //seconds
// Frame acquisition
cap >> frame;
mouseV.clear();
particleV.clear();
// >>>>> Noise smoothing
cv::Mat blur;
cv::GaussianBlur(frame, blur, cv::Size(5, 5), 3.0, 3.0);
// <<<<< Noise smoothing
// >>>>> HSV conversion
cv::Mat frmHsv;
cv::cvtColor(blur, frmHsv, CV_BGR2HSV);
// <<<<< HSV conversion
// >>>>> Color Thresholding
// Note: change parameters for different colors
cv::Mat rangeRes = cv::Mat::zeros(frame.size(), CV_8UC1);
cv::inRange(frmHsv, cv::Scalar(MIN_H_BLUE / 2, 100, 80),
cv::Scalar(MAX_H_BLUE / 2, 255, 255), rangeRes);
// <<<<< Color Thresholding
// >>>>> Improving the result
cv::erode(rangeRes, rangeRes, cv::Mat(), cv::Point(-1, -1), 2);
cv::dilate(rangeRes, rangeRes, cv::Mat(), cv::Point(-1, -1), 2);
// <<<<< Improving the result
// >>>>> Contours detection
vector<vector<cv::Point> > contours;
cv::findContours(rangeRes, contours, CV_RETR_EXTERNAL,
CV_CHAIN_APPROX_NONE);
// <<<<< Contours detection
// >>>>> Filtering
vector<vector<cv::Point> > balls;
vector<cv::Rect> ballsBox;
for (size_t i = 0; i < contours.size(); i++)
{
cv::Rect bBox;
bBox = cv::boundingRect(contours[i]);
float ratio = (float) bBox.width / (float) bBox.height;
if (ratio > 1.0f)
ratio = 1.0f / ratio;
// Searching for a bBox almost square
// if (ratio > 0.55 && bBox.area() >= 50)
// {
balls.push_back(contours[i]);
ballsBox.push_back(bBox);
measurement(0) = bBox.x;
measurement(1) = bBox.y;
measurement(2) = ballsBox.size(); // NOTE: measurement is declared as a 2x1 matrix above, so index 2 is out of bounds here
//cout << "Balls found:" << bBox.x << endl;
// }
}
/*
cout << "Balls found:" << ballsBox.size() << endl;
*/
cv::Point measPt(measurement(0),measurement(1));
mouseV.push_back(measPt);
for (int i = 0; i < condens->SamplesNum; i++) {
float diffX = (measurement(0) - condens->flSamples[i][0])/xRange;
float diffY = (measurement(1) - condens->flSamples[i][1])/yRange;
condens->flConfidence[i] = 1.0 / (sqrt(diffX * diffX + diffY * diffY));
// plot particles
#ifdef PLOT_PARTICLES
cv::Point partPt(condens->flSamples[i][0], condens->flSamples[i][1]);
drawCross(partPt , cv::Scalar(255,0,255), 2);
#endif
}
cvConDensUpdateByTime(condens);
cv::Point statePt(condens->State[0], condens->State[1]);
particleV.push_back(statePt);
for (int i = 0; i < particleV.size() - 1; i++) {
line(frame, particleV[i], particleV[i+1], cv::Scalar(0,255,0), 1);
}
drawCross( statePt, cv::Scalar(255,255,255), 5 );
drawCross( measPt, cv::Scalar(0,0,255), 5 );
for (size_t i = 0; i < balls.size(); i++)
{
cv::drawContours(frame, balls, i, CV_RGB(20,150,20), 1);
cv::rectangle(frame, ballsBox[i], CV_RGB(0,255,0), 2);
cv::Point center;
center.x = ballsBox[i].x + ballsBox[i].width / 2;
center.y = ballsBox[i].y + ballsBox[i].height / 2;
cv::circle(frame, center, 2, CV_RGB(20,150,20), -1);
stringstream sstr;
sstr << "(" << center.x << "," << center.y << ")";
cv::putText(frame, sstr.str(),
cv::Point(center.x + 3, center.y - 3),
cv::FONT_HERSHEY_SIMPLEX, 0.5, CV_RGB(20,150,20), 2);
}
cv::imshow("mouse particle", frame);
cv::imshow("ssssssss", rangeRes);
ch = cv::waitKey(1);
}
// <<<<< Main loop
return EXIT_SUCCESS;
}
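As mentioned above, here is a rough sketch of how the state could be extended from (x, y) to (x, y, scale) with the same legacy ConDensation API. The scale range and the choice of the bounding-box height as the scale measurement are assumptions for illustration, not part of the original code:
// Hypothetical 3-state (x, y, scale) variant of the set-up above.
int dim3 = 3;
float minRange3[] = { 0, 0, 0 };
float maxRange3[] = { xRange, yRange, 300.0f };   // assumed maximum scale, in pixels
CvMat LB3, UB3;
cvInitMatHeader(&LB3, 3, 1, CV_32FC1, minRange3);
cvInitMatHeader(&UB3, 3, 1, CV_32FC1, maxRange3);
CvConDensation* condens3 = cvCreateConDensation(dim3, dim3, nParticles);
cvConDensInitSampleSet(condens3, &LB3, &UB3);
// DynamMatr is dim3 x dim3, row major; the identity keeps the constant-position model.
for (int i = 0; i < dim3 * dim3; i++)
    condens3->DynamMatr[i] = (i % (dim3 + 1) == 0) ? 1.0f : 0.0f;
// Per frame: measurement must then be a 3x1 matrix (not 2x1), with measurement(2) set to
// the observed scale, e.g. the bounding-box height bBox.height.
for (int i = 0; i < condens3->SamplesNum; i++) {
    float diffX = (measurement(0) - condens3->flSamples[i][0]) / xRange;
    float diffY = (measurement(1) - condens3->flSamples[i][1]) / yRange;
    float diffS = (measurement(2) - condens3->flSamples[i][2]) / maxRange3[2];
    condens3->flConfidence[i] = 1.0f / sqrt(diffX*diffX + diffY*diffY + diffS*diffS);
}
cvConDensUpdateByTime(condens3);
// condens3->State[0], State[1], State[2] now hold the filtered x, y and scale.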
I'm currently trying to implement an example of OpenCV's projectPoints method. The idea behind this method is that, taking as input a set of 3D points, the translation/rotation vectors of a given camera, and its distortion coefficients, it outputs the corresponding 2D points in the image plane.
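Concretely, for each input 3D point X the function first moves it into camera coordinates, Xc = R*X + t, forms the normalized coordinates x' = Xc_x / Xc_z and y' = Xc_y / Xc_z, applies the distortion coefficients to (x', y'), and finally maps the distorted coordinates (x'', y'') to pixels with u = fx*x'' + cx and v = fy*y'' + cy.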
The source of code is as follows:
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <string>
std::vector<cv::Point3d> Generate3DPoints();
int main(int argc, char* argv[])
{
// Read 3D points
std::vector<cv::Point3d> objectPoints = Generate3DPoints();
std::vector<cv::Point2d> imagePoints;
cv::Mat intrisicMat(3, 3, cv::DataType<double>::type); // Intrinsic matrix
intrisicMat.at<double>(0, 0) = 1.6415318549788924e+003;
intrisicMat.at<double>(1, 0) = 0;
intrisicMat.at<double>(2, 0) = 0;
intrisicMat.at<double>(0, 1) = 0;
intrisicMat.at<double>(1, 1) = 1.7067753507885654e+003;
intrisicMat.at<double>(2, 1) = 0;
intrisicMat.at<double>(0, 2) = 5.3262822453148601e+002;
intrisicMat.at<double>(1, 2) = 3.8095355839052968e+002;
intrisicMat.at<double>(2, 2) = 1;
cv::Mat rVec(3, 1, cv::DataType<double>::type); // Rotation vector
rVec.at<double>(0) = -3.9277902400761393e-002;
rVec.at<double>(1) = 3.7803824407602084e-002;
rVec.at<double>(2) = 2.6445674487856268e-002;
cv::Mat tVec(3, 1, cv::DataType<double>::type); // Translation vector
tVec.at<double>(0) = 2.1158489381208221e+000;
tVec.at<double>(1) = -7.6847683212704716e+000;
tVec.at<double>(2) = 2.6169795190294256e+001;
cv::Mat distCoeffs(5, 1, cv::DataType<double>::type); // Distortion vector
distCoeffs.at<double>(0) = -7.9134632415085826e-001;
distCoeffs.at<double>(1) = 1.5623584435644169e+000;
distCoeffs.at<double>(2) = -3.3916502741726508e-002;
distCoeffs.at<double>(3) = -1.3921577146136694e-002;
distCoeffs.at<double>(4) = 1.1430734623697941e+002;
std::cout << "Intrisic matrix: " << intrisicMat << std::endl << std::endl;
std::cout << "Rotation vector: " << rVec << std::endl << std::endl;
std::cout << "Translation vector: " << tVec << std::endl << std::endl;
std::cout << "Distortion coef: " << distCoeffs << std::endl << std::endl;
std::vector<cv::Point2f> projectedPoints;
cv::projectPoints(objectPoints, rVec, tVec, intrisicMat, distCoeffs, projectedPoints);
/*for (unsigned int i = 0; i < projectedPoints.size(); ++i)
{
std::cout << "Image point: " << imagePoints[i] << " Projected to " << projectedPoints[i] << std::endl;
}*/
std::cout << "Press any key to exit.";
std::cin.ignore();
std::cin.get();
return 0;
}
std::vector<cv::Point3d> Generate3DPoints()
{
std::vector<cv::Point3d> points;
double x, y, z;
x = .5; y = .5; z = -.5;
points.push_back(cv::Point3d(x, y, z));
x = .5; y = .5; z = .5;
points.push_back(cv::Point3d(x, y, z));
x = -.5; y = .5; z = .5;
points.push_back(cv::Point3d(x, y, z));
x = -.5; y = .5; z = -.5;
points.push_back(cv::Point3d(x, y, z));
x = .5; y = -.5; z = -.5;
points.push_back(cv::Point3d(x, y, z));
x = -.5; y = -.5; z = -.5;
points.push_back(cv::Point3d(x, y, z));
x = -.5; y = -.5; z = .5;
points.push_back(cv::Point3d(x, y, z));
for(unsigned int i = 0; i < points.size(); ++i)
{
std::cout << points[i] << std::endl << std::endl;
}
return points;
}
The application crashes when I try to run the projectPoints method and I have no idea why. Any help would be greatly appreciated.
It seems to be complaining about the type of the output vector of points. Try to replace your call to projectPoints:
cv::projectPoints(objectPoints, rVec, tVec, intrisicMat, distCoeffs, projectedPoints);
by this call:
cv::projectPoints(objectPoints, rVec, tVec, intrisicMat, distCoeffs, imagePoints);
This uses the variable of type std::vector<cv::Point2d> instead of std::vector<cv::Point2f>.
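With that change, imagePoints is actually filled by the call, so the commented-out loop in main can be brought back in a simplified form to inspect the output (projectedPoints stays empty with this variant and should not be indexed):
for (unsigned int i = 0; i < imagePoints.size(); ++i)
    std::cout << "Projected point: " << imagePoints[i] << std::endl;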