I have implemented a polynomial curve fitting method in C++ with OpenCV, based on the idea that a function can be approximated by a polynomial (a sum of power terms). The system is written in matrix form and solved by least squares. Basically, the code is this:
cv::Mat PolynomialFit(std::vector<cv::Point>& points, int order) {
cv::Mat U(points.size(), (order + 1), CV_64F);
cv::Mat Y(points.size(), 1, CV_64F);
for (int i = 0; i < U.rows; i++) {
for (int j = 0; j < U.cols; j++) {
U.at<double>(i, j) = pow(points[i].x, j);
}
}
for (int i = 0; i < Y.rows; i++) {
Y.at<double>(i, 0) = points[i].y;
}
cv::Mat K((order + 1), 1, CV_64F);
if(U.data != NULL) {
K = (U.t() * U).inv() * U.t() * Y;
}
return K;
}
and in main this is how I call it:
int order = 2;
cv::Mat K = PolynomialFit(_points, order);
if(_points.size() > 0) {
for (int j = _points.at(0).x; j < _points.at(_points.size() - 1).x; j++) {
cv::Point2d point(j, 0);
for (int k = 0; k < order + 1; k++) {
point.y += K.at<double>(k, 0) * std::pow(j, k);
}
cv::circle(image, point, 1, cv::Scalar(0, 255, 0), CV_FILLED, CV_AA);
}
}
The problem is, it only works for a certain type of points. For example, in the image below, it only works for the points that are in the left curve. How could I change this behaviour? I already tried changing the order parameter, but the right curve still won't fit properly.
To fit that kind of curve, you have to transform the axes.
As shown below, you can compute two fit curves, one with the x axis horizontal and one with the x axis vertical, then compute the sum of squared errors for each and select the curve with the minimum sum.
To do this, you can simply exchange x and y in your PolynomialFit function.
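Below is a minimal sketch of that selection, reusing the PolynomialFit function and the _points vector from the question; the FitError helper is hypothetical, added only for illustration:
// Hypothetical helper (not from the question): evaluates the fitted polynomial and
// returns the sum of squared residuals over the points. When swapped is true, the
// roles of x and y are exchanged, matching a fit done on swapped points.
double FitError(const std::vector<cv::Point>& points, const cv::Mat& K, bool swapped) {
    double err = 0.0;
    for (const cv::Point& p : points) {
        double in  = swapped ? p.y : p.x;   // independent variable
        double out = swapped ? p.x : p.y;   // dependent variable
        double est = 0.0;
        for (int k = 0; k < K.rows; k++)
            est += K.at<double>(k, 0) * std::pow(in, k);
        err += (out - est) * (out - est);
    }
    return err;
}

// In main: fit both orientations and keep the one with the smaller residual sum.
std::vector<cv::Point> swappedPts;
for (const cv::Point& p : _points) swappedPts.emplace_back(p.y, p.x);
cv::Mat Kx = PolynomialFit(_points, order);     // models y = f(x)
cv::Mat Ky = PolynomialFit(swappedPts, order);  // models x = f(y)
bool useSwapped = FitError(_points, Ky, true) < FitError(_points, Kx, false);
// If useSwapped is true, draw the curve by stepping over y and computing x = f(y).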
Related
I initialize a 4x4 matrix to the identity matrix. Then I initialize a 4x1 vector B with random values. And I want OpenCV to solve the system Ax = B.
Since A is the identity, my expectation is that x = B.
However, I am not getting that. Here is my code:
Mat A(Size(4,4), CV_64FC1);
// Identity matrix, by force
for(int i=0; i<4; i++) {
for (int j=0; j<4; j++) {
if(i == j) A.at<float>(i,j) = 1;
else A.at<float>(i,j) = 0;
}
}
Mat B(Size(4, 1), CV_64FC1);
B.at<float>(0, 0) = 1;
B.at<float>(1, 0) = 2;
B.at<float>(2, 0) = 3;
B.at<float>(3, 0) = 4;
Mat sol(Size(4, 1), CV_64FC1);
solve(A, B, sol, DECOMP_LU);
The output of the above is sol = <02313, 0, 0, 0>, rather than <1,2,3,4>
First, Size takes width as its first parameter and then height, while the Mat constructor takes rows then cols, which is a bit confusing to be honest.
Second, CV_64FC1 is double, not float, so the elements must be accessed with at<double>. The rest is working fine:
Mat A(Size(4, 4), CV_64FC1);
// Identity matrix, by force
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
if (i ==j) A.at<double>(i, j) = 1;
else A.at<double>(i, j) = 0;
}
}
Mat B(Size(1,4), CV_64FC1);
B.at<double>(0, 0) = 1.0;
B.at<double>(1, 0) = 2.0;
B.at<double>(2, 0) = 3.0;
B.at<double>(3, 0) = 4.0;
Mat sol;
solve(A, B, sol, DECOMP_LU);
cout << sol << endl;
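As a side note (not part of the original answer), OpenCV can also build the identity matrix and the right-hand side directly, which avoids the hand-rolled loops entirely; a minimal sketch:
#include <opencv2/core.hpp>
#include <iostream>
using namespace cv;

int main() {
    Mat A = Mat::eye(4, 4, CV_64FC1);            // 4x4 identity, double precision
    Mat B = (Mat_<double>(4, 1) << 1, 2, 3, 4);  // 4x1 right-hand side
    Mat sol;
    solve(A, B, sol, DECOMP_LU);
    std::cout << sol << std::endl;               // prints [1; 2; 3; 4]
    return 0;
}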
I am trying to implement Laplace sharpening using C++; here's my code so far:
img = imread("cow.png", 0);
Mat convoSharp() {
//creating new image
Mat res = img.clone();
for (int y = 0; y < res.rows; y++) {
for (int x = 0; x < res.cols; x++) {
res.at<uchar>(y, x) = 0.0;
}
}
//variable declaration
int filter[3][3] = { {0,1,0},{1,-4,1},{0,1,0} };
//int filter[3][3] = { {-1,-2,-1},{0,0,0},{1,2,1} };
int height = img.rows;
int width = img.cols;
int filterHeight = 3;
int filterWidth = 3;
int newImageHeight = height - filterHeight + 1;
int newImageWidth = width - filterWidth + 1;
int i, j, h, w;
//convolution
for (i = 0; i < newImageHeight; i++) {
for (j = 0; j < newImageWidth; j++) {
for (h = i; h < i + filterHeight; h++) {
for (w = j; w < j + filterWidth; w++) {
res.at<uchar>(i,j) += filter[h - i][w - j] * img.at<uchar>(h,w);
}
}
}
}
//img - laplace
for (int y = 0; y < res.rows; y++) {
for (int x = 0; x < res.cols; x++) {
res.at<uchar>(y, x) = img.at<uchar>(y, x) - res.at<uchar>(y, x);
}
}
return res;
}
I don't really know what went wrong. I also tried a different filter, (1,1,1),(1,-8,1),(1,1,1), and the result is more or less the same. I don't think I need to normalize the result because it is in the range 0-255. Can anyone explain what really went wrong in my code?
Problem: uchar is too small to hold the partial results of the filtering operation.
You should create a temporary variable, accumulate all the filtered values in it, then check whether the value of temp is in the range <0,255>; if not, you need to clamp the end result to fit <0,255>.
When the line below executes,
res.at<uchar>(i,j) += filter[h - i][w - j] * img.at<uchar>(h,w);
the partial result may be greater than 255 (the maximum value of uchar) or negative (the filter contains -4 or -8). temp has to be a signed integer type to handle the case where the partial result is negative.
Fix:
for (i = 0; i < newImageHeight; i++) {
for (j = 0; j < newImageWidth; j++) {
int temp = res.at<uchar>(i,j); // added
for (h = i; h < i + filterHeight; h++) {
for (w = j; w < j + filterWidth; w++) {
temp += filter[h - i][w - j] * img.at<uchar>(h,w); // add to temp
}
}
// clamp temp to <0,255>; saturate_cast does exactly that for uchar
res.at<uchar>(i,j) = saturate_cast<uchar>(temp);
}
}
You should also clamp the values to the <0,255> range when you do the subtraction of the images.
The problem is partially that you’re overflowing your uchar, as rafix07 suggested, but that is not the full problem.
The Laplace of an image contains negative values. It has to. And you can't clamp those to 0, you need to preserve the negative values. Also, it can reach values up to 4*255 given your version of the filter. What this means is that you need to use a signed 16-bit type to store this output.
But there is a simpler and more efficient approach!
You are computing img - laplace(img). In terms of convolutions (*), this is 1 * img - laplace_kernel * img = (1 - laplace_kernel) * img. That is to say, you can combine both operations into a single convolution. The 1 kernel that doesn’t change the image is [(0,0,0),(0,1,0),(0,0,0)]. Subtract your Laplace kernel from that and you obtain [(0,-1,0),(-1,5,-1),(0,-1,0)].
So, simply compute the convolution with that kernel, and do it using int as intermediate type, which you then clamp to the uchar output range as shown by rafix07.
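If you would rather let OpenCV handle the intermediate type and the clamping, here is a minimal sketch of that combined kernel using cv::filter2D (an alternative to the hand-written loops, not the code from the question):
#include <opencv2/imgproc.hpp>

cv::Mat sharpen(const cv::Mat& img) {
    // Combined kernel: identity minus the Laplace kernel from the question.
    cv::Mat kernel = (cv::Mat_<float>(3, 3) <<  0, -1,  0,
                                               -1,  5, -1,
                                                0, -1,  0);
    cv::Mat res;
    // With an output depth of CV_8U, filter2D accumulates in a wider type internally
    // and saturates the result to the 0..255 range on output.
    cv::filter2D(img, res, CV_8U, kernel);
    return res;
}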
I have utilised the OpenCV GrabCut functionality to perform an image segmentation. When viewing the segmented image as per the code below, the segmentation is reasonable/correct. However, when looking at (and attempting to use) the segmentation mask values, I am getting some very large numbers, and not the enumerated values one would expect from the cv::GrabCutClasses enum.
void doGrabCut(){
Vector2i imgDims = getImageDims();
//Wite image to OpenCV Mat.
const Vector4u *rgb = getRGB();
cv::Mat rgbMat(imgDims.height, imgDims.width, CV_8UC3);
for (int i = 0; i < imgDims.height; i++) {
for (int j = 0; j < imgDims.width; j++) {
int idx = i * imgDims.width + j;
rgbMat.ptr<cv::Vec3b>(i)[j][2] = rgb[idx].x;
rgbMat.ptr<cv::Vec3b>(i)[j][1] = rgb[idx].y;
rgbMat.ptr<cv::Vec3b>(i)[j][0] = rgb[idx].z;
}
}
//Do graph cut.
cv::Mat res, fgModel, bgModel;
cv::Rect bb(bb_begin.x, bb_begin.y, bb_end.x - bb_begin.x, bb_end.y - bb_begin.y);
cv::grabCut(rgbMat, res, bb, bgModel, fgModel, 10, cv::GC_INIT_WITH_RECT);
cv::compare(res, cv::GC_PR_FGD, res, cv::CMP_EQ);
//Write mask.
Vector4u *maskPtr = getMask();//uchar
for (int i = 0; i < imgDims.height; i++) {
for (int j = 0; j < imgDims.width; j++) {
cv::GrabCutClasses classification = res.at<cv::GrabCutClasses>(i, j);
int idx = i * imgDims.width + j;
std::cout << classification << std::endl;//Strange numbers here.
maskPtr[idx].x = (classification == cv::GC_PR_FGD) ? 255 : 0;//This always evaluates to 0.
}
}
cv::Mat foreground(rgbMat.size(), CV_8UC3, cv::Scalar(255, 255, 255));
rgbMat.copyTo(foreground, res);
cv::imshow("GC Output", foreground);
}
Why would one get numbers outside the enumeration when the segmentation is qualitatively correct?
I doubt your //Write mask step: why do you re-iterate over res and set maskPtr as maskPtr[idx].x = (classification == cv::GC_PR_FGD) ? 255 : 0;? You already have a single-channel binary image stored in the res variable; cv::compare() returns a binary image.
However, if you still want to debug the values by iterating, you should use the standard technique for iterating over a single-channel image:
for (int i = 0; i < m.rows; i++) {
for (int j = 0; j < m.cols; j++) {
uchar classification = res.at<uchar>(i, j);
std::cout << int(classification) << ", ";
}
}
Since you are iterating over a single-channel 8-bit Mat, you must use res.at<uchar>(i, j) and not res.at<cv::GrabCutClasses>(i, j).
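If you do need to copy the mask out, the binary result of cv::compare() can be used directly; a minimal sketch under the same assumptions as the question (res and maskPtr as defined there):
// res holds 255 where the pixel compared equal to GC_PR_FGD and 0 everywhere else,
// so it can be copied straight into the output mask.
for (int i = 0; i < res.rows; i++) {
    for (int j = 0; j < res.cols; j++) {
        int idx = i * res.cols + j;
        maskPtr[idx].x = res.at<uchar>(i, j); // 255 = probable foreground, 0 = background
    }
}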
Everyone, I am trying to implement pattern matching with FFT, but I am not sure what the result should be (I think I am missing something, even though I read a lot about the problem and tried a lot of different implementations; this one is the best so far). Here is my FFT correlation function.
void fft2d(fftw_complex**& a, int rows, int cols, bool forward = true)
{
fftw_plan p;
for (int i = 0; i < rows; ++i)
{
p = fftw_plan_dft_1d(cols, a[i], a[i], forward ? FFTW_FORWARD : FFTW_BACKWARD, FFTW_ESTIMATE);
fftw_execute(p);
}
fftw_complex* t = (fftw_complex*)fftw_malloc(rows * sizeof(fftw_complex));
for (int j = 0; j < cols; ++j)
{
for (int i = 0; i < rows; ++i)
{
t[i][0] = a[i][j][0];
t[i][1] = a[i][j][1];
}
p = fftw_plan_dft_1d(rows, t, t, forward ? FFTW_FORWARD : FFTW_BACKWARD, FFTW_ESTIMATE);
fftw_execute(p);
for (int i = 0; i < rows; ++i)
{
a[i][j][0] = t[i][0];
a[i][j][1] = t[i][1];
}
}
fftw_free(t);
}
int findCorrelation(int argc, char* argv[])
{
BMP bigImage;
BMP keyImage;
BMP result;
RGBApixel blackPixel = { 0, 0, 0, 1 };
const bool swapQuadrants = (argc == 4);
if (argc < 3 || argc > 4) {
cout << "correlation img1.bmp img2.bmp" << endl;
return 1;
}
if (!keyImage.ReadFromFile(argv[1])) {
return 1;
}
if (!bigImage.ReadFromFile(argv[2])) {
return 1;
}
//Preparations
const int maxWidth = std::max(bigImage.TellWidth(), keyImage.TellWidth());
const int maxHeight = std::max(bigImage.TellHeight(), keyImage.TellHeight());
const int rowsCount = maxHeight;
const int colsCount = maxWidth;
BMP bigTemp = bigImage;
BMP keyTemp = keyImage;
keyImage.SetSize(maxWidth, maxHeight);
bigImage.SetSize(maxWidth, maxHeight);
for (int i = 0; i < rowsCount; ++i)
for (int j = 0; j < colsCount; ++j) {
RGBApixel p1;
if (i < bigTemp.TellHeight() && j < bigTemp.TellWidth()) {
p1 = bigTemp.GetPixel(j, i);
} else {
p1 = blackPixel;
}
bigImage.SetPixel(j, i, p1);
RGBApixel p2;
if (i < keyTemp.TellHeight() && j < keyTemp.TellWidth()) {
p2 = keyTemp.GetPixel(j, i);
} else {
p2 = blackPixel;
}
keyImage.SetPixel(j, i, p2);
}
//Here is where the transforms begin
fftw_complex **a = (fftw_complex**)fftw_malloc(rowsCount * sizeof(fftw_complex*));
fftw_complex **b = (fftw_complex**)fftw_malloc(rowsCount * sizeof(fftw_complex*));
fftw_complex **c = (fftw_complex**)fftw_malloc(rowsCount * sizeof(fftw_complex*));
for (int i = 0; i < rowsCount; ++i) {
a[i] = (fftw_complex*)fftw_malloc(colsCount * sizeof(fftw_complex));
b[i] = (fftw_complex*)fftw_malloc(colsCount * sizeof(fftw_complex));
c[i] = (fftw_complex*)fftw_malloc(colsCount * sizeof(fftw_complex));
for (int j = 0; j < colsCount; ++j) {
RGBApixel p1;
p1 = bigImage.GetPixel(j, i);
a[i][j][0] = (0.299*p1.Red + 0.587*p1.Green + 0.114*p1.Blue);
a[i][j][1] = 0.0;
RGBApixel p2;
p2 = keyImage.GetPixel(j, i);
b[i][j][0] = (0.299*p2.Red + 0.587*p2.Green + 0.114*p2.Blue);
b[i][j][1] = 0.0;
}
}
fft2d(a, rowsCount, colsCount);
fft2d(b, rowsCount, colsCount);
result.SetSize(maxWidth, maxHeight);
for (int i = 0; i < rowsCount; ++i)
for (int j = 0; j < colsCount; ++j) {
fftw_complex& y = a[i][j];
fftw_complex& x = b[i][j];
double u = x[0], v = x[1];
double m = y[0], n = y[1];
c[i][j][0] = u*m + n*v;
c[i][j][1] = v*m - u*n;
int fx = j;
if (fx>(colsCount / 2)) fx -= colsCount;
int fy = i;
if (fy>(rowsCount / 2)) fy -= rowsCount;
float r2 = (fx*fx + fy*fy);
const double cuttoffCoef = (maxWidth * maxHeight) / 37992.;
if (r2<128 * 128 * cuttoffCoef)
c[i][j][0] = c[i][j][1] = 0;
}
fft2d(c, rowsCount, colsCount, false);
const int halfCols = colsCount / 2;
const int halfRows = rowsCount / 2;
if (swapQuadrants) {
for (int i = 0; i < halfRows; ++i)
for (int j = 0; j < halfCols; ++j) {
std::swap(c[i][j][0], c[i + halfRows][j + halfCols][0]);
std::swap(c[i][j][1], c[i + halfRows][j + halfCols][1]);
}
for (int i = halfRows; i < rowsCount; ++i)
for (int j = 0; j < halfCols; ++j) {
std::swap(c[i][j][0], c[i - halfRows][j + halfCols][0]);
std::swap(c[i][j][1], c[i - halfRows][j + halfCols][1]);
}
}
for (int i = 0; i < rowsCount; ++i)
for (int j = 0; j < colsCount; ++j) {
const double& g = c[i][j][0];
RGBApixel pixel;
pixel.Alpha = 0;
int gInt = 255 - static_cast<int>(std::floor(g + 0.5));
pixel.Red = gInt;
pixel.Green = gInt;
pixel.Blue = gInt;
result.SetPixel(j, i, pixel);
}
BMP res;
res.SetSize(maxWidth, maxHeight);
result.WriteToFile("result.bmp");
return 0;
}
Sample output
This question would probably be more appropriately posted on another site like cross validated (metaoptimize.com used to also be a good one, but it appears to be gone)
That said:
There are two similar operations you can perform with the FFT: convolution and correlation. Convolution is used for determining how two signals interact with each other, whereas correlation can be used to express how similar two signals are to each other. Make sure you're doing the right operation, as both are commonly implemented through a DFT.
For this type of application of DFTs you usually wouldn't extract any useful information from the Fourier spectrum unless you were looking for frequencies common to both data sources (e.g., if you were comparing two bridges to see if their supports are spaced similarly).
Your 3rd image looks a lot like the power domain; normally I see the correlation output entirely grey except where overlap occurred. Your code definitely appears to be computing the inverse DFT, so unless I'm missing something the only other explanation I've come up with for the fuzzy look could be some of the "fudge factor" code in there like:
if (r2<128 * 128 * cuttoffCoef)
c[i][j][0] = c[i][j][1] = 0;
As for what you should expect: wherever there are common elements between the two images you'll see a peak. The larger the peak, the more similar the two images are near that region.
Some comments and/or recommended changes:
1) Convolution & correlation are not scale invariant operations. In other words, the size of your pattern image can make a significant difference in your output.
2) Normalize your images before correlation.
When you get the image data ready for the forward DFT pass:
a[i][j][0] = (0.299*p1.Red + 0.587*p1.Green + 0.114*p1.Blue);
a[i][j][1] = 0.0;
/* ... */
How you grayscale the image is your business (though I would've picked something like sqrt( r*r + b*b + g*g )). However, I don't see you doing anything to normalize the image.
The word "normalize" can take on a few different meanings in this context. Two common types:
normalize the range of values between 0.0 and 1.0
normalize the "whiteness" of the images
3) Run your pattern image through an edge enhancement filter. I've personally made use of Canny, Sobel, and I think I messed with a few others. As I recall, Canny was "quick'n dirty", Sobel was more expensive, but I got comparable results when it came time to do correlation. See chapter 24 of the "dsp guide" book that's freely available online. The whole book is worth your time, but if you're low on time then at a minimum chapter 24 will help a lot.
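If you would rather not write the edge filter by hand, here is a small OpenCV-based sketch of Sobel gradient-magnitude enhancement (it assumes an 8-bit grayscale input and is an illustration, not code from the original answer):
#include <opencv2/imgproc.hpp>

cv::Mat edgeEnhance(const cv::Mat& gray) {
    cv::Mat gx, gy, mag;
    cv::Sobel(gray, gx, CV_32F, 1, 0);      // horizontal gradient
    cv::Sobel(gray, gy, CV_32F, 0, 1);      // vertical gradient
    cv::magnitude(gx, gy, mag);             // gradient magnitude
    cv::normalize(mag, mag, 0, 255, cv::NORM_MINMAX);
    mag.convertTo(mag, CV_8U);
    return mag;
}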
4) Re-scale the output image between [0, 255]; if you want to implement thresholds, do it after this step because the thresholding step is lossy.
My memory on this one is hazy, but as I recall (edited for clarity):
You can scale the final image pixels (before rescaling) to [-1.0, 1.0] by dividing the entire power spectrum by its largest value
The largest power spectrum value is, conveniently enough, the center-most value in the power spectrum (corresponding to the lowest frequency)
If you divide it off the power spectrum, you'll end up doing twice the work; since FFTs are linear, you can delay the division until after the inverse DFT pass, when you're re-scaling the pixels to [0..255].
If after rescaling most of your values end up so black you can't see them, you can use a solution to the ODE y' = y(1 - y) (one example is the sigmoid f(x) = 1 / (1 + exp(-c*x) ), for some scaling factor c that gives better gradations). This has more to do with improving your ability to interpret the results visually than anything you might use to programmatically find peaks.
Edit: I said [0, 255] above. I suggest you rescale to [128, 255] or some other range whose lower bound is gray rather than black.
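A small illustrative sketch of such a sigmoid remapping into [128, 255]; the scaling factor c is a hand-tuned assumption, not a value from the original answer:
#include <cmath>

// Map a correlation value to [128, 255] through a sigmoid so faint peaks remain visible.
inline int toPixel(double value, double c = 0.05) {
    double s = 1.0 / (1.0 + std::exp(-c * value)); // in (0, 1)
    return 128 + static_cast<int>(s * 127.0);
}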
I have one function dealing with an image. In that function, I am trying to find several particular ellipses. The code works if I call it individually in a separate project, but in the whole project it crashes when the function returns.
I used many vectors to store information during the processing.
The error information:
Windows has triggered a breakpoint in KinectBridgeWithOpenCVBasics-D2D.exe.
This may be due to a corruption of the heap, which indicates a bug in KinectBridgeWithOpenCVBasics-D2D.exe or any of the DLLs it has loaded.
This may also be due to the user pressing F12 while KinectBridgeWithOpenCVBasics-D2D.exe has focus.
The output window may have more diagnostic information.
Could anyone tell me what is wrong and causing this crash? Even weirder, it works in the separate project.
The code is a little long, but it is really nothing special, just looking for several particular ellipses matching some pattern.
Thank you.
int FindNao(Mat* pImg, double* x, double* y)
{
// Fail if pointer is invalid
if (!pImg)
{
return 2;
}
// Fail if Mat contains no data
if (pImg->empty())
{
return 3;
}
//*x = 0; *y = 0;
Mat localMat = *pImg; // save a local copy of the image
cvtColor(~localMat, localMat, CV_BGR2GRAY); // Convert to gray image
threshold(localMat, localMat, 165, 255, THRESH_BINARY); // Convert into black-white image
Mat elementOpen = getStructuringElement(MORPH_ELLIPSE, Size(5,5), Point(-1,-1));
morphologyEx(localMat, localMat, MORPH_OPEN, elementOpen, Point(-1,-1), 1);
// Find all the contours in the black-and-white image
vector<vector<Point>> contours;
findContours(localMat.clone(), contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
localMat.release();
// Calculate the area of each contour
vector<double> areas; int num = contours.size();
/* If no contours are found, return S_OK */
if(num < 1)
return 1;
for(int i = 0; i < num; i++)
{
areas.push_back(contourArea(contours[i]));
}
// First round of selection
// The area should be small, the contour should be roughly elliptical, around the middle in the X direction and in the upper part of the image
vector<RotatedRect> selected_ellipses; // store the fitted ellipse fitted to the potential contour
vector<double> selected_areas; // store the contour area of the potential contour
int imgX = localMat.cols; int imgY = localMat.rows; // get the dimensions of the image
for(int i=0; i < num - 1; i++)
{
if(areas[i] < 350 && areas[i] > 10)
{
// fit an ellipse
RotatedRect ellipse1 = fitEllipse(Mat(contours[i]));
// it is a reasonable ellipse, and the area should be close to the
double length1 = ellipse1.size.height;
double length2 = ellipse1.size.width;
if( abs(1 - length1/length2) <= 0.8 &&
abs(1 - areas[i] / (CV_PI * length1 * length2 / 4) ) <= 0.2 )
{
selected_ellipses.push_back(ellipse1);
selected_areas.push_back(areas[i]);
}
}
}
/************ Second round of selection **************/
// Calculate each ellipse's dimension
vector<double> diff_dimension;
vector<double> ave_dimention;
/* If no contours are found, return S_OK */
if(selected_ellipses.size() < 1)
return 1;
for(int i = 0; i < selected_ellipses.size(); i++)
{
double difference = abs(1 - selected_ellipses[i].size.height / selected_ellipses[i].size.width);
diff_dimension.push_back(difference);
double average = (selected_ellipses[i].size.height + selected_ellipses[i].size.width) / 2;
ave_dimention.push_back(average);
}
vector<vector<int>> eyematches;
vector<vector<int>> cammatches;
// go over all the ellipses to find the matches with close area and dimension.
for(int i = 0; i < selected_ellipses.size() - 1; i++)
{
for(int j = i+1; j < selected_ellipses.size(); j++)
{
// looking for the eyes
if(diff_dimension[i] < 0.05 && diff_dimension[j] < 0.05)
{
double diff_area = abs( 1 - selected_areas[i] / selected_areas[j] );
if (diff_area < 0.05)
{
double diff_y = abs(selected_ellipses[i].center.y - selected_ellipses[j].center.y);
if(diff_y < 10)
{
vector<int> match1;
match1.push_back(i); match1.push_back(j);
eyematches.push_back(match1);
}
}
}
// looking for the cameras
double diff_x = abs(selected_ellipses[i].center.x - selected_ellipses[j].center.x);
if (diff_x < 10)
{
vector<int> match2;
match2.push_back(i); match2.push_back(j);
cammatches.push_back(match2);
}
}
}
/* Last check */
int num_eyes = eyematches.size();
int num_cams = cammatches.size();
if(num_eyes == 0 || num_cams == 0)
return 1;
// Calculate the vector between two eyes and the center
vector<Point> vector_eyes; vector<Point> center_eyes;
vector<vector<int>>::iterator ite = eyematches.begin();
while(ite < eyematches.end())
{
Point point;
point.x = selected_ellipses[(*ite)[0]].center.x - selected_ellipses[(*ite)[1]].center.x;
point.y = selected_ellipses[(*ite)[0]].center.y - selected_ellipses[(*ite)[1]].center.y;
vector_eyes.push_back(point);
point.x = (selected_ellipses[(*ite)[0]].center.x + selected_ellipses[(*ite)[1]].center.x)/2;
point.y = (selected_ellipses[(*ite)[0]].center.y + selected_ellipses[(*ite)[1]].center.y)/2;
center_eyes.push_back(point);
ite++;
}
// Calculate the vector between two cameras and the center
vector<Point> vector_cams; vector<Point> center_cams;
ite = cammatches.begin();
while(ite < cammatches.end())
{
Point point;
point.x = selected_ellipses[(*ite)[0]].center.x - selected_ellipses[(*ite)[1]].center.x;
point.y = selected_ellipses[(*ite)[0]].center.y - selected_ellipses[(*ite)[1]].center.y;
vector_cams.push_back(point);
point.x = (selected_ellipses[(*ite)[0]].center.x + selected_ellipses[(*ite)[1]].center.x)/2;
point.y = (selected_ellipses[(*ite)[0]].center.y + selected_ellipses[(*ite)[1]].center.y)/2;
center_cams.push_back(point);
ite++;
}
// Match the eyes and cameras, by calculating the center distances and intersection angle
vector<vector<int>> matches_eye_cam;
vector<vector<double>> matches_parameters;
for(int i = 0; i < num_eyes; i++)
{
for(int j = 0; j < num_cams; j++)
{
vector<int> temp1;
vector<double> temp2;
// calculate the distances
double distance = sqrt( double( (center_eyes[i].x - center_cams[j].x)^2 + (center_eyes[i].y - center_cams[j].y)^2 ) );
// calculate the cosine intersection angle
double cosAngle = vector_eyes[i].x * vector_cams[j].x + vector_eyes[i].y * vector_cams[j].y;
// store everything
temp1.push_back(i); temp1.push_back(j);
temp2.push_back(distance); temp2.push_back(cosAngle);
matches_eye_cam.push_back(temp1);
matches_parameters.push_back(temp2);
}
}
// go over to find the minimum
int min_dis = 0; int min_angle = 0;
vector<vector<double>>::iterator ite_para = matches_parameters.begin();
/* If no contours are found, return S_OK */
if(matches_parameters.size() < 1)
return 1;
for(int i = 1; i < matches_parameters.size(); i++)
{
if( (*(ite_para+min_dis))[0] > (*(ite_para+i))[0] )
min_dis = i;
if( (*(ite_para+min_angle))[1] > (*(ite_para+i))[1] )
min_angle = i;
}
// get the best match of eyes and cameras 's index
int eyes_index, cams_index;
vector<vector<int>>::iterator ite_match_eye_cam = matches_eye_cam.begin();
if(min_dis == min_angle)
{
// perfect match
eyes_index = (*(ite_match_eye_cam + min_dis))[0];
cams_index = (*(ite_match_eye_cam + min_dis))[1];
}
else
{
// tried to fuse them and find a better solution, but it did not work out, so
// go with the min_dis
eyes_index = (*(ite_match_eye_cam + min_dis))[0];
cams_index = (*(ite_match_eye_cam + min_dis))[1];
}
vector<vector<int>>::iterator ite_eyes = eyematches.begin();
vector<vector<int>>::iterator ite_cams = cammatches.begin();
// draw the eyes
ellipse(*pImg, selected_ellipses[(*(ite_eyes+eyes_index))[0]], Scalar(0, 255, 255), 2, 8);
ellipse(*pImg, selected_ellipses[(*(ite_eyes+eyes_index))[1]], Scalar(0, 255, 255), 2, 8);
// draw the camera
ellipse(*pImg, selected_ellipses[(*(ite_cams+cams_index))[0]], Scalar(0, 255, 0), 2, 8);
ellipse(*pImg, selected_ellipses[(*(ite_cams+cams_index))[1]], Scalar(0, 255, 0), 2, 8);
imshow("show", *pImg);
// find the upper camera
int m1 = (*(ite_cams+cams_index))[0];
int m2 = (*(ite_cams+cams_index))[1];
int upper;
if(selected_ellipses[m1].center.y < selected_ellipses[m2].center.y)
upper = m1;
else
upper = m2;
*x = selected_ellipses[upper].center.x;
*y = selected_ellipses[upper].center.y;
return 1;
}
int main()
{
Mat imO = imread("Capture.PNG");
double x, y;
FindNao(&imO, &x, &y);
cout<<x<<" "<<y<<endl;
cvWaitKey(0);
}