I have an OpenCV program which uses SURF to detect whether a template object appears in the video stream. I want to print the object name when the object is detected, but at the moment it seems to print whenever a "good" feature match has been found, and the vast majority of those are false positives.
My program is as follows:
//Step 1: Detect keypoints using SURF detector
//Step 2: Calculate descriptors (feature vectors)
//Step 3: Matching descriptor vectors using FLANN matcher
//Step 4: Localise the object
std::vector<Point2f> obj;
std::vector<Point2f> scene;
for( int i = 0; i < good_matches.size(); i++ )
{
//-- Get the keypoints from the good matches
obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
}
Mat H = findHomography( obj, scene, CV_RANSAC );
std::vector<Point2f> obj_corners(4);
obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( img_object.cols, 0 );
obj_corners[2] = cvPoint( img_object.cols, img_object.rows ); obj_corners[3] = cvPoint( 0, img_object.rows );
std::vector<Point2f> scene_corners(4);
perspectiveTransform( obj_corners, scene_corners, H);
//-- Draw lines between the corners (the mapped object in the scene - image_2 )
line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar(0, 255, 0), 4 );
line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), scene_corners[2] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[2] + Point2f( img_object.cols, 0), scene_corners[3] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[3] + Point2f( img_object.cols, 0), scene_corners[0] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
if() {
std::cout << fileNamePostCut << std::endl;
}
...
I'm not sure which condition to use in order to print the object name (fileNamePostCut).
Your goal is to dismiss false positives. You should pursue two approaches:
First, use the ratio test (from the SIFT paper) to dismiss ambiguous matches: only keep a match if its best distance is clearly smaller than the distance of the second-best match.
Second, you are already calculating a homography from your matches; use it as a model for the correct matches. cv::findHomography has an optional output parameter called mask: use it to determine how many matches actually contributed to the homography (these are called inliers). The more, the better; for example, only print the object's name if you have more than 10 inliers.
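A minimal sketch of that second approach (an illustration, not your exact code), reusing the obj, scene, and fileNamePostCut names from the snippet above; the inlier threshold of 10 is just a starting value to tune:
//Estimate the homography and keep the per-match inlier mask
std::vector<uchar> inlier_mask;
Mat H = findHomography( obj, scene, CV_RANSAC, 3.0, inlier_mask );
//Count how many matches actually support the homography (the inliers)
int inliers = 0;
for( size_t i = 0; i < inlier_mask.size(); i++ )
    if( inlier_mask[i] ) inliers++;
//Only report a detection when the model has enough support
if( !H.empty() && inliers > 10 )
{
    std::cout << fileNamePostCut << std::endl;
}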
I'm trying to recognize objects using the SURF algorithm. Since I had some problems while installing the nonfree module, I decided to use an older version of OpenCV (2.4.11; I'm running it in Visual Studio 2013).
Now I have some errors that aren't related to the syntax, and I don't know what to do. Here is the code:
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
using namespace cv;
using namespace std;
char key = 'a';
int framecount = 0;
SurfFeatureDetector detector( 500 );
SurfDescriptorExtractor extractor;
FlannBasedMatcher matcher;
Mat frame, des_object, image;
Mat des_image, img_matches, H;
std::vector<KeyPoint> kp_object;
std::vector<Point2f> obj_corners(4);
std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch > > matches;
std::vector<DMatch > good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
int main()
{
//reference image
Mat object = imread( "C:\\Users\\patri\\Desktop\\test.jpg", CV_LOAD_IMAGE_GRAYSCALE );
if( !object.data )
{
std::cout<< "Error reading object " << std::endl;
return -1;
}
//compute detectors and descriptors of reference image
detector.detect( object, kp_object );
extractor.compute( object, kp_object, des_object );
//create video capture object
VideoCapture cap(0);
//Get the corners from the object
obj_corners[0] = cvPoint(0,0);
obj_corners[1] = cvPoint( object.cols, 0 );
obj_corners[2] = cvPoint( object.cols, object.rows );
obj_corners[3] = cvPoint( 0, object.rows );
//while loop for real-time detection
while (key != 27)
{
//capture one frame from the video and store it in the Mat named 'frame'
cap >> frame;
if (framecount < 5)
{
framecount++;
continue;
}
//converting captured frame into gray scale
cvtColor(frame, image, CV_BGR2GRAY); //VideoCapture frames are BGR, not RGB
//extract detectors and descriptors of captured frame
detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );
//find matching descriptors of reference and captured image
matcher.knnMatch(des_object, des_image, matches, 2);
//ratio test: keep a match only if its best distance is less than
//0.6 times the distance of the second-best match
for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++)
{
if( matches[i].size() >= 2 && matches[i][0].distance < 0.6*matches[i][1].distance )
{
good_matches.push_back(matches[i][0]);
}
}
//Draw only "good" matches
drawMatches( object, kp_object, frame, kp_image, good_matches, img_matches,
Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//at least 4 good matches are needed to estimate a homography
if (good_matches.size() >= 4)
{
for( int i = 0; i < good_matches.size(); i++ )
{
//Get the keypoints from the good matches
obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
}
try
{
H = findHomography( obj, scene, CV_RANSAC );
}
catch(Exception e){}
perspectiveTransform( obj_corners, scene_corners, H);
//Draw lines between the corners (the mapped object in the scene image )
line( img_matches, scene_corners[0] + Point2f( object.cols, 0), scene_corners[1] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
line( img_matches, scene_corners[1] + Point2f( object.cols, 0), scene_corners[2] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[2] + Point2f( object.cols, 0), scene_corners[3] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[3] + Point2f( object.cols, 0), scene_corners[0] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
}
//Show detected matches
imshow( "Good Matches", img_matches );
//clear array
good_matches.clear();
key = waitKey(1);
}
return 0;
}
I'm also attaching a picture of the errors I get. Please help me find the problem.
About the warnings ("conversion from int to float"), I suppose the problem is initializing cv::Point2f with integers; in the four line( ... ) calls that draw the corners, you should use explicit float values, e.g. Point2f( (float)object.cols, 0.f ), instead of Point2f( object.cols, 0 ).
But those are only warnings.
The real problem is the linker error: you need the library that contains cv::SURF::SURF() (used by SurfFeatureDetector detector( 500 ); and SurfDescriptorExtractor extractor;).
You should add a library that I suppose is, in your case, opencv_nonfree241.lib, or a similar name.
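As a rough illustration of both fixes (the exact library name depends on your build; for 2.4.11 the naming convention would give opencv_nonfree2411.lib, with a d suffix for Debug builds):
//Tell the MSVC linker about the nonfree module (or add it under
//Project Properties -> Linker -> Input -> Additional Dependencies)
#pragma comment(lib, "opencv_nonfree2411.lib")
//Explicit float values silence the int-to-float conversion warnings
line( img_matches, scene_corners[0] + Point2f( (float)object.cols, 0.f ),
      scene_corners[1] + Point2f( (float)object.cols, 0.f ), Scalar(0, 255, 0), 4 );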
I'm running the following code. The goal is to detect whether the picture "card" is present on the "board", i.e. the screenshot that could contain that card.
The detection works nearly perfectly, but when I try to draw it I notice that some lines are too far apart.
While the points coming from the object are at a sensible distance from one another, their counterparts in the scene are often much too far apart, giving a wrong result.
As you can tell from the following screenshot, the object is detected in the scene, but many lines are way out of position. I wish to just drop the lines that are too far apart.
I think my function removes any lines that are too far apart when you compare the starting points on the object; however, this doesn't seem to happen for the points that are too far apart in the scene. How could I remove those?
bool isCardOnBoard(Mat card, string filename) {
//-- Step 1: Detect the keypoints using SURF Detector
vector<KeyPoint> keypoints_object;
detector.detect( card, keypoints_object );
//-- Step 2: Calculate descriptors (feature vectors)
Mat descriptors_object;
extractor.compute( card, keypoints_object, descriptors_object );
//-- Step 3: Matching descriptor vectors using FLANN matcher
// FlannBasedMatcher matcher;
BFMatcher matcher(extractor.defaultNorm(), false);
std::vector< DMatch > matches;
matcher.match( descriptors_object, descriptors_scene, matches );
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors_object.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
// printf("-- Max dist : %f \n", max_dist );
// printf("-- Min dist : %f \n", min_dist );
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_object.rows; i++ ) {
if( matches[i].distance < 3*min_dist)
good_matches.push_back( matches[i]);
}
if (good_matches.size() > 100) {
cout << filename << " NOT on the board" << endl;
return false;
}
Mat img_matches;
drawMatches( card, keypoints_object, board, keypoints_scene,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
// vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Localize the object
std::vector<Point2f> obj;
std::vector<Point2f> scene;
// cout << good_matches.size() << endl;
for( int i = 0; i < good_matches.size(); i++ ) {
//-- Get the keypoints from the good matches
obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
}
Mat H = findHomography( obj, scene, RANSAC );
//-- Get the corners from the image_1 ( the object to be "detected" )
std::vector<Point2f> obj_corners(4);
obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( card.cols, 0 );
obj_corners[2] = cvPoint( card.cols, card.rows ); obj_corners[3] = cvPoint( 0, card.rows );
std::vector<Point2f> scene_corners(4);
perspectiveTransform( obj_corners, scene_corners, H);
//-- Draw lines between the corners (the mapped object in the scene - image_2 )
line( img_matches, scene_corners[0] + Point2f( card.cols, 0), scene_corners[1] + Point2f( card.cols, 0), Scalar(0, 255, 0), 4 );
line( img_matches, scene_corners[1] + Point2f( card.cols, 0), scene_corners[2] + Point2f( card.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[2] + Point2f( card.cols, 0), scene_corners[3] + Point2f( card.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[3] + Point2f( card.cols, 0), scene_corners[0] + Point2f( card.cols, 0), Scalar( 0, 255, 0), 4 );
//-- Show detected matches
imshow( "Good Matches & Object detection", img_matches );
waitKey(0);
return true;
}
"The lines that are too far apart" as you said represent outliers: False positive matches from matcher.match( descriptors_object, descriptors_scene, matches );
When you estimate the homography H, you internally use statistical methods in order to reject those outliers. The method used here is called RANSAC. Another method available in OpenCV function is LMeDS.
As explain in OpenCV documentation: The method RANSAC can handle practically any ratio of outliers but it needs a threshold to distinguish inliers from outliers. The method LMeDS does not need any threshold but it works correctly only when there are more than 50% of inliers.
I suggest you try differents thresholds for RANSAC or try LMeDS instead.
Note that the printed characters in the scene will surely give you outliers..
If you just want to "drop the lines that are too far apart" (why?), you may want to draw only lines from matches in the re-projected object (?)
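A sketch of that last idea, using the variable names from your function above: ask findHomography for its inlier mask and draw only the matches that survive RANSAC.
//Estimate the homography and keep the per-match inlier mask
std::vector<uchar> mask;
Mat H = findHomography( obj, scene, RANSAC, 3.0, mask );
//Keep only the matches that agree with the estimated homography
std::vector<DMatch> inlier_matches;
for( size_t i = 0; i < good_matches.size(); i++ )
    if( mask[i] )
        inlier_matches.push_back( good_matches[i] );
//Draw only the surviving (inlier) lines
drawMatches( card, keypoints_object, board, keypoints_scene,
    inlier_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
    vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );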
I am making a program that tracks features with ORB from OpenCV (2.4.3). I followed
this tutorial and used advice from here.
My goal is to track an object (a face) in the video feed and draw a rectangle around it.
My program finds keypoints and matches them correctly, but when I try to use findHomography + perspectiveTransform to find new corners for the image, it usually returns nonsense values (though sometimes it returns a correct homography).
Here is an example picture:
Here is the corresponding problematic part:
Mat H = findHomography( obj, scene, CV_RANSAC );
//-- Get the corners from the image_1 ( the object to be "detected" )
std::vector<Point2f> obj_corners(4);
obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( img_object.cols, 0 );
obj_corners[2] = cvPoint( img_object.cols, img_object.rows ); obj_corners[3] = cvPoint( 0, img_object.rows );
std::vector<Point2f> scene_corners(4);
perspectiveTransform( obj_corners, scene_corners, H);
//-- Draw lines between the corners (the mapped object in the scene - image_2 )
line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar(0, 255, 0), 4 );
line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), scene_corners[2] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[2] + Point2f( img_object.cols, 0), scene_corners[3] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[3] + Point2f( img_object.cols, 0), scene_corners[0] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
The rest of the code is practically the same as in the links I provided.
The lines drawn seem completely random; my goal is only to get a minimal rectangle of the source object in the new scene, so if there is an alternative to using a homography, that works too.
P.S. The source image to track is a region copied from the video input and then tracked in new frames from that input; does that matter?
Estimating the homography directly from point correspondences (for example with getPerspectiveTransform) assumes that your corresponding point sets are not error-prone. However, with real-world data you cannot assume that. The solution is to use a robust estimation method such as RANSAC to solve the homography problem as an overdetermined system of equations.
You can use the findHomography function, which returns such a homography. Its input is a set of points; this set needs at least 4 points, but a larger set is better. The homography is only an estimate, but one that is more robust against errors: by using the CV_RANSAC flag it is able to remove outliers (wrong point correspondences) internally.
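If you also want to reject the frames where the estimate is still nonsense, one option (my suggestion, not part of the tutorial you followed) is to require a minimum number of RANSAC inliers and to check that the projected corners still form a convex quadrilateral before drawing. The thresholds below are assumptions to tune:
std::vector<uchar> mask;
Mat H = findHomography( obj, scene, CV_RANSAC, 3.0, mask );
int inliers = countNonZero( mask );   //matches consistent with H
std::vector<Point2f> scene_corners(4);
bool plausible = false;
if( !H.empty() && inliers >= 8 )      //minimum support, tune as needed
{
    perspectiveTransform( obj_corners, scene_corners, H );
    //a projected rectangle should still be a convex quadrilateral
    plausible = isContourConvex( scene_corners );
}
if( plausible )
{
    //draw the four lines as before
}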
I have been working with OpenCV for some time. This time I ran into a problem that is really irritating me.
I have a template image and I want to use feature matching to recognize it in my camera stream, but I get the following console error:
OpenCV Error: Unsupported format or combination of formats (type=0) in unknown function, file ..\..\..\opencv\modules\flann\src\miniflann.cpp, line 299
The code compiles well, but the error appears during execution. Here is the code:
#include "stdafx.h"
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>
#include "opencv2/core/core.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/features2d/features2d.hpp"
//#include "opencv2/legacy/legacy.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
using namespace cv;
using namespace std;
int main()
{
//reference image
Mat object = imread( "tel_tmpl.jpg", CV_LOAD_IMAGE_GRAYSCALE );
if( !object.data )
{
std::cout<< "Error reading object " << std::endl;
return -1;
}
char key = 'a';
int framecount = 0;
SurfFeatureDetector detector( 500 );
SurfDescriptorExtractor extractor;
FlannBasedMatcher matcher;
Mat frame, des_object, image;
Mat des_image, img_matches, H;
std::vector<KeyPoint> kp_object;
std::vector<Point2f> obj_corners(4);
std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch > > matches;
std::vector<DMatch > good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
//compute detectors and descriptors of reference image
detector.detect( object, kp_object );
extractor.compute( object, kp_object, des_object );
//create video capture object
VideoCapture cap(0);
//Get the corners from the object
obj_corners[0] = cvPoint(0,0);
obj_corners[1] = cvPoint( object.cols, 0 );
obj_corners[2] = cvPoint( object.cols, object.rows );
obj_corners[3] = cvPoint( 0, object.rows );
//while loop for real-time detection
while (key != 27)
{
//capture one frame from the video and store it in the Mat named 'frame'
cap >> frame;
if (framecount < 5)
{
framecount++;
continue;
}
//converting captured frame into gray scale
cvtColor(frame, image, CV_BGR2GRAY); //VideoCapture frames are BGR, not RGB
//extract detectors and descriptors of captured frame
detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );
//find matching descriptors of reference and captured image
matcher.knnMatch(des_object, des_image, matches, 2);
//ratio test: keep a match only if its best distance is less than
//0.6 times the distance of the second-best match
for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++)
{
if( matches[i].size() >= 2 && matches[i][0].distance < 0.6*matches[i][1].distance )
{
good_matches.push_back(matches[i][0]);
}
}
//Draw only "good" matches
drawMatches( object, kp_object, frame, kp_image, good_matches, img_matches,
Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//at least 4 good matches are needed to estimate a homography
if (good_matches.size() >= 4)
{
for( int i = 0; i < good_matches.size(); i++ )
{
//Get the keypoints from the good matches
obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
}
try
{
H = findHomography( obj, scene, CV_RANSAC );
}
catch(Exception e){}
perspectiveTransform( obj_corners, scene_corners, H);
//Draw lines between the corners (the mapped object in the scene image )
line( img_matches, scene_corners[0] + Point2f( object.cols, 0), scene_corners[1] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
line( img_matches, scene_corners[1] + Point2f( object.cols, 0), scene_corners[2] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[2] + Point2f( object.cols, 0), scene_corners[3] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[3] + Point2f( object.cols, 0), scene_corners[0] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
}
//Show detected matches
imshow( "Good Matches", img_matches );
//clear array
good_matches.clear();
key = waitKey(1);
}
return 0;
}
Thanks in advance
When I changed the camera, it worked fine. I don't know why I should have to change the camera?!
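For what it's worth, one plausible explanation (an assumption on my part, not confirmed here) is that some frames from the first camera produced no SURF keypoints, leaving des_image empty (type 0), which the FLANN matcher rejects. Guarding against empty descriptor matrices makes the loop robust regardless of the camera:
detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );
//skip frames where no descriptors were found; an empty descriptor Mat
//(type 0) would make knnMatch fail with the miniflann error above
if( des_object.empty() || des_image.empty() )
{
    imshow( "Good Matches", frame );
    key = waitKey(1);
    continue;
}
matcher.knnMatch(des_object, des_image, matches, 2);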
The thing is, I am unable to work out how to compute the center of mass with the existing code (which image or object to use, etc.) once the detected object is bounded by the rectangle, so that I can get the trajectory of its path.
I am using OpenCV 2.3. I found out there are two methods: Link1 and Link2 talk about the usage of moments, and the other method is to use the information of the bounding box (Link3). The method of moments requires image thresholding; however, when using SURF the image is in grayscale, so passing a gray image for thresholding just displays a white image! Now I am having a tough time understanding how I should calculate the centroid using the code below (especially what I should use instead of points[i].x, since I am using
obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
scene.push_back( kp_image[ good_matches[i].trainIdx ].pt )
where in my case numPoints = good_matches.size(), denoting the number of feature points) as mentioned in the documentation. If anyone can put up an implementation of how to use SURF with a centroid, it would be helpful.
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
using namespace cv;
int main()
{
Mat object = imread( "object.png", CV_LOAD_IMAGE_GRAYSCALE );
if( !object.data )
{
std::cout<< "Error reading object " << std::endl;
return -1;
}
//Detect the keypoints using SURF Detector
int minHessian = 500;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> kp_object;
detector.detect( object, kp_object );
//Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat des_object;
extractor.compute( object, kp_object, des_object );
FlannBasedMatcher matcher;
VideoCapture cap(0);
namedWindow("Good Matches");
std::vector<Point2f> obj_corners(4);
//Get the corners from the object
obj_corners[0] = cvPoint(0,0);
obj_corners[1] = cvPoint( object.cols, 0 );
obj_corners[2] = cvPoint( object.cols, object.rows );
obj_corners[3] = cvPoint( 0, object.rows );
char key = 'a';
int framecount = 0;
while (key != 27)
{
Mat frame;
cap >> frame;
if (framecount < 5)
{
framecount++;
continue;
}
Mat des_image, img_matches;
std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch > > matches;
std::vector<DMatch > good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
Mat H;
Mat image;
cvtColor(frame, image, CV_BGR2GRAY); //VideoCapture frames are BGR, not RGB
detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );
matcher.knnMatch(des_object, des_image, matches, 2);
for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
if( matches[i].size() >= 2 && matches[i][0].distance < 0.6*matches[i][1].distance ) //compare against the second-best match, [1]; the original [4] indexes out of bounds with k=2
{
good_matches.push_back(matches[i][0]);
}
}
//Draw only "good" matches
drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
if (good_matches.size() >= 4)
{
for( int i = 0; i < good_matches.size(); i++ )
{
//Get the keypoints from the good matches
obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
}
H = findHomography( obj, scene, CV_RANSAC );
perspectiveTransform( obj_corners, scene_corners, H);
//Draw lines between the corners (the mapped object in the scene image )
line( img_matches, scene_corners[0] + Point2f( object.cols, 0), scene_corners[1] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
line( img_matches, scene_corners[1] + Point2f( object.cols, 0), scene_corners[2] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[2] + Point2f( object.cols, 0), scene_corners[3] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[3] + Point2f( object.cols, 0), scene_corners[0] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
}
//Show detected matches
imshow( "Good Matches", img_matches );
key = waitKey(1);
}
return 0;
}
So, you already have your point lists:
obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
I think it's perfectly valid to compute the centroid based on those; no further image processing is necessary.
There are two methods. The 'center of mass' way is just the mean position of all points, like this:
Point2f cen(0,0);
for ( size_t i=0; i<scene.size(); i++ )
{
cen.x += scene[i].x;
cen.y += scene[i].y;
}
cen.x /= scene.size();
cen.y /= scene.size();
And the 'center of bounding box' way:
Point2f pmin(1000000,1000000);
Point2f pmax(0,0);
for ( size_t i=0; i<scene.size(); i++ )
{
if ( scene[i].x < pmin.x ) pmin.x = scene[i].x;
if ( scene[i].y < pmin.y ) pmin.y = scene[i].y;
if ( scene[i].x > pmax.x ) pmax.x = scene[i].x;
if ( scene[i].y > pmax.y ) pmax.y = scene[i].y;
}
Point2f cen( (pmin.x+pmax.x)/2, (pmin.y+pmax.y)/2 ); // midpoint of the bounding box
Note that the results will be different! They are only the same for circles, squares, and other point-symmetric objects.
// now draw a circle around the centroid:
cv::circle( img, cen, 10, Scalar(0,0,255), 2 );
// and, inside your matches loop, a line connecting each query and train point:
cv::line( img, scene[i], obj[i], Scalar(255,0,0), 2 );
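If you also have the homography H from the main loop, another option (my addition, not part of the answer above) is to take the centroid of the four projected corners (scene_corners), which follows the object's outline rather than the scattered match points:
//centroid of the projected object outline
Point2f cen(0,0);
for ( size_t i=0; i<scene_corners.size(); i++ )
{
    cen.x += scene_corners[i].x;
    cen.y += scene_corners[i].y;
}
cen.x /= scene_corners.size();
cen.y /= scene_corners.size();
//shift by the object's width because img_matches shows object and scene side by side
cv::circle( img_matches, cen + Point2f( (float)object.cols, 0.f ), 10, Scalar(0,0,255), 2 );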