Stitching 3 images - C++

The function below stitches two images, and that works fine. I call it in a loop:
for( int i = 1; i < 4; i++ )
{
    test = Bildpaar( test, image1[2+z] );
    z = z + 1;
}
imwrite("../superrechts.jpg",test);
But the second iteration fails: there is a black part in the panorama image and I can only see the third image.
My function:
Mat Bildpaar( Mat Bild1, Mat Bild2 )
{
    Mat img_keypoints_1;
    std::vector<KeyPoint> keypoints_1, keypoints_2; // the keypoint vectors
    int minHessian = 50;
    FastFeatureDetector detector( minHessian );     // FAST threshold (variable name kept from the SURF example)
    detector.detect( Bild1, keypoints_1 );
    detector.detect( Bild2, keypoints_2 );

    //-- Draw keypoints
    drawKeypoints( Bild1, keypoints_1, img_keypoints_1, Scalar::all(-1), DrawMatchesFlags::DEFAULT );

    //-- Descriptor
    SiftDescriptorExtractor extractor; // timing
    Mat descriptors_1, descriptors_2;
    extractor.compute( Bild1, keypoints_1, descriptors_1 );
    extractor.compute( Bild2, keypoints_2, descriptors_2 );
    // waitKey(0);

    //-- Matcher
    //-- Step 3: Matching descriptor vectors using FLANN matcher
    std::vector< DMatch > matches;
    FlannBasedMatcher matcher;
    matcher.match( descriptors_1, descriptors_2, matches );
    double max_dist = 0; double min_dist = 100;
    Mat img_matches;
    drawMatches( Bild1, keypoints_1, Bild2, keypoints_2, matches, img_matches );
    //namedWindow( filename, CV_WINDOW_NORMAL ); // Create a window for display.
    //imshow( filename, img_matches );

    std::vector< DMatch > good_matches;
    for( int i = 0; i < descriptors_1.rows; i++ )
    {
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }
    for( int i = 0; i < descriptors_1.rows; i++ )
    {
        if( matches[i].distance < 2*min_dist )
            good_matches.push_back( matches[i] );
    }
    Mat img_matches1;
    drawMatches( Bild1, keypoints_1, Bild2, keypoints_2,
                 good_matches, img_matches1, Scalar::all(-1), Scalar::all(-1),
                 vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    //-- everything works up to this point

    std::vector< Point2f > obj1;
    std::vector< Point2f > scene1;
    for( size_t i = 0; i < good_matches.size(); i++ )
    {
        //-- Get the keypoints from the good matches
        obj1.push_back( keypoints_1[ good_matches[i].queryIdx ].pt );
        scene1.push_back( keypoints_2[ good_matches[i].trainIdx ].pt );
    }

    //-- Find the homography matrix
    Mat H = findHomography( scene1, obj1, CV_RANSAC, 5 );
    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = cvPoint( 0, 0 );
    obj_corners[1] = cvPoint( Bild1.cols, 0 );
    obj_corners[2] = cvPoint( Bild1.cols, Bild1.rows );
    obj_corners[3] = cvPoint( 0, Bild1.rows );
    std::vector<Point2f> scene_corners(4);
    perspectiveTransform( obj_corners, scene_corners, H );

    //-- Draw lines between the corners (the mapped object in the scene - image_2 )
    line( img_matches1, scene_corners[0] + Point2f( Bild1.cols, 0), scene_corners[1] + Point2f( Bild1.cols, 0), Scalar( 0, 255, 0), 4 );
    line( img_matches1, scene_corners[1] + Point2f( Bild1.cols, 0), scene_corners[2] + Point2f( Bild1.cols, 0), Scalar( 0, 255, 0), 4 );
    line( img_matches1, scene_corners[2] + Point2f( Bild1.cols, 0), scene_corners[3] + Point2f( Bild1.cols, 0), Scalar( 0, 255, 0), 4 );
    line( img_matches1, scene_corners[3] + Point2f( Bild1.cols, 0), scene_corners[0] + Point2f( Bild1.cols, 0), Scalar( 0, 255, 0), 4 );

    //-- Warp images
    Mat result;
    warpPerspective( Bild2, result, H, Size( Bild2.cols*2, Bild2.rows ), INTER_CUBIC );
    Mat final( Size( Bild2.cols + Bild1.cols, Bild2.rows ), CV_8UC3 ); // correct; test whether the number of columns affects the result
    // size of img1
    Mat roi1( final, Rect( 0, 0, Bild1.cols, Bild1.rows ) );
    Mat roi2( final, Rect( 0, 0, result.cols, result.rows ) );
    result.copyTo( roi2 );
    Bild1.copyTo( roi1 );
    return final;
}
I think I am making some mistakes with the ROIs.
What can I do? I tried blending, but it only works if the images have the same size; alternatively I could remove the black part. What is the best thing I can do?
Maybe I am copying one ROI over the other. I tried changing the order of
result.copyTo(roi2);
Bild1.copyTo(roi1);
In one case I saw the left picture and in the other case the right picture. How can I see both together?
Best regards
I found one way (but it's not a good way to do it).
First you stitch two images (call the result final1).
Second, when you stitch final1 and image 3, you have to blend the ROIs; then it works, and you have to play around with alpha:
Mat blendo;
double alpha=0.3;
double beta = ( 1.0 - alpha );
addWeighted( roi1, alpha, roi2, beta, 0.0, blendo);
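A cleaner alternative to a fixed alpha blend is a masked copy: build a mask of the non-black pixels of the warped image and copy only those onto the canvas, so the black border never overwrites the first image. A minimal sketch, reusing result, final and Bild1 from the function above (and assuming Bild1 has already been copied onto the canvas):
Mat resultGray, mask;
cvtColor( result, resultGray, CV_BGR2GRAY );
threshold( resultGray, mask, 0, 255, THRESH_BINARY ); // mask is 255 wherever the warp produced content
Mat roi( final, Rect( 0, 0, result.cols, result.rows ) );
result.copyTo( roi, mask );                           // masked copy: black border pixels are skipped
One caveat: genuinely black pixels inside the second image are skipped as well; warping a white mask image with the same homography avoids that.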

Related

gcc linking error: undefined reference to symbol '_ZN2cv5flann12SearchParamsC1Eifb' [duplicate]

This question already has answers here:
What is an undefined reference/unresolved external symbol error and how do I fix it?
I am using OpenCV along with Eclipse to implement matching with homography. I am getting the following error while building:
/usr/bin/ld: ./src/flann.o: undefined reference to symbol '_ZN2cv5flann12SearchParamsC1Eifb'
(screenshot of the compilation error)
Here's my code:
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
using namespace cv;
void readme();
/** @function main */
int main()
{
    Mat img_object = imread( "/home/gaps/Desktop/DSC00268.JPG" );
    Mat img_scene = imread( "/home/gaps/Desktop/chart_1-2_1816185b.jpg" );
    if( !img_object.data || !img_scene.data )
    {
        std::cout << " --(!) Error reading images " << std::endl; return -1;
    }

    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 400;
    SurfFeatureDetector detector( minHessian );
    std::vector<KeyPoint> keypoints_object, keypoints_scene;
    detector.detect( img_object, keypoints_object );
    detector.detect( img_scene, keypoints_scene );

    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat descriptors_object, descriptors_scene;
    extractor.compute( img_object, keypoints_object, descriptors_object );
    extractor.compute( img_scene, keypoints_scene, descriptors_scene );

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match( descriptors_object, descriptors_scene, matches );
    double max_dist = 0; double min_dist = 100;

    //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < descriptors_object.rows; i++ )
    {
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }
    printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );

    //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
    std::vector< DMatch > good_matches;
    for( int i = 0; i < descriptors_object.rows; i++ )
    {
        if( matches[i].distance < 3*min_dist )
        { good_matches.push_back( matches[i] ); }
    }
    Mat img_matches;
    drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
                 good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                 vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    //-- Localize the object
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;
    for( int i = 0; i < good_matches.size(); i++ )
    {
        //-- Get the keypoints from the good matches
        obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
        scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
    }
    Mat H = findHomography( obj, scene, CV_RANSAC );

    //-- Get the corners from the image_1 ( the object to be "detected" )
    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = cvPoint( 0, 0 );
    obj_corners[1] = cvPoint( img_object.cols, 0 );
    obj_corners[2] = cvPoint( img_object.cols, img_object.rows );
    obj_corners[3] = cvPoint( 0, img_object.rows );
    std::vector<Point2f> scene_corners(4);
    perspectiveTransform( obj_corners, scene_corners, H );

    //-- Draw lines between the corners (the mapped object in the scene - image_2 )
    line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
    line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), scene_corners[2] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
    line( img_matches, scene_corners[2] + Point2f( img_object.cols, 0), scene_corners[3] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
    line( img_matches, scene_corners[3] + Point2f( img_object.cols, 0), scene_corners[0] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );

    //-- Show detected matches
    imshow( "Good Matches & Object detection", img_matches );
    waitKey(0);
    return 0;
}

/** @function readme */
void readme()
{
    std::cout << " Usage: ./SURF_descriptor <img1> <img2>" << std::endl;
}
You are not linking libopencv_flann, in which the undefined symbol is defined. Add -lopencv_flann to your linker options.
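For example, a command-line build might look like this (source and output names are placeholders; the exact library list depends on what the code uses):
g++ -o matcher src/flann.cpp \
    -lopencv_core -lopencv_highgui -lopencv_features2d \
    -lopencv_calib3d -lopencv_nonfree -lopencv_flann
In Eclipse, the equivalent is adding opencv_flann to the linker's Libraries list in the project's build settings.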

How to find the exact corners of template image inside a source image using surf algorithm in opencv

I am using the code below, provided on the OpenCV site, for scale- and rotation-invariant template matching, but I want to calculate the exact pixel coordinates of the template image in the source image, especially for a rotated image.
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
using namespace cv;
void readme();
/** @function main */
int main( int argc, char** argv )
{
    if( argc != 3 )
    {
        readme();
        return -1;
    }
    Mat img_object = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
    Mat img_scene = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
    if( !img_object.data || !img_scene.data )
    {
        std::cout << " --(!) Error reading images " << std::endl;
        return -1;
    }

    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 400;
    SurfFeatureDetector detector( minHessian );
    std::vector<KeyPoint> keypoints_object, keypoints_scene;
    detector.detect( img_object, keypoints_object );
    detector.detect( img_scene, keypoints_scene );

    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat descriptors_object, descriptors_scene;
    extractor.compute( img_object, keypoints_object, descriptors_object );
    extractor.compute( img_scene, keypoints_scene, descriptors_scene );

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match( descriptors_object, descriptors_scene, matches );
    double max_dist = 0;
    double min_dist = 100;

    //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < descriptors_object.rows; i++ )
    {
        double dist = matches[i].distance;
        if( dist < min_dist )
            min_dist = dist;
        if( dist > max_dist )
            max_dist = dist;
    }
    printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );

    //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
    std::vector< DMatch > good_matches;
    for( int i = 0; i < descriptors_object.rows; i++ )
    {
        if( matches[i].distance < 3*min_dist )
        {
            good_matches.push_back( matches[i] );
        }
    }
    Mat img_matches;
    drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
                 good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                 vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    //-- Localize the object
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;
    for( int i = 0; i < good_matches.size(); i++ )
    {
        //-- Get the keypoints from the good matches
        obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
        scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
    }
    Mat H = findHomography( obj, scene, CV_RANSAC );

    //-- Get the corners from the image_1 ( the object to be "detected" )
    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = cvPoint( 0, 0 );
    obj_corners[1] = cvPoint( img_object.cols, 0 );
    obj_corners[2] = cvPoint( img_object.cols, img_object.rows );
    obj_corners[3] = cvPoint( 0, img_object.rows );
    std::vector<Point2f> scene_corners(4);
    perspectiveTransform( obj_corners, scene_corners, H );

    //-- Draw lines between the corners (the mapped object in the scene - image_2 )
    line( img_matches, scene_corners[0] + Point2f( img_object.cols, 0), scene_corners[1] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
    line( img_matches, scene_corners[1] + Point2f( img_object.cols, 0), scene_corners[2] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
    line( img_matches, scene_corners[2] + Point2f( img_object.cols, 0), scene_corners[3] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );
    line( img_matches, scene_corners[3] + Point2f( img_object.cols, 0), scene_corners[0] + Point2f( img_object.cols, 0), Scalar( 0, 255, 0), 4 );

    //-- Show detected matches
    imshow( "Good Matches & Object detection", img_matches );
    waitKey(0);
    return 0;
}

/** @function readme */
void readme()
{
    std::cout << " Usage: ./SURF_descriptor <img1> <img2>" << std::endl;
}
You already have these coordinates in scene_corners after computing the perspective transform. The four points in scene_corners make up the quadrilateral that is the template match found in the scene, so these four corners are what you're looking for.
Further, if you wish to find the transformed coordinates of any point from the template in the scene, you just need to apply the same homography to that point, via the perspectiveTransform function.
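For example, to map a single template point (here the template's center, purely as an illustration) into scene coordinates with the same H:
// Map an arbitrary template pixel into the scene using the homography H
std::vector<Point2f> template_pts(1), mapped_pts(1);
template_pts[0] = Point2f( img_object.cols / 2.0f, img_object.rows / 2.0f ); // any template pixel
perspectiveTransform( template_pts, mapped_pts, H );
// mapped_pts[0] now holds that pixel's coordinates in img_scene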

OpenCV - BFMatcher only checks for distance between features on the object and not on the scene?

I'm running the following code. The goal is to detect if the picture "card" is present on the "board", which is the screenshot that could contain that card.
The detection works nearly perfectly, but when I try to draw it I notice that some lines are too far apart.
While the points on the object side line up well, their counterparts in the scene are often too far apart, giving a wrong result.
As you can tell from the following screenshot, the object is detected in the scene, but many lines are way out of position. I wish to just drop the lines that are too far apart.
I think my function removes the lines that are too far apart when comparing the starting points on the object. However, this doesn't seem to happen for the points that are too far apart in the scene. How could I remove those?
bool isCardOnBoard(Mat card, string filename) {
    //-- Step 1: Detect the keypoints using SURF Detector
    vector<KeyPoint> keypoints_object;
    detector.detect( card, keypoints_object );

    //-- Step 2: Calculate descriptors (feature vectors)
    Mat descriptors_object;
    extractor.compute( card, keypoints_object, descriptors_object );

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    // FlannBasedMatcher matcher;
    BFMatcher matcher(extractor.defaultNorm(), false);
    std::vector< DMatch > matches;
    matcher.match( descriptors_object, descriptors_scene, matches );
    double max_dist = 0; double min_dist = 100;

    //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < descriptors_object.rows; i++ )
    {
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }
    // printf("-- Max dist : %f \n", max_dist );
    // printf("-- Min dist : %f \n", min_dist );

    //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
    std::vector< DMatch > good_matches;
    for( int i = 0; i < descriptors_object.rows; i++ ) {
        if( matches[i].distance < 3*min_dist )
            good_matches.push_back( matches[i] );
    }
    if (good_matches.size() > 100) {
        cout << filename << " NOT on the board" << endl;
        return false;
    }
    Mat img_matches;
    drawMatches( card, keypoints_object, board, keypoints_scene,
                 good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                 vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    //-- Localize the object
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;
    // cout << good_matches.size() << endl;
    for( int i = 0; i < good_matches.size(); i++ ) {
        //-- Get the keypoints from the good matches
        obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
        scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
    }
    Mat H = findHomography( obj, scene, RANSAC );

    //-- Get the corners from the image_1 ( the object to be "detected" )
    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = cvPoint( 0, 0 );
    obj_corners[1] = cvPoint( card.cols, 0 );
    obj_corners[2] = cvPoint( card.cols, card.rows );
    obj_corners[3] = cvPoint( 0, card.rows );
    std::vector<Point2f> scene_corners(4);
    perspectiveTransform( obj_corners, scene_corners, H );

    //-- Draw lines between the corners (the mapped object in the scene - image_2 )
    line( img_matches, scene_corners[0] + Point2f( card.cols, 0), scene_corners[1] + Point2f( card.cols, 0), Scalar( 0, 255, 0), 4 );
    line( img_matches, scene_corners[1] + Point2f( card.cols, 0), scene_corners[2] + Point2f( card.cols, 0), Scalar( 0, 255, 0), 4 );
    line( img_matches, scene_corners[2] + Point2f( card.cols, 0), scene_corners[3] + Point2f( card.cols, 0), Scalar( 0, 255, 0), 4 );
    line( img_matches, scene_corners[3] + Point2f( card.cols, 0), scene_corners[0] + Point2f( card.cols, 0), Scalar( 0, 255, 0), 4 );

    //-- Show detected matches
    imshow( "Good Matches & Object detection", img_matches );
    waitKey(0);
    return true;
}
"The lines that are too far apart" as you said represent outliers: False positive matches from matcher.match( descriptors_object, descriptors_scene, matches );
When you estimate the homography H, you internally use statistical methods in order to reject those outliers. The method used here is called RANSAC. Another method available in OpenCV function is LMeDS.
As explain in OpenCV documentation: The method RANSAC can handle practically any ratio of outliers but it needs a threshold to distinguish inliers from outliers. The method LMeDS does not need any threshold but it works correctly only when there are more than 50% of inliers.
I suggest you try differents thresholds for RANSAC or try LMeDS instead.
Note that the printed characters in the scene will surely give you outliers..
If you just want to "drop the lines that are too far apart" (why?), you may want to draw only lines from matches in the re-projected object (?)
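One concrete way to do that is to ask findHomography for its RANSAC inlier mask and keep only the matches it marks as inliers; a minimal sketch, reusing obj, scene and good_matches from the code above:
// Let RANSAC report which of the good matches it accepted as inliers
Mat inlier_mask;
Mat H = findHomography( obj, scene, RANSAC, 3.0, inlier_mask );
std::vector<DMatch> inlier_matches;
for( size_t i = 0; i < good_matches.size(); i++ )
    if( inlier_mask.at<uchar>( (int)i ) )        // nonzero = inlier
        inlier_matches.push_back( good_matches[i] );
// then pass inlier_matches instead of good_matches to drawMatches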

OpenCV : How to find the center of mass/centroid for motion information

The thing is, I am unable to implement the center-of-mass computation with the existing code (which image object to use, etc., after the detected object is bounded by the rectangle), so that I may get the trajectory of its path.
I am using OpenCV 2.3. I found out there are two methods: Link1 and Link2 talk about the usage of moments, and the other method is to use the information of the bounding box (Link3). The method of moments requires image thresholding. However, when using SURF the image is in grayscale, and passing a gray image for thresholding displays a white image! Now, I am having a tough time understanding how I should calculate the centroid using the code below (especially what I should use instead of points[i].x, since I am using
obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
where in my case numPoints = good_matches.size(), denoting the number of feature points) as mentioned in the documentation. If anyone can put up an implementation of how to use SURF with a centroid, it would be helpful.
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
using namespace cv;
int main()
{
    Mat object = imread( "object.png", CV_LOAD_IMAGE_GRAYSCALE );
    if( !object.data )
    {
        std::cout << "Error reading object " << std::endl;
        return -1;
    }

    //Detect the keypoints using SURF Detector
    int minHessian = 500;
    SurfFeatureDetector detector( minHessian );
    std::vector<KeyPoint> kp_object;
    detector.detect( object, kp_object );

    //Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat des_object;
    extractor.compute( object, kp_object, des_object );

    FlannBasedMatcher matcher;
    VideoCapture cap(0);
    namedWindow("Good Matches");

    std::vector<Point2f> obj_corners(4);
    //Get the corners from the object
    obj_corners[0] = cvPoint( 0, 0 );
    obj_corners[1] = cvPoint( object.cols, 0 );
    obj_corners[2] = cvPoint( object.cols, object.rows );
    obj_corners[3] = cvPoint( 0, object.rows );

    char key = 'a';
    int framecount = 0;
    while (key != 27)
    {
        Mat frame;
        cap >> frame;
        if (framecount < 5)
        {
            framecount++;
            continue;
        }
        Mat des_image, img_matches;
        std::vector<KeyPoint> kp_image;
        std::vector<vector<DMatch > > matches;
        std::vector<DMatch > good_matches;
        std::vector<Point2f> obj;
        std::vector<Point2f> scene;
        std::vector<Point2f> scene_corners(4);
        Mat H;
        Mat image;

        cvtColor(frame, image, CV_RGB2GRAY);
        detector.detect( image, kp_image );
        extractor.compute( image, kp_image, des_image );
        matcher.knnMatch(des_object, des_image, matches, 2);

        for(int i = 0; i < min(des_image.rows-1, (int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
        {
            // ratio test; knnMatch(..., 2) returns at most 2 neighbours, so check the size first
            if( matches[i].size() == 2 && matches[i][0].distance < 0.6*(matches[i][1].distance) )
            {
                good_matches.push_back(matches[i][0]);
            }
        }

        //Draw only "good" matches
        drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

        if (good_matches.size() >= 4)
        {
            for( int i = 0; i < good_matches.size(); i++ )
            {
                //Get the keypoints from the good matches
                obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
                scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
            }
            H = findHomography( obj, scene, CV_RANSAC );
            perspectiveTransform( obj_corners, scene_corners, H );

            //Draw lines between the corners (the mapped object in the scene image )
            line( img_matches, scene_corners[0] + Point2f( object.cols, 0), scene_corners[1] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
            line( img_matches, scene_corners[1] + Point2f( object.cols, 0), scene_corners[2] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[2] + Point2f( object.cols, 0), scene_corners[3] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[3] + Point2f( object.cols, 0), scene_corners[0] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
        }

        //Show detected matches
        imshow( "Good Matches", img_matches );
        key = waitKey(1);
    }
    return 0;
}
So, you already have your point lists:
obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
I think it's perfectly valid to calculate the centroid based on those; no further image processing is necessary.
There are two methods. The 'center of mass' way is just the mean position of all points, like this:
Point2f cen(0,0);
for ( size_t i=0; i<scene.size(); i++ )
{
    cen.x += scene[i].x;
    cen.y += scene[i].y;
}
cen.x /= scene.size();
cen.y /= scene.size();
and the 'center of bbox' way
Point2f pmin(1000000,1000000);
Point2f pmax(0,0);
for ( size_t i=0; i<scene.size(); i++ )
{
    if ( scene[i].x < pmin.x ) pmin.x = scene[i].x;
    if ( scene[i].y < pmin.y ) pmin.y = scene[i].y;
    if ( scene[i].x > pmax.x ) pmax.x = scene[i].x;
    if ( scene[i].y > pmax.y ) pmax.y = scene[i].y;
}
Point2f cen( (pmin.x + pmax.x)/2, (pmin.y + pmax.y)/2 ); // midpoint of the bounding box
Note that the results will be different! They are only the same for circles, squares and other point-symmetric objects.
// now draw a circle around the centroid (img is whatever image you draw on, e.g. img_matches):
cv::circle( img, cen, 10, Scalar(0,0,255), 2 );
// and lines connecting the matched query and train points:
for ( size_t i=0; i<scene.size(); i++ )
    cv::line( img, scene[i], obj[i], Scalar(255,0,0), 2 );
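To get the trajectory over time, one could simply append the per-frame centroid to a list kept outside the capture loop and draw the path so far; a minimal sketch (trajectory is an assumed name, and the Point2f(object.cols, 0) offset shifts the points into the scene half of img_matches):
// declare once, before the capture loop:
std::vector<Point2f> trajectory;
// then, inside the loop, after computing cen for the current frame:
trajectory.push_back( cen );
for ( size_t i = 1; i < trajectory.size(); i++ )  // draw the path so far
    cv::line( img_matches, trajectory[i-1] + Point2f( object.cols, 0 ),
              trajectory[i] + Point2f( object.cols, 0 ), Scalar( 0, 0, 255 ), 2 );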

OpenCV cv::findHomography runtime error

I am trying to compile and run the code from the "Features2D + Homography to find a known object" tutorial, and I am getting this run-time error:
OpenCV Error: Assertion failed (npoints >= 0 && points2.checkVector(2) == npoints && points1.type() == points2.type()) in unknown function, file c:\Users\vp\work\ocv\opencv\modules\calib3d\src\fundam.cpp, line 1062
After debugging, I find that the program crashes at the findHomography function:
Unhandled exception at 0x760ab727 in OpenCVTemplateMatch.exe: Microsoft C++ exception: cv::Exception at memory location 0x0029eb3c..
In the introduction of the OpenCV documentation, the "cv Namespace" chapter says:
Some of the current or future OpenCV external names may conflict with STL or other libraries. In this case, use explicit namespace specifiers to resolve the name conflicts.
I changed my code to use explicit namespace specifiers everywhere, but the problem was not solved. If you can, please help me with this problem, or tell me which function does the same thing as findHomography without crashing the program.
And this is my code
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
void readme();
/** @function main */
int main( int argc, char** argv )
{
    if( argc != 3 )
    { readme(); return -1; }

    cv::Mat img_object = cv::imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
    cv::Mat img_scene = cv::imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
    if( !img_object.data || !img_scene.data )
    { std::cout << " --(!) Error reading images " << std::endl; return -1; }

    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 400;
    cv::SurfFeatureDetector detector( minHessian );
    std::vector<cv::KeyPoint> keypoints_object, keypoints_scene;
    detector.detect( img_object, keypoints_object );
    detector.detect( img_scene, keypoints_scene );

    //-- Step 2: Calculate descriptors (feature vectors)
    cv::SurfDescriptorExtractor extractor;
    cv::Mat descriptors_object, descriptors_scene;
    extractor.compute( img_object, keypoints_object, descriptors_object );
    extractor.compute( img_scene, keypoints_scene, descriptors_scene );

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    cv::FlannBasedMatcher matcher;
    std::vector< cv::DMatch > matches;
    matcher.match( descriptors_object, descriptors_scene, matches );
    double max_dist = 0; double min_dist = 100;

    //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < descriptors_object.rows; i++ )
    {
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }
    printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );

    //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
    std::vector< cv::DMatch > good_matches;
    for( int i = 0; i < descriptors_object.rows; i++ )
    {
        if( matches[i].distance < 3*min_dist )
        { good_matches.push_back( matches[i] ); }
    }
    cv::Mat img_matches;
    cv::drawMatches( img_object, keypoints_object, img_scene, keypoints_scene,
                     good_matches, img_matches, cv::Scalar::all(-1), cv::Scalar::all(-1),
                     std::vector<char>(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    //-- Localize the object
    std::vector<cv::Point2f> obj;
    std::vector<cv::Point2f> scene;
    for( int i = 0; i < good_matches.size(); i++ )
    {
        //-- Get the keypoints from the good matches
        obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
        scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
    }
    cv::Mat H = cv::findHomography( obj, scene, CV_RANSAC );

    //-- Get the corners from the image_1 ( the object to be "detected" )
    std::vector<cv::Point2f> obj_corners(4);
    obj_corners[0] = cvPoint( 0, 0 );
    obj_corners[1] = cvPoint( img_object.cols, 0 );
    obj_corners[2] = cvPoint( img_object.cols, img_object.rows );
    obj_corners[3] = cvPoint( 0, img_object.rows );
    std::vector<cv::Point2f> scene_corners(4);
    cv::perspectiveTransform( obj_corners, scene_corners, H );

    //-- Draw lines between the corners (the mapped object in the scene - image_2 )
    cv::line( img_matches, scene_corners[0] + cv::Point2f( img_object.cols, 0), scene_corners[1] + cv::Point2f( img_object.cols, 0), cv::Scalar( 0, 255, 0), 4 );
    cv::line( img_matches, scene_corners[1] + cv::Point2f( img_object.cols, 0), scene_corners[2] + cv::Point2f( img_object.cols, 0), cv::Scalar( 0, 255, 0), 4 );
    cv::line( img_matches, scene_corners[2] + cv::Point2f( img_object.cols, 0), scene_corners[3] + cv::Point2f( img_object.cols, 0), cv::Scalar( 0, 255, 0), 4 );
    cv::line( img_matches, scene_corners[3] + cv::Point2f( img_object.cols, 0), scene_corners[0] + cv::Point2f( img_object.cols, 0), cv::Scalar( 0, 255, 0), 4 );

    //-- Show detected matches
    cv::imshow( "Good Matches & Object detection", img_matches );
    cv::waitKey(0);
    return 0;
}

/** @function readme */
void readme()
{ std::cout << " Usage: ./SURF_descriptor <img1> <img2>" << std::endl; }
Today I ran into the same problem with this example code. @mathematical-coffee was right: there were no features extracted, thus obj and scene were empty. I replaced the test pictures and it worked. From texture-style images you can't extract SURF features.
Another way is to lower the parameter minHessian, e.g. int minHessian = 20;
or use the FAST feature detector by changing a few lines:
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 15;
FastFeatureDetector detector( minHessian );
The actual answer is within the error message:
npoints >= 0 && points2.checkVector(2) == npoints && points1.type() == points2.type()
Human-readable translation: you have to fulfil these assertions:
Your input must have a positive number of points (in practice, findHomography needs 4 or more points).
Your 'object' and 'scene' lists of points must have the same number of points.
Your 'object' and 'scene' lists of points must have the same type of points.
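A defensive check reflecting those conditions could look like this (a sketch reusing obj and scene from the tutorial code above; both are std::vector<cv::Point2f>, so they have the same type by construction):
cv::Mat H;
if( obj.size() >= 4 && obj.size() == scene.size() )  // enough points, matching counts
{
    H = cv::findHomography( obj, scene, CV_RANSAC );
}
else
{
    std::cout << " --(!) Not enough matched points for findHomography" << std::endl;
    return -1;
}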
I had the same issue and I followed the solution by MMH. Just writing
cv::Mat H = cv::findHomography( cv::Mat(obj), cv::Mat(scene), CV_RANSAC );
cv::perspectiveTransform( cv::Mat(obj_corners), cv::Mat(scene_corners), H);
solved the problem.
More likely, the problem is here:
if( matches[i].distance < 3*min_dist)
The strict inequality is not what you want. If min_dist == 0, a very good match, you will disregard all zero-distance points. Replace with:
if( matches[i].distance <= 3*min_dist)
and you should see good results for images that match well.
To exit gracefully, I would also add, e.g.:
if (good_matches.size() == 0)
{
std::cout<< " --(!) No good matches found " << std::endl; return -2;
}
You need to add a condition before calling findHomography:
if( obj.size() > 3 )
{
    //-- Get the corners from the image_1 ( the object to be "detected" )
    vector<Point2f> obj_corners(4);
    obj_corners[0] = Point( 0, 0 );
    obj_corners[1] = Point( img_object.cols, 0 );
    obj_corners[2] = Point( img_object.cols, img_object.rows );
    obj_corners[3] = Point( 0, img_object.rows );
    Mat H = findHomography( obj, scene, CV_RANSAC );
    perspectiveTransform( obj_corners, scene_corners, H );
    //-- Draw lines between the corners (the mapped object in the scene - image_2 )
    for( int i = 0; i < 4; ++i )
        line( fram_tmp, scene_corners[i] + offset, scene_corners[(i + 1) % 4] + offset, Scalar( 0, 255, 0 ), 4 );
}