How to get an efficient result with ORB in OpenCV 2.4.9? - c++

int method = 0;
std::vector<cv::KeyPoint> keypoints_object, keypoints_scene;
cv::Mat descriptors_object, descriptors_scene;
cv::ORB orb;
int minHessian = 500;
//cv::OrbFeatureDetector detector(500);
//ORB orb(25, 1.0f, 2, 10, 0, 2, 0, 10);
cv::OrbFeatureDetector detector(25, 1.0f, 2, 10, 0, 2, 0, 10);
//cv::OrbFeatureDetector detector(500,1.20000004768,8,31,0,2,ORB::HARRIS_SCORE,31);
cv::OrbDescriptorExtractor extractor;
//-- object
if( method == 0 ) { //-- ORB
orb.detect(img_object, keypoints_object);
//cv::drawKeypoints(img_object, keypoints_object, img_object, cv::Scalar(0,255,255));
//cv::imshow("template", img_object);
orb.compute(img_object, keypoints_object, descriptors_object);
} else { //-- SURF test
detector.detect(img_object, keypoints_object);
extractor.compute(img_object, keypoints_object, descriptors_object);
}
// http://stackoverflow.com/a/11798593
//if(descriptors_object.type() != CV_32F)
// descriptors_object.convertTo(descriptors_object, CV_32F);
//for(;;) {
cv::Mat frame = cv::imread("E:\\Projects\\Images\\2-134-2.bmp", 1);
cv::Mat img_scene = cv::Mat(frame.size(), CV_8UC1);
cv::cvtColor(frame, img_scene, cv::COLOR_BGR2GRAY); // imread returns BGR, not RGB
//frame.copyTo(img_scene);
if( method == 0 ) { //-- ORB
orb.detect(img_scene, keypoints_scene);
orb.compute(img_scene, keypoints_scene, descriptors_scene);
} else { //-- SURF
detector.detect(img_scene, keypoints_scene);
extractor.compute(img_scene, keypoints_scene, descriptors_scene);
}
//-- matching descriptor vectors using a brute-force matcher
cv::BFMatcher matcher;
std::vector<cv::DMatch> matches;
cv::Mat img_matches;
if(!descriptors_object.empty() && !descriptors_scene.empty()) {
matcher.match (descriptors_object, descriptors_scene, matches);
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distance between keypoints
for( int i = 0; i < descriptors_object.rows; i++)
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
//printf("-- Max dist : %f \n", max_dist );
//printf("-- Min dist : %f \n", min_dist );
//-- Draw only good matches (i.e. whose distance is less than max_dist/1.6)
std::vector< cv::DMatch >good_matches;
for( int i = 0; i < descriptors_object.rows; i++ )
{ if( matches[i].distance < (max_dist/1.6) )
{ good_matches.push_back( matches[i]); }
}
cv::drawMatches(img_object, keypoints_object, img_scene, keypoints_scene, \
good_matches, img_matches, cv::Scalar::all(-1), cv::Scalar::all(-1),
std::vector<char>(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
//-- localize the object
std::vector<cv::Point2f> obj;
std::vector<cv::Point2f> scene;
for( size_t i = 0; i < good_matches.size(); i++) {
//-- get the keypoints from the good matches
obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
}
if( !obj.empty() && !scene.empty() && good_matches.size() >= 4) {
cv::Mat H = cv::findHomography( obj, scene, cv::RANSAC );
//-- get the corners from the object to be detected
std::vector<cv::Point2f> obj_corners(4);
obj_corners[0] = cv::Point(0,0);
obj_corners[1] = cv::Point(img_object.cols,0);
obj_corners[2] = cv::Point(img_object.cols,img_object.rows);
obj_corners[3] = cv::Point(0,img_object.rows);
std::vector<cv::Point2f> scene_corners(4);
cv::perspectiveTransform( obj_corners, scene_corners, H);
//-- Draw lines between the corners (the mapped object in the scene - image_2 )
cv::line( img_matches, \
scene_corners[0] + cv::Point2f(img_object.cols, 0), \
scene_corners[1] + cv::Point2f(img_object.cols, 0), \
cv::Scalar(0,255,0), 4 );
cv::line( img_matches, \
scene_corners[1] + cv::Point2f(img_object.cols, 0), \
scene_corners[2] + cv::Point2f(img_object.cols, 0), \
cv::Scalar(0,255,0), 4 );
cv::line( img_matches, \
scene_corners[2] + cv::Point2f(img_object.cols, 0), \
scene_corners[3] + cv::Point2f(img_object.cols, 0), \
cv::Scalar(0,255,0), 4 );
cv::line( img_matches, \
scene_corners[3] + cv::Point2f(img_object.cols, 0), \
scene_corners[0] + cv::Point2f(img_object.cols, 0), \
cv::Scalar(0,255,0), 4 );
}
}
t = (double)cv::getTickCount() - t; // assumes t was initialised with cv::getTickCount() before the matching
printf("Time : %f ms\n", t * 1000. / cv::getTickFrequency());
cv::imshow("match result", img_matches );
cv::waitKey();
return 0;
Here I am performing template matching between two images: I extract key points with the ORB algorithm and match them with the BF matcher, but I am not getting a good result. I am adding an image to illustrate the problem.
As you can see, the dark blue line on the teddy is actually the rectangle that should be drawn around the object in the frame image once the object is recognized from the matched key points.
I am using OpenCV 2.4.9. What changes should I make to get a good result?

In any feature detection + extraction pipeline followed by homography estimation there are many parameters you can play with, but the main point to realise is that it is almost always a trade-off between computation time and accuracy.
The most critical failure point in your code is the ORB initialization:
cv::OrbFeatureDetector detector(25, 1.0f, 2, 10, 0, 2, 0, 10);
The first parameter tells the extractor to use only the top 25 results from the detector. For a reliable estimation of an 8 DOF homography with no constraints on its parameters, you should have an order of magnitude more features than parameters, i.e. 80, or just make it an even 100.
The second parameter scales the image down (or the detector patch up) between octaves (levels). Using 1.0f means you do not change the scale between octaves, which makes no sense, especially since your third parameter, the number of levels, is 2 and not 1. The default is a scale of 1.2f with 8 levels; for fewer calculations, use a scaling of 1.5f with 4 levels (again, just a suggestion, other values will work too).
Your fourth and last parameters say that the patch size to work on is 10x10; that is pretty small, but it is fine if you work at low resolution.
Your score type (the second-to-last parameter) can change the runtime a bit; you can use ORB::FAST_SCORE instead of ORB::HARRIS_SCORE, but it does not matter much.
Last but not least, when you initialise the BruteForce matcher object, remember to use cv::NORM_HAMMING, since ORB is a binary feature; this makes the norm calculations in the matching process actually meaningful.
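Putting those suggestions together, a rough sketch with the OpenCV 2.4 API (the helper function name is mine, and the parameter values are just the starting points suggested above, not tuned values):
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>

// Sketch: detect/describe with ORB and match with a Hamming-norm brute-force matcher.
void matchOrb(const cv::Mat& img_object, const cv::Mat& img_scene,
              std::vector<cv::KeyPoint>& kpObj, std::vector<cv::KeyPoint>& kpScene,
              std::vector<cv::DMatch>& matches)
{
    // ~100 features, 1.5f scale between levels, 4 levels; FAST score is optional,
    // and the patch size of 31 can stay at 10 if you work at low resolution.
    cv::ORB orb(100, 1.5f, 4, 31, 0, 2, cv::ORB::FAST_SCORE, 31);

    cv::Mat descObj, descScene;
    orb(img_object, cv::Mat(), kpObj, descObj);    // detect + compute in one call
    orb(img_scene,  cv::Mat(), kpScene, descScene);

    // NORM_HAMMING is the right metric for ORB's binary descriptors
    cv::BFMatcher matcher(cv::NORM_HAMMING, true); // crossCheck = true
    if (!descObj.empty() && !descScene.empty())
        matcher.match(descObj, descScene, matches);
}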

Related

Image Stitching warpPerspective size issue

I am trying to stitch two images. The tech stack is OpenCV C++ on VS 2017.
The images that I considered are:
image1 of code :
and
image2 of code:
I have found the homography matrix using this code, considering image1 and image2 as given above.
int minHessian = 400;
Ptr<SURF> detector = SURF::create(minHessian);
vector< KeyPoint > keypoints_object, keypoints_scene;
detector->detect(gray_image1, keypoints_object);
detector->detect(gray_image2, keypoints_scene);
Mat img_keypoints;
drawKeypoints(gray_image1, keypoints_object, img_keypoints);
imshow("SURF Keypoints", img_keypoints);
Mat img_keypoints1;
drawKeypoints(gray_image2, keypoints_scene, img_keypoints1);
imshow("SURF Keypoints1", img_keypoints1);
//-- Step 2: Calculate descriptors (feature vectors)
Mat descriptors_object, descriptors_scene;
detector->compute(gray_image1, keypoints_object, descriptors_object);
detector->compute(gray_image2, keypoints_scene, descriptors_scene);
//-- Step 3: Matching descriptor vectors using FLANN matcher
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::FLANNBASED);
vector< DMatch > matches;
matcher->match(descriptors_object, descriptors_scene, matches);
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for (int i = 0; i < descriptors_object.rows; i++)
{
double dist = matches[i].distance;
if (dist < min_dist) min_dist = dist;
if (dist > max_dist) max_dist = dist;
}
printf("-- Max dist: %f \n", max_dist);
printf("-- Min dist: %f \n", min_dist);
//-- Use only "good" matches (i.e. whose distance is less than 3*min_dist )
vector< DMatch > good_matches;
Mat result, H;
for (int i = 0; i < descriptors_object.rows; i++)
{
if (matches[i].distance < 3 * min_dist)
{
good_matches.push_back(matches[i]);
}
}
Mat img_matches;
drawMatches(gray_image1, keypoints_object, gray_image2, keypoints_scene, good_matches, img_matches, Scalar::all(-1),
Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
imshow("Good Matches", img_matches);
std::vector< Point2f > obj;
std::vector< Point2f > scene;
cout << "Good Matches detected" << good_matches.size() << endl;
for (int i = 0; i < good_matches.size(); i++)
{
//-- Get the keypoints from the good matches
obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
}
// Find the Homography Matrix for img 1 and img2
H = findHomography(obj, scene, RANSAC);
The next step would be to warp these. I used the perspectiveTransform function to find the corners of image1 in the stitched image, and took that as the number of columns to use in the Mat result. This is the code I wrote:
vector<Point2f> imageCorners(4);
imageCorners[0] = Point(0, 0);
imageCorners[1] = Point(image1.cols, 0);
imageCorners[2] = Point(image1.cols, image1.rows);
imageCorners[3] = Point(0, image1.rows);
vector<Point2f> projectedCorners(4);
perspectiveTransform(imageCorners, projectedCorners, H);
Mat result;
warpPerspective(image1, result, H, Size(projectedCorners[2].x, image1.rows));
Mat half(result, Rect(0, 0, image2.cols, image2.rows));
image2.copyTo(half);
imshow("result", result);
I am getting a stitched output of these images, but the issue is the size of the image. I compared a manual combination of the two original images with the result of the above code, and the result from the code is larger. What should I do to make it the right size? The ideal size should be image1.cols + image2.cols - overlap length.
warpPerspective(image1, result, H, Size(projectedCorners[2].x, image1.rows));
This line seems problematic.
You should choose the extremum points for the size.
Rect rec = boundingRect(projectedCorners);
warpPerspective(image1, result, H, rec.size());
But you will lose parts of the image if rec.tl() falls into negative coordinates, so you should shift the homography so that everything lands in the first quadrant (a sketch follows below).
See Warping to perspective section of my answer to Fast and Robust Image Stitching Algorithm for many images in Python.
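For illustration, a minimal sketch of that shift (the helper name stitchOnCanvas is mine; H, image1, image2 and projectedCorners are the variables from the question):
#include <algorithm>
#include <opencv2/imgproc.hpp>

// Sketch: translate the warp so nothing falls into negative coordinates,
// and size the canvas so it covers both the warped image1 and image2.
cv::Mat stitchOnCanvas(const cv::Mat& image1, const cv::Mat& image2,
                       const cv::Mat& H, const std::vector<cv::Point2f>& projectedCorners)
{
    cv::Rect rec = cv::boundingRect(projectedCorners);
    int tx = std::max(0, -rec.x);   // shift right if the corners go negative in x
    int ty = std::max(0, -rec.y);   // shift down if the corners go negative in y
    cv::Mat T = (cv::Mat_<double>(3, 3) << 1, 0, tx, 0, 1, ty, 0, 0, 1);

    cv::Size canvas(std::max(rec.x + rec.width,  image2.cols) + tx,
                    std::max(rec.y + rec.height, image2.rows) + ty);

    cv::Mat result;
    cv::warpPerspective(image1, result, T * H, canvas);
    image2.copyTo(result(cv::Rect(tx, ty, image2.cols, image2.rows)));
    return result;
}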

How to match two different images in C++

I'm trying to reconstruct a 3D model of an anatomical structure, so I want to match key points in a pair of X-ray images. I tried it using the following code, but it didn't give correct results.
Mat tmp = cv::imread( "1.jpg", 1 );
Mat in = cv::imread( "2.jpg", 1 );
cv::SiftFeatureDetector detector( 0.0001, 1.0 );
cv::SiftDescriptorExtractor extractor;
vector<KeyPoint> keypoints1, keypoints2;
detector.detect( tmp, keypoints1 );
detector.detect( in, keypoints2 );
Mat feat1,feat2;
drawKeypoints(tmp,keypoints1,feat1,Scalar(255, 255, 255),DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
drawKeypoints(in,keypoints2,feat2,Scalar(255, 255, 255),DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
imwrite( "feat1.bmp", feat1 );
imwrite( "feat2.bmp", feat2 );
int key1 = keypoints1.size();
int key2 = keypoints2.size();
printf("Keypoint1=%d \nKeypoint2=%d", key1, key2);
Mat descriptor1,descriptor2;
extractor.compute( tmp, keypoints1, descriptor1 );
extractor.compute( in, keypoints2, descriptor2 );
BruteForceMatcher<L2<float> > matcher;
std::vector< DMatch > matches;
matcher.match( descriptor1, descriptor2, matches );
double max_dist = 0; double min_dist = 100;
Mat img_matches;
for( int i = 0; i < descriptor1.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptor1.rows; i++ )
{ if( matches[i].distance <= max(2*min_dist, 0.03) )
{ good_matches.push_back( matches[i]); }
}
drawMatches( tmp, keypoints1, in, keypoints2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
namedWindow("SIFT", CV_WINDOW_AUTOSIZE );
imshow("SIFT", img_matches);
imwrite("sift_1.jpg",img_matches);
waitKey(0);
return 0;
These are the two images
This is what I got from this code:
This is very close to my expected result, but it also matches wrong points, and it shows only a few points while I need more.
Feature detectors like SIFT or SURF are designed to detect and match images that have a rich and distinctive texture. They are not designed to work with very sparse, binary inputs like your examples.
You might want to try them on the original X-Rays for more image context.
Alternatively, you might try a more direct global alignment model between the images.
Check out this link for some options for alignment with the findTransformECC() function.
Also see the article here.
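As a starting point, a minimal findTransformECC sketch (OpenCV 3.0+; the file names are placeholders, and the affine motion model is just one of the available options):
#include <opencv2/opencv.hpp>

// Sketch: direct, intensity-based alignment with ECC instead of sparse keypoint matching.
int main()
{
    cv::Mat im1 = cv::imread("1.jpg", cv::IMREAD_GRAYSCALE);
    cv::Mat im2 = cv::imread("2.jpg", cv::IMREAD_GRAYSCALE);

    // 2x3 warp initialised to identity; use MOTION_HOMOGRAPHY with a 3x3 CV_32F
    // matrix if a perspective model fits the setup better.
    cv::Mat warp = cv::Mat::eye(2, 3, CV_32F);
    cv::findTransformECC(im1, im2, warp, cv::MOTION_AFFINE,
                         cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 200, 1e-5));

    // Warp the second image into the first image's frame
    cv::Mat aligned;
    cv::warpAffine(im2, aligned, warp, im1.size(), cv::INTER_LINEAR + cv::WARP_INVERSE_MAP);
    cv::imwrite("aligned.jpg", aligned);
    return 0;
}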
I think you may try to use ITK, ITK is designed to complete image registration with 2D or 3D images.

not getting expected result for disparity map of two images

I recently started working with OpenCV and am having trouble getting the desired result; I don't know where I am going wrong. I have two uncalibrated images and have to calculate a disparity map for them without any other supporting data (like a camera matrix).
int minHessian = 2080;
Ptr<SURF> detector = SURF::create(minHessian);
std::vector<KeyPoint> keypoints_1, keypoints_2;
Mat descriptors_1, descriptors_2;
detector->detectAndCompute(h1, noArray(), keypoints_1, descriptors_1);
detector->detectAndCompute(h2, noArray(), keypoints_2, descriptors_2);
//-- Step 2: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector<DMatch> matches;
matcher.match(descriptors_1, descriptors_2, matches);
double max_dist = 0;
double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for (int i = 0; i < descriptors_1.rows; i++) {
double dist = matches[i].distance;
if (dist < min_dist)
min_dist = dist;
if (dist > max_dist)
max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist);
printf("-- Min dist : %f \n", min_dist);
//-- Keep only "good" matches; good_matches was missing from the posted snippet, so a typical distance filter is shown here
std::vector<DMatch> good_matches;
for (int i = 0; i < descriptors_1.rows; i++) {
if (matches[i].distance <= max(2 * min_dist, 0.02))
good_matches.push_back(matches[i]);
}
Mat img_matches;
drawMatches(h1, keypoints_1, h2, keypoints_2, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
//-- Show detected matches
imshow("Good Matches", img_matches);
imwrite("Good Matches.jpg", img_matches);
for (int i = 0; i < (int) good_matches.size(); i++) {
printf("-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i,good_matches[i].queryIdx, good_matches[i].trainIdx);
}
std::vector<cv::Point2f> obj;
std::vector<cv::Point2f> scene;
for (int i = 0; i < good_matches.size(); i++) {
//-- Get the keypoints from the good matches
obj.push_back(keypoints_1[good_matches[i].queryIdx].pt);
scene.push_back(keypoints_2[good_matches[i].trainIdx].pt);
}
cv::Mat H = cv::findFundamentalMat(obj, scene, CV_FM_RANSAC);
cv::Mat H1(4, 4, h1.type());
cv::Mat H2(4, 4, h1.type());
cv::stereoRectifyUncalibrated(obj, scene, H, h1.size(), H1, H2);
imshow("h1", h1);
cv::Mat rectified1(h1.size(), h1.type());
cv::warpPerspective(h1, rectified1, H1, h1.size());
cv::imshow("rectified1.jpg", rectified1);
cv::imwrite("rectified1.jpg", rectified1);
imshow("h2", h2);
cv::Mat rectified2(h2.size(), h2.type());
cv::warpPerspective(h2, rectified2, H2, h2.size());
cv::imshow("rectified2.jpg", rectified2);
cv::imwrite("rectified2.jpg", rectified2);
Mat test;
addWeighted(rectified1, 0.5, rectified2, 0.5, 0.0, test);
imshow("test", test);
//-- Depth map
int ndisparities = 16*5;
double minVal;
double maxVal;
Ptr<StereoSGBM> sgbm = StereoSGBM::create(16, ndisparities, 1, 0, 0, 0,0, 0,0, 0,StereoSGBM::MODE_HH);
//-- 3. Calculate the disparity image via SGBM
Mat disparity2;
sgbm->compute(rectified1, rectified2, disparity2);
minMaxLoc(disparity2, &minVal, &maxVal);
printf("Min disp: %f Max value: %f \n", minVal, maxVal);
disparity2.convertTo(disparity2, CV_8UC1, 255 / (maxVal - minVal));
cv::imshow("Disparity Map sgbm", disparity2);
imwrite("out2.jpg", disparity2);
Left Image and right image
rectified left and right image
Disparity map
I think the rectified images are okay and the problem is in the parameters of SGBM. Is there any way to calibrate them?
Yes, your rectified images look ok and yes, it's hard to find good parameters. I tried
Ptr<StereoSGBM> sgbm = StereoSGBM::create(0, //int minDisparity
80, //int numDisparities
5, //int SADWindowSize 3
600, //int P1 = 0
2400, //int P2 = 0
0, //int disp12MaxDiff = 0
0, //int preFilterCap = 0
0, //int uniquenessRatio = 0
0, //int speckleWindowSize = 0
0, //int speckleRange = 0
false); //bool fullDP = false
and the result is quite a bit better:

OpenCV - RobustMatcher using findHomography

I've implemented a robust matcher found on the internet, based on different tests: a symmetry test, a ratio test and a RANSAC test. It works well.
I then used findHomography in order to get good matches.
Here is the code:
RobustMatcher::RobustMatcher() : ratio(0.65f), refineF(true),confidence(0.99), distance(3.0) {
detector = new cv::SurfFeatureDetector(400); //Better than ORB
//detector = new cv::SiftFeatureDetector; //Better than ORB
//extractor= new cv::OrbDescriptorExtractor();
//extractor= new cv::SiftDescriptorExtractor;
extractor= new cv::SurfDescriptorExtractor;
// matcher= new cv::FlannBasedMatcher;
matcher= new cv::BFMatcher();
}
// Clear matches for which NN ratio is > than threshold
// return the number of removed points
// (corresponding entries being cleared,
// i.e. size will be 0)
int RobustMatcher::ratioTest(std::vector<std::vector<cv::DMatch> >
&matches) {
int removed=0;
// for all matches
for (std::vector<std::vector<cv::DMatch> >::iterator
matchIterator= matches.begin();
matchIterator!= matches.end(); ++matchIterator) {
// if 2 NN has been identified
if (matchIterator->size() > 1) {
// check distance ratio
if ((*matchIterator)[0].distance/
(*matchIterator)[1].distance > ratio) {
matchIterator->clear(); // remove match
removed++;
}
} else { // does not have 2 neighbours
matchIterator->clear(); // remove match
removed++;
}
}
return removed;
}
// Insert symmetrical matches in symMatches vector
void RobustMatcher::symmetryTest(
const std::vector<std::vector<cv::DMatch> >& matches1,
const std::vector<std::vector<cv::DMatch> >& matches2,
std::vector<cv::DMatch>& symMatches) {
// for all matches image 1 -> image 2
for (std::vector<std::vector<cv::DMatch> >::
const_iterator matchIterator1= matches1.begin();
matchIterator1!= matches1.end(); ++matchIterator1) {
// ignore deleted matches
if (matchIterator1->size() < 2)
continue;
// for all matches image 2 -> image 1
for (std::vector<std::vector<cv::DMatch> >::
const_iterator matchIterator2= matches2.begin();
matchIterator2!= matches2.end();
++matchIterator2) {
// ignore deleted matches
if (matchIterator2->size() < 2)
continue;
// Match symmetry test
if ((*matchIterator1)[0].queryIdx ==
(*matchIterator2)[0].trainIdx &&
(*matchIterator2)[0].queryIdx ==
(*matchIterator1)[0].trainIdx) {
// add symmetrical match
symMatches.push_back(
cv::DMatch((*matchIterator1)[0].queryIdx,
(*matchIterator1)[0].trainIdx,
(*matchIterator1)[0].distance));
break; // next match in image 1 -> image 2
}
}
}
}
// Identify good matches using RANSAC
// Return fundemental matrix
cv::Mat RobustMatcher::ransacTest(const std::vector<cv::DMatch>& matches,const std::vector<cv::KeyPoint>& keypoints1,
const std::vector<cv::KeyPoint>& keypoints2,
std::vector<cv::DMatch>& outMatches) {
// Convert keypoints into Point2f
std::vector<cv::Point2f> points1, points2;
cv::Mat fundemental;
for (std::vector<cv::DMatch>::const_iterator it= matches.begin();it!= matches.end(); ++it) {
// Get the position of left keypoints
float x= keypoints1[it->queryIdx].pt.x;
float y= keypoints1[it->queryIdx].pt.y;
points1.push_back(cv::Point2f(x,y));
// Get the position of right keypoints
x= keypoints2[it->trainIdx].pt.x;
y= keypoints2[it->trainIdx].pt.y;
points2.push_back(cv::Point2f(x,y));
}
// Compute F matrix using RANSAC
std::vector<uchar> inliers(points1.size(),0);
if (points1.size()>0&&points2.size()>0){
cv::Mat fundemental= cv::findFundamentalMat(
cv::Mat(points1),cv::Mat(points2), // matching points
inliers, // match status (inlier or outlier)
CV_FM_RANSAC, // RANSAC method
distance, // distance to epipolar line
confidence); // confidence probability
// extract the surviving (inliers) matches
std::vector<uchar>::const_iterator itIn= inliers.begin();
std::vector<cv::DMatch>::const_iterator itM= matches.begin();
// for all matches
for ( ;itIn!= inliers.end(); ++itIn, ++itM) {
if (*itIn) { // it is a valid match
outMatches.push_back(*itM);
}
}
if (refineF) {
// The F matrix will be recomputed with
// all accepted matches
// Convert keypoints into Point2f
// for final F computation
points1.clear();
points2.clear();
for (std::vector<cv::DMatch>::const_iterator it= outMatches.begin();it!= outMatches.end(); ++it) {
// Get the position of left keypoints
float x= keypoints1[it->queryIdx].pt.x;
float y= keypoints1[it->queryIdx].pt.y;
points1.push_back(cv::Point2f(x,y));
// Get the position of right keypoints
x= keypoints2[it->trainIdx].pt.x;
y= keypoints2[it->trainIdx].pt.y;
points2.push_back(cv::Point2f(x,y));
}
// Compute 8-point F from all accepted matches
if (points1.size()>0&&points2.size()>0){
fundemental= cv::findFundamentalMat(cv::Mat(points1),cv::Mat(points2), // matches
CV_FM_8POINT); // 8-point method
}
}
}
return fundemental;
}
// Match feature points using symmetry test and RANSAC
// returns fundemental matrix
cv::Mat RobustMatcher::match(cv::Mat& image1,
cv::Mat& image2, // input images
// output matches and keypoints
std::vector<cv::DMatch>& matches,
std::vector<cv::KeyPoint>& keypoints1,
std::vector<cv::KeyPoint>& keypoints2) {
if (!matches.empty()){
matches.erase(matches.begin(),matches.end());
}
// 1a. Detection of the SIFT features
detector->detect(image1,keypoints1);
detector->detect(image2,keypoints2);
// 1b. Extraction of the SIFT descriptors
/*cv::Mat img_keypoints;
cv::Mat img_keypoints2;
drawKeypoints( image1, keypoints1, img_keypoints, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
drawKeypoints( image2, keypoints2, img_keypoints2, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
//-- Show detected (drawn) keypoints
//cv::imshow("Result keypoints detected", img_keypoints);
// cv::imshow("Result keypoints detected", img_keypoints2);
cv::waitKey(5000);*/
cv::Mat descriptors1, descriptors2;
extractor->compute(image1,keypoints1,descriptors1);
extractor->compute(image2,keypoints2,descriptors2);
// 2. Match the two image descriptors
// Construction of the matcher
//cv::BruteForceMatcher<cv::L2<float>> matcher;
// from image 1 to image 2
// based on k nearest neighbours (with k=2)
std::vector<std::vector<cv::DMatch> > matches1;
matcher->knnMatch(descriptors1,descriptors2,
matches1, // vector of matches (up to 2 per entry)
2); // return 2 nearest neighbours
// from image 2 to image 1
// based on k nearest neighbours (with k=2)
std::vector<std::vector<cv::DMatch> > matches2;
matcher->knnMatch(descriptors2,descriptors1,
matches2, // vector of matches (up to 2 per entry)
2); // return 2 nearest neighbours
// 3. Remove matches for which NN ratio is
// > than threshold
// clean image 1 -> image 2 matches
int removed= ratioTest(matches1);
// clean image 2 -> image 1 matches
removed= ratioTest(matches2);
// 4. Remove non-symmetrical matches
std::vector<cv::DMatch> symMatches;
symmetryTest(matches1,matches2,symMatches);
// 5. Validate matches using RANSAC
cv::Mat fundemental= ransacTest(symMatches,
keypoints1, keypoints2, matches);
// return the found fundemental matrix
return fundemental;
}
cv::Mat img_matches;
drawMatches(image1, keypoints_img1,image2, keypoints_img2,
matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
std::cout << "Number of good matching " << (int)matches.size() << "\n" << endl;
if ((int)matches.size() > 5 ){
Debug::info("Good matching !");
}
//-- Localize the object
std::vector<Point2f> obj;
std::vector<Point2f> scene;
for( int i = 0; i < matches.size(); i++ )
{
//-- Get the keypoints from the good matches
obj.push_back( keypoints_img1[ matches[i].queryIdx ].pt );
scene.push_back( keypoints_img2[matches[i].trainIdx ].pt );
}
cv::Mat arrayRansac;
std::vector<uchar> inliers(obj.size(),0);
Mat H = findHomography( obj, scene, CV_RANSAC,3,inliers);
//-- Get the corners from the image_1 ( the object to be "detected" )
std::vector<Point2f> obj_corners(4);
obj_corners[0] = cvPoint(0,0); obj_corners[1] = cvPoint( image1.cols, 0 );
obj_corners[2] = cvPoint( image1.cols, image1.rows ); obj_corners[3] = cvPoint( 0, image1.rows );
std::vector<Point2f> scene_corners(4);
perspectiveTransform( obj_corners, scene_corners, H);
//-- Draw lines between the corners (the mapped object in the scene - image_2 )
line( img_matches, scene_corners[0] + Point2f( image1.cols, 0), scene_corners[1] + Point2f( image1.cols, 0), Scalar(0, 255, 0), 4 );
line( img_matches, scene_corners[1] + Point2f( image1.cols, 0), scene_corners[2] + Point2f( image1.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[2] + Point2f( image1.cols, 0), scene_corners[3] + Point2f( image1.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[3] + Point2f( image1.cols, 0), scene_corners[0] + Point2f( image1.cols, 0), Scalar( 0, 255, 0), 4 );
}
I get results like this (the homography is good):
But I don't understand why, for some of my results where the matching is good, I get this kind of result (the homography does not seem to be good):
Can someone explain this to me? Maybe I have to adjust the parameters? But if I relax the constraints (raise the ratio, for example), then instead of having no matches between two pictures (which is fine), I get a lot of matches... and I don't want that. Besides, the homography doesn't work at all (I only get a green line, like above).
Conversely, my robust matcher works (too) well in the sense that for different versions of the same picture (just rotated, at different scales, etc.) it works fine, but when I have two merely similar images, I get no matches at all...
So I don't know how to get a good computation. I'm a beginner. The robust matcher works well, but only for exactly the same image; for two similar images like those above it doesn't work, and this is a problem.
Maybe I'm on the wrong track.
Before posting this message, I of course read a lot on Stack Overflow, but I didn't find the answer. (For example here.)
This is due to how SURF descriptors work; see http://docs.opencv.org/trunk/doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.html
Basically, with the Droid logo the image is mostly flat color and it is difficult to find keypoints that are not ambiguous. With the Nike logo the shape is the same, but the intensity ratios in the descriptors are completely different: imagine that on the left the center of a descriptor has intensity 0 and on the right it has intensity 1. Even if you normalize the intensity of the images, you are not going to get a match.
If your goal is just to match logos, I suggest you look into edge detection algorithms, like: http://docs.opencv.org/doc/tutorials/imgproc/imgtrans/canny_detector/canny_detector.html
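For example, a minimal Canny sketch (the file name and the thresholds are placeholders to tune for your logos):
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

// Sketch: extract logo edges with Canny before any shape-based comparison.
int main()
{
    cv::Mat logo = cv::imread("logo.png", 0);       // load as grayscale
    cv::Mat blurred, edges;
    cv::GaussianBlur(logo, blurred, cv::Size(5, 5), 1.5);
    cv::Canny(blurred, edges, 50, 150);             // low/high hysteresis thresholds
    cv::imshow("edges", edges);
    cv::waitKey();
    return 0;
}
The resulting edge maps (or contours extracted from them) could then be compared with something like cv::matchShapes.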

OpenCV Image Stitching or Panorama

I am doing image stitching in OpenCV (a panorama), but I have one problem.
I can't use the Stitcher class from OpenCV, so I must build it with only feature points and homographies.
int minHessian = 500;  // note: for ORB this first argument is nFeatures, not a Hessian threshold
OrbFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );  // detect() only finds keypoints; descriptors are computed below
detector.detect( img_2, keypoints_2 );
//-- Step 2: Calculate descriptors (feature vectors)
OrbDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
cout<<"La distancia es " <<endl;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors with a brute force matcher
BFMatcher matcher(NORM_HAMMING, true);
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
Here I obtain the matched feature points in matches, but I need to filter them:
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < matches.size(); i++ )
{
double dist = matches[i].distance;
//cout<<"La distancia es " << i<<endl;
if( dist < min_dist && dist >3)
{
min_dist = dist;
}
if( dist > max_dist) max_dist = dist;
}
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches;
for( int i = 0; i < matches.size(); i++ )
{
//cout<<matches[i].distance<<endl;
if( matches[i].distance < 3*min_dist && matches[i].distance > 3)
{
good_matches.push_back( matches[i]); }
}
Now, I calculate the Homography
vector<Point2f> p1, p2;
for (unsigned int i = 0; i < matches.size(); i++) {
p1.push_back(keypoints_1[matches[i].queryIdx].pt);
p2.push_back(keypoints_2[matches[i].trainIdx].pt);
}
// Homography
vector<unsigned char> match_mask;
Mat h = findHomography(Mat(p1),Mat(p2), match_mask,CV_RANSAC);
And finally, I obtain the transform matrix and apply warpPerspective to join the two images, but my problem is that black areas appear around the photo in the final image, and when I loop again, the final image becomes illegible.
// Transform the perspective for image 2
vector<Point2f> cuatroPuntos;
cuatroPuntos.push_back(Point2f (0,0));
cuatroPuntos.push_back(Point2f (img_1.size().width,0));
cuatroPuntos.push_back(Point2f (0, img_1.size().height));
cuatroPuntos.push_back(Point2f (img_1.size().width, img_1.size().height));
Mat MDestino;
perspectiveTransform(Mat(cuatroPuntos), MDestino, h);
// Compute the corners of image 2
double min_x, min_y, tam_x, tam_y;
float min_x1, min_x2, min_y1, min_y2, max_x1, max_x2, max_y1, max_y2;
min_x1 = min(MDestino.at<Point2f>(0).x, MDestino.at<Point2f>(1).x);
min_x2 = min(MDestino.at<Point2f>(2).x, MDestino.at<Point2f>(3).x);
min_y1 = min(MDestino.at<Point2f>(0).y, MDestino.at<Point2f>(1).y);
min_y2 = min(MDestino.at<Point2f>(2).y, MDestino.at<Point2f>(3).y);
max_x1 = max(MDestino.at<Point2f>(0).x, MDestino.at<Point2f>(1).x);
max_x2 = max(MDestino.at<Point2f>(2).x, MDestino.at<Point2f>(3).x);
max_y1 = max(MDestino.at<Point2f>(0).y, MDestino.at<Point2f>(1).y);
max_y2 = max(MDestino.at<Point2f>(2).y, MDestino.at<Point2f>(3).y);
min_x = min(min_x1, min_x2);
min_y = min(min_y1, min_y2);
tam_x = max(max_x1, max_x2);
tam_y = max(max_y1, max_y2);
// Transformation matrix
Mat Htr = Mat::eye(3,3,CV_64F);
if (min_x < 0){
tam_x = img_2.size().width - min_x;
Htr.at<double>(0,2)= -min_x;
}
if (min_y < 0){
tam_y = img_2.size().height - min_y;
Htr.at<double>(1,2)= -min_y;
}
// Build the panorama
Mat Panorama;
Panorama = Mat(Size(tam_x,tam_y), CV_32F);
warpPerspective(img_2, Panorama, Htr, Panorama.size(), INTER_LINEAR, BORDER_CONSTANT, 0);
warpPerspective(img_1, Panorama, (Htr*h), Panorama.size(), INTER_LINEAR, BORDER_TRANSPARENT,0);
Does anyone know how I can eliminate these black areas? Am I doing something wrong? Does anyone know of working code that I can compare against?
Thanks for your time.
EDIT:
This is my image:
And I want to eliminate the black part.
As Micka suggested, when you do stitching the panorama is usually wavy, because a homography or other projection method does not map a rectangle to another rectangle. You can compensate for this effect with some "straightening"; see this article:
M. Brown and D. G. Lowe. Automatic panoramic image stitching using invariant features. IJCV, 74(1):59–73, 2007
As for cropping the black part, I wrote a class you can use. It assumes the image is BGR and that the black pixels have the value Vec3b(0,0,0). The source code can be accessed here:
https://github.com/chmos/crop-images.git
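The core idea is just to crop to the bounding box of the non-black pixels; a minimal sketch of that idea (not the class from the repository, and it assumes the empty border is exactly black):
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

// Sketch: crop a stitched BGR panorama to the bounding box of its non-black pixels.
cv::Mat cropBlackBorder(const cv::Mat& panorama)
{
    cv::Mat gray, mask;
    cv::cvtColor(panorama, gray, cv::COLOR_BGR2GRAY);
    cv::threshold(gray, mask, 0, 255, cv::THRESH_BINARY);  // any non-zero pixel -> 255

    std::vector<cv::Point> nonZero;
    cv::findNonZero(mask, nonZero);
    if (nonZero.empty())
        return panorama.clone();
    return panorama(cv::boundingRect(nonZero)).clone();
}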
Best,