not getting expected result for disparity map of two images - c++

I recently started working with OpenCV and I am not getting the desired result; I don't know where I am going wrong. I have two uncalibrated images and have to compute a disparity map for them without any other supporting data (such as the camera matrix).
int minHessian = 2080;
Ptr<SURF> detector = SURF::create(minHessian);
std::vector<KeyPoint> keypoints_1, keypoints_2;
Mat descriptors_1, descriptors_2;
detector->detectAndCompute(h1, noArray(), keypoints_1, descriptors_1);
detector->detectAndCompute(h2, noArray(), keypoints_2, descriptors_2);
//-- Step 2: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector<DMatch> matches;
matcher.match(descriptors_1, descriptors_2, matches);
double max_dist = 0;
double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for (int i = 0; i < descriptors_1.rows; i++) {
double dist = matches[i].distance;
if (dist < min_dist)
min_dist = dist;
if (dist > max_dist)
max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist);
printf("-- Min dist : %f \n", min_dist);
//-- Keep only "good" matches (i.e. whose distance is less than 3*min_dist)
std::vector<DMatch> good_matches;
for (int i = 0; i < descriptors_1.rows; i++) {
if (matches[i].distance < 3 * min_dist)
good_matches.push_back(matches[i]);
}
Mat img_matches;
drawMatches(h1, keypoints_1, h2, keypoints_2, good_matches, img_matches,Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
//-- Show detected matches
imshow("Good Matches", img_matches);
imwrite("Good Matches.jpg", img_matches);
for (int i = 0; i < (int) good_matches.size(); i++) {
printf("-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i,good_matches[i].queryIdx, good_matches[i].trainIdx);
}
std::vector<cv::Point2f> obj;
std::vector<cv::Point2f> scene;
for (int i = 0; i < good_matches.size(); i++) {
//-- Get the keypoints from the good matches
obj.push_back(keypoints_1[good_matches[i].queryIdx].pt);
scene.push_back(keypoints_2[good_matches[i].trainIdx].pt);
}
cv::Mat F = cv::findFundamentalMat(obj, scene, CV_FM_RANSAC); // fundamental matrix (not a homography)
cv::Mat H1, H2; // filled by stereoRectifyUncalibrated with 3x3 rectifying homographies
cv::stereoRectifyUncalibrated(obj, scene, F, h1.size(), H1, H2);
imshow("h1", h1);
cv::Mat rectified1(h1.size(), h1.type());
cv::warpPerspective(h1, rectified1, H1, h1.size());
cv::imshow("rectified1.jpg", rectified1);
cv::imwrite("rectified1.jpg", rectified1);
imshow("h2", h2);
cv::Mat rectified2(h2.size(), h2.type());
cv::warpPerspective(h2, rectified2, H2, h2.size());
cv::imshow("rectified2.jpg", rectified2);
cv::imwrite("rectified2.jpg", rectified2);
Mat test;
addWeighted(rectified1, 0.5, rectified2, 0.5, 0.0, test);
imshow("test", test);
//-- Depth map
int ndisparities = 16*5;
double minVal;
double maxVal;
Ptr<StereoSGBM> sgbm = StereoSGBM::create(16, ndisparities, 1, 0, 0, 0,0, 0,0, 0,StereoSGBM::MODE_HH);
//-- 3. Calculate the disparity image via SGBM
Mat disparity2;
sgbm->compute(rectified1, rectified2, disparity2);
minMaxLoc(disparity2, &minVal, &maxVal);
printf("Min disp: %f Max value: %f \n", minVal, maxVal);
disparity2.convertTo(disparity2, CV_8UC1, 255 / (maxVal - minVal));
cv::imshow("Disparity Map sgbm", disparity2);
imwrite("out2.jpg", disparity2);
[Left and right input images]
[Rectified left and right images]
[Disparity map]
I think the rectified images are okay and the problem is in the SGBM parameters. Is there any way to calibrate them?

Yes, your rectified images look OK, and yes, it's hard to find good parameters. I tried
Ptr<StereoSGBM> sgbm = StereoSGBM::create(0, //int minDisparity
80, //int numDisparities
5, //int SADWindowSize 3
600, //int P1 = 0
2400, //int P2 = 0
0, //int disp12MaxDiff = 0
0, //int preFilterCap = 0
0, //int uniquenessRatio = 0
0, //int speckleWindowSize = 0
0, //int speckleRange = 0
false); //bool fullDP = false
and the result is noticeably better:
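One more thing worth noting when displaying the result: StereoSGBM (like StereoBM) returns a CV_16S disparity map in which every value is the actual disparity multiplied by 16. A minimal sketch of a display conversion that accounts for this fixed-point scaling (disp and numDisparities are placeholders for the computed map and the value passed to create()):
// disp is the CV_16S output of sgbm->compute(); values are disparity * 16
Mat disp8;
disp.convertTo(disp8, CV_8U, 255.0 / (numDisparities * 16.0));
imshow("Disparity (scaled for display)", disp8);
This keeps the gray levels on a fixed scale instead of one derived from each result's minMaxLoc.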

Related

Image Stitching warpPerspective size issue

I am trying to stitch two images. The tech stack is OpenCV C++ on VS 2017.
These are the two images I considered:
[image1]
[image2]
I have found the homography matrix using the following code, with image1 and image2 as given above.
int minHessian = 400;
Ptr<SURF> detector = SURF::create(minHessian);
vector< KeyPoint > keypoints_object, keypoints_scene;
detector->detect(gray_image1, keypoints_object);
detector->detect(gray_image2, keypoints_scene);
Mat img_keypoints;
drawKeypoints(gray_image1, keypoints_object, img_keypoints);
imshow("SURF Keypoints", img_keypoints);
Mat img_keypoints1;
drawKeypoints(gray_image2, keypoints_scene, img_keypoints1);
imshow("SURF Keypoints1", img_keypoints1);
//-- Step 2: Calculate descriptors (feature vectors)
Mat descriptors_object, descriptors_scene;
detector->compute(gray_image1, keypoints_object, descriptors_object);
detector->compute(gray_image2, keypoints_scene, descriptors_scene);
//-- Step 3: Matching descriptor vectors using FLANN matcher
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::FLANNBASED);
vector< DMatch > matches;
matcher->match(descriptors_object, descriptors_scene, matches);
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for (int i = 0; i < descriptors_object.rows; i++)
{
double dist = matches[i].distance;
if (dist < min_dist) min_dist = dist;
if (dist > max_dist) max_dist = dist;
}
printf("-- Max dist: %f \n", max_dist);
printf("-- Min dist: %f \n", min_dist);
//-- Use only "good" matches (i.e. whose distance is less than 3*min_dist )
vector< DMatch > good_matches;
Mat result, H;
for (int i = 0; i < descriptors_object.rows; i++)
{
if (matches[i].distance < 3 * min_dist)
{
good_matches.push_back(matches[i]);
}
}
Mat img_matches;
drawMatches(gray_image1, keypoints_object, gray_image2, keypoints_scene, good_matches, img_matches, Scalar::all(-1),
Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
imshow("Good Matches", img_matches);
std::vector< Point2f > obj;
std::vector< Point2f > scene;
cout << "Good Matches detected" << good_matches.size() << endl;
for (int i = 0; i < good_matches.size(); i++)
{
//-- Get the keypoints from the good matches
obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
}
// Find the Homography Matrix for img 1 and img2
H = findHomography(obj, scene, RANSAC);
The next step would be to warp these. I used the perspectiveTransform function to find the corners of image1 on the stitched image, and used that as the number of columns in the result Mat. This is the code I wrote:
vector<Point2f> imageCorners(4);
imageCorners[0] = Point(0, 0);
imageCorners[1] = Point(image1.cols, 0);
imageCorners[2] = Point(image1.cols, image1.rows);
imageCorners[3] = Point(0, image1.rows);
vector<Point2f> projectedCorners(4);
perspectiveTransform(imageCorners, projectedCorners, H);
Mat result;
warpPerspective(image1, result, H, Size(projectedCorners[2].x, image1.rows));
Mat half(result, Rect(0, 0, image2.cols, image2.rows));
image2.copyTo(half);
imshow("result", result);
I am getting a stitched output of these images, but the issue is the size of the result. Comparing the output of the above code with the two original images combined manually, the result from the code is larger. What should I do to make it the correct size? The ideal size should be image1.cols + image2.cols minus the overlapping length.
warpPerspective(image1, result, H, Size(projectedCorners[2].x, image1.rows));
This line seems problematic.
You should use the extreme points of the projected corners to choose the output size.
Rect rec = boundingRect(projectedCorners);
warpPerspective(image1, result, H, rec.size());
But you will lose parts of the image if rec.tl() falls on the negative axes, so you should shift the homography matrix so that the result falls in the first quadrant.
See Warping to perspective section of my answer to Fast and Robust Image Stitching Algorithm for many images in Python.
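A rough sketch of that shift, written as a hypothetical helper (it is not the code from the linked answer); it assumes H maps image1 into image2's coordinate frame, as in the question:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;

// Warp image1 by H, translated so nothing lands at negative coordinates,
// then paste image2 at the matching offset.
Mat stitchShifted(const Mat& image1, const Mat& image2, const Mat& H)
{
    std::vector<Point2f> corners;
    corners.push_back(Point2f(0, 0));
    corners.push_back(Point2f((float)image1.cols, 0));
    corners.push_back(Point2f((float)image1.cols, (float)image1.rows));
    corners.push_back(Point2f(0, (float)image1.rows));

    std::vector<Point2f> projected;
    perspectiveTransform(corners, projected, H);

    // Bounding box of the warped image1 together with image2's own frame
    projected.push_back(Point2f(0, 0));
    projected.push_back(Point2f((float)image2.cols, (float)image2.rows));
    Rect box = boundingRect(projected);

    // Translation that moves the whole box into the first quadrant
    Mat T = (Mat_<double>(3, 3) << 1, 0, -box.x, 0, 1, -box.y, 0, 0, 1);

    Mat result(box.size(), image1.type(), Scalar::all(0));
    warpPerspective(image1, result, T * H, result.size(), INTER_LINEAR, BORDER_TRANSPARENT);
    image2.copyTo(result(Rect(-box.x, -box.y, image2.cols, image2.rows)));
    return result;
}
The output canvas here is the bounding box of the warped image1 plus image2's own frame, so nothing is cropped; any remaining black filler is the usual empty border of a warped image.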

Does the StereoBM class in opencv do rectification of the input images or frames?

I am using the StereoBM class for stereo vision as part of my project. I take the input frames from two webcams and run the stereo block-matching computation on the grayscale frames without rectification. The output I am getting is far from the ground truth (very patchy). I want to know: is it because I am not doing rectification on the input frames? Moreover, the baseline I have chosen is 20 cm. I am using OpenCV 3.2.0, C++.
The code I am running is given below.
#include <opencv2/core.hpp>
#include <opencv2/opencv.hpp>
#include </home/eswar/softwares/opencv_contrib-3.2.0/modules/contrib_world/include/opencv2/contrib_world.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgproc.hpp>
#include <stdio.h>
#include <iostream>
using namespace std;
using namespace cv;
int main()
{
//initialize and allocate memory to load the video stream from camera
VideoCapture camera0(0);
VideoCapture camera1(1);
if( !camera0.isOpened() ) return 1;
if( !camera1.isOpened() ) return 1;
Mat frame0,frame1;
Mat frame0gray,frame1gray;
Mat dispbm,dispsgbm;
Mat dispnorm_bm,dispnorm_sgbm;
Mat falseColorsMap, sfalseColorsMap;
int ndisparities = 16*5; /**< Range of disparity */
int SADWindowSize = 21; /**< Size of the block window. Must be odd */
Ptr<StereoBM> sbm = StereoBM::create( ndisparities, SADWindowSize );
Ptr<StereoSGBM> sgbm = StereoSGBM::create(0, //int minDisparity
96, //int numDisparities
5, //int SADWindowSize
600, //int P1 = 0
2400, //int P2 = 0
10, //int disp12MaxDiff = 0
16, //int preFilterCap = 0
2, //int uniquenessRatio = 0
20, //int speckleWindowSize = 0
30, //int speckleRange = 0
true); //bool fullDP = false
//-- Check its extreme values
double minVal; double maxVal;
while(true)
{
//grab and retrieve each frames of the video sequentially
camera0 >> frame0;
camera1 >> frame1;
imshow("Video0", frame0);
imshow("Video1", frame1);
cvtColor(frame0,frame0gray,CV_BGR2GRAY);
cvtColor(frame1,frame1gray,CV_BGR2GRAY);
sbm->compute( frame0gray, frame1gray, dispbm );
minMaxLoc( dispbm, &minVal, &maxVal );
dispbm.convertTo( dispnorm_bm, CV_8UC1, 255/(maxVal - minVal));
sgbm->compute(frame0gray, frame1gray, dispsgbm);
minMaxLoc( dispsgbm, &minVal, &maxVal );
dispsgbm.convertTo( dispnorm_sgbm, CV_8UC1, 255/(maxVal - minVal));
imshow( "BM", dispnorm_bm);
imshow( "SGBM",dispnorm_sgbm);
//wait for 40 milliseconds
int c = cvWaitKey(40);
//exit the loop if user press "Esc" key (ASCII value of "Esc" is 27)
if(27 == char(c)) break;
}
return 0;
}
Although you can see block matching also being used in the code, please ignore it because it gives even worse output. I find that the SGBM output is closer to the ground truth, and therefore I've decided to improve on it. However, any help on how the block-matching results can be improved would be great, and I'd certainly appreciate it.
The output depth image for the SGBM technique looks like this:
No, StereoBM doesn't do rectification, just block matching and some pre- and post-processing. However, OpenCV provides functions for camera calibration and rectification; check this link.
There is also a ready-made example of this process in the OpenCV samples, so you don't have to write the code from scratch.
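For reference, a rough sketch of the calibrated pipeline that sample ends with; the helper and variable names here are illustrative, and cameraMatrix1/2, distCoeffs1/2, R and T would come from stereoCalibrate() run on your chessboard captures:
#include <opencv2/opencv.hpp>

// Hypothetical helper: rectify one grayscale frame pair using calibration results.
void rectifyPair(const cv::Mat& cameraMatrix1, const cv::Mat& distCoeffs1,
                 const cv::Mat& cameraMatrix2, const cv::Mat& distCoeffs2,
                 const cv::Mat& R, const cv::Mat& T, cv::Size imageSize,
                 const cv::Mat& left, const cv::Mat& right,
                 cv::Mat& rectLeft, cv::Mat& rectRight)
{
    cv::Mat R1, R2, P1, P2, Q;
    cv::stereoRectify(cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2,
                      imageSize, R, T, R1, R2, P1, P2, Q);

    // In a live loop, compute these maps once and reuse them for every frame
    cv::Mat map1x, map1y, map2x, map2y;
    cv::initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1, P1, imageSize, CV_32FC1, map1x, map1y);
    cv::initUndistortRectifyMap(cameraMatrix2, distCoeffs2, R2, P2, imageSize, CV_32FC1, map2x, map2y);

    cv::remap(left,  rectLeft,  map1x, map1y, cv::INTER_LINEAR);
    cv::remap(right, rectRight, map2x, map2y, cv::INTER_LINEAR);
}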
About the results: StereoBM is based on the SAD algorithm (local stereo matching), which is not robust. You can try the WLS filter, which could improve your results significantly.
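A minimal sketch of that WLS filtering, assuming the opencv_contrib ximgproc module is available and using frame0gray/frame1gray from your code (the parameter values are only starting points):
#include <opencv2/ximgproc/disparity_filter.hpp>

// Left matcher as before, plus a matching right-view matcher for the confidence map
Ptr<StereoSGBM> left_matcher = StereoSGBM::create(0, 96, 5);
Ptr<ximgproc::DisparityWLSFilter> wls = ximgproc::createDisparityWLSFilter(left_matcher);
Ptr<StereoMatcher> right_matcher = ximgproc::createRightMatcher(left_matcher);

Mat left_disp, right_disp, filtered_disp;
left_matcher->compute(frame0gray, frame1gray, left_disp);
right_matcher->compute(frame1gray, frame0gray, right_disp);

wls->setLambda(8000.0);   // smoothness of the filtered disparity
wls->setSigmaColor(1.5);  // sensitivity to edges in the left view
wls->filter(left_disp, frame0gray, filtered_disp, right_disp);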
StereoSGBM is based on the SGM algorithm (actually a little different from the one introduced in the original paper). It is a semi-global algorithm that considers global optimisation when generating the disparity map, which produces a better disparity map but is slower.
As indicated above, I tried rectifying the frames. The code is below.
#include <opencv2/core.hpp>
#include <opencv2/opencv.hpp>
#include </home/eswar/softwares/opencv_contrib-3.2.0/modules/contrib_world/include/opencv2/contrib_world.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/imgproc.hpp>
#include <stdio.h>
#include <iostream>
#include <opencv2/xfeatures2d/nonfree.hpp>
using namespace std;
using namespace cv;
using namespace cv::xfeatures2d;
int main()
{
//initialize and allocate memory to load the video stream from camera
VideoCapture camera0(0);
VideoCapture camera1(1);
int count=0;
Mat loRes, hiRes;
if( !camera0.isOpened() ) return 1;
if( !camera1.isOpened() ) return 1;
camera0.set(CV_CAP_PROP_FRAME_WIDTH, 400);
camera0.set(CV_CAP_PROP_FRAME_HEIGHT, 400);
camera1.set(CV_CAP_PROP_FRAME_WIDTH, 400);
camera1.set(CV_CAP_PROP_FRAME_HEIGHT, 400);
Mat frame0,frame1;
Mat frame0gray,frame1gray;
Mat dispbm,dispsgbm,disparity,disparity1;
Mat dispnorm_bm,dispnorm_sgbm;
Mat falseColorsMap, sfalseColorsMap,falsemap;
Mat img_matches;
Mat H1,H2;
int ndisparities = 96; /**< Range of disparity */
int SADWindowSize = 7;
Ptr<StereoBM> sbm = StereoBM::create( ndisparities, SADWindowSize );
Ptr<StereoSGBM> sgbm = StereoSGBM::create(-3, //int minDisparity
96, //int numDisparities
7, //int SADWindowSize
60, //int P1 = 0
2400, //int P2 = 0
90, //int disp12MaxDiff = 0
16, //int preFilterCap = 0
1, //int uniquenessRatio = 0
60, //int speckleWindowSize = 0
20, //int speckleRange = 0
true); //bool fullDP = false
//-- Check its extreme values
double minVal; double maxVal;
double max_dist = 0;
double min_dist = 100;
int minHessian = 630;
Ptr<Feature2D> f2d = SIFT::create();
vector<KeyPoint> keypoints_1, keypoints_2;
Ptr<Feature2D> fd = SIFT::create();
Mat descriptors_1, descriptors_2;
BFMatcher matcher(NORM_L2, true); //BFMatcher matcher(NORM_L2);
vector< DMatch > matches;
vector< DMatch > good_matches;
vector<Point2f>imgpts1,imgpts2;
vector<uchar> status;
while(true)
{
//grab and retrieve each frames of the video sequentially
camera0 >> frame0;
camera1 >> frame1;
imshow("Video0", frame0);
imshow("Video1", frame1);
cvtColor(frame0,frame0gray,CV_BGR2GRAY);
cvtColor(frame1,frame1gray,CV_BGR2GRAY);
sbm->compute( frame0gray, frame1gray, dispbm );
minMaxLoc( dispbm, &minVal, &maxVal );
dispbm.convertTo( dispnorm_bm, CV_8UC1, 255/(maxVal - minVal));
sgbm->compute(frame0gray, frame1gray, dispsgbm);
minMaxLoc( dispsgbm, &minVal, &maxVal );
dispsgbm.convertTo( dispnorm_sgbm, CV_8UC1, 255/(maxVal - minVal));
applyColorMap(dispnorm_bm, falseColorsMap, cv::COLORMAP_JET);
applyColorMap(dispnorm_sgbm, sfalseColorsMap, cv::COLORMAP_JET);
f2d->detect( frame0gray, keypoints_1 );
f2d->detect( frame1gray, keypoints_2 );
//-- Step 2: Calculate descriptors (feature vectors)
fd->compute( frame0gray, keypoints_1, descriptors_1 );
fd->compute( frame1gray, keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors with a brute force matcher
matcher.match( descriptors_1, descriptors_2, matches );
drawMatches(frame0gray, keypoints_1, frame1gray, keypoints_2, matches, img_matches);
imshow("matches", img_matches);
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < matches.size(); i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
for( int i = 0; i < matches.size(); i++ )
{
if( matches[i].distance <= max(4.5*min_dist, 0.02) ){
good_matches.push_back( matches[i]);
imgpts1.push_back(keypoints_1[matches[i].queryIdx].pt);
imgpts2.push_back(keypoints_2[matches[i].trainIdx].pt);
}
}
Mat F = findFundamentalMat(imgpts1, imgpts2, cv::FM_RANSAC, 3., 0.99, status); //FM_RANSAC
stereoRectifyUncalibrated(imgpts1, imgpts2, F, frame0gray.size(), H1, H2);
Mat rectified1(frame0gray.size(), frame0gray.type());
warpPerspective(frame0gray, rectified1, H1, frame0gray.size());
Mat rectified2(frame1gray.size(), frame1gray.type());
warpPerspective(frame1gray, rectified2, H2, frame1gray.size());
sgbm->compute(rectified1, rectified2, disparity);
minMaxLoc( disparity, &minVal, &maxVal );
disparity.convertTo( disparity1, CV_8UC1, 255/(maxVal - minVal));
applyColorMap(disparity1, falsemap, cv::COLORMAP_JET);
imshow("disparity_rectified_color", falsemap);
imshow( "BM", falseColorsMap);
imshow( "CSGBM",sfalseColorsMap);
//wait for 40 milliseconds
int c = cvWaitKey(40);
//exit the loop if user press "Esc" key (ASCII value of "Esc" is 27)
if(27 == char(c)) break;
}
return 0;
}
Now the output again isn't great, but it has improved from last time. However, there seems to be one constant problem, which is also seen in the above image: the left side of the output image has a completely black region. It shouldn't come out this way, right?
How to solve this problem?
Any help appreciated.

How to match two different images in C++

I'm trying to reconstruct a 3D model of an anatomical structure, so I want to match key points in a pair of X-ray images. I tried the following code, but it didn't give correct results.
Mat tmp = cv::imread( "1.jpg", 1 );
Mat in = cv::imread( "2.jpg", 1 );
cv::SiftFeatureDetector detector( 0.0001, 1.0 );
cv::SiftDescriptorExtractor extractor;
vector<KeyPoint> keypoints1, keypoints2;
detector.detect( tmp, keypoints1 );
detector.detect( in, keypoints2 );
Mat feat1,feat2;
drawKeypoints(tmp,keypoints1,feat1,Scalar(255, 255, 255),DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
drawKeypoints(in,keypoints2,feat2,Scalar(255, 255, 255),DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
imwrite( "feat1.bmp", feat1 );
imwrite( "feat2.bmp", feat2 );
int key1 = keypoints1.size();
int key2 = keypoints2.size();
printf("Keypoint1=%d \nKeypoint2=%d", key1, key2);
Mat descriptor1,descriptor2;
extractor.compute( tmp, keypoints1, descriptor1 );
extractor.compute( in, keypoints2, descriptor2 );
BruteForceMatcher<L2<float> > matcher;
std::vector< DMatch > matches;
matcher.match( descriptor1, descriptor2, matches );
double max_dist = 0; double min_dist = 100;
Mat img_matches;
for( int i = 0; i < descriptor1.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptor1.rows; i++ )
{ if( matches[i].distance <= max(2*min_dist, 0.03) )
{ good_matches.push_back( matches[i]); }
}
drawMatches( tmp, keypoints1, in, keypoints2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
namedWindow("SIFT", CV_WINDOW_AUTOSIZE );
imshow("SIFT", img_matches);
imwrite("sift_1.jpg",img_matches);
waitKey(0);
return 0;
These are the two images:
This is what I got from this code:
This is very close to my expected result, but it also matches some wrong points. It shows only a few points, and I need more.
Feature detectors like SIFT or SURF are designed to work on, and match, images that have a rich and distinctive texture. They are not designed to work with very sparse binary inputs like your examples.
You might want to try them on the original X-Rays for more image context.
Alternatively, you might try a more direct global alignment model between the images.
Check out this link for some options for alignment with the findTransformECC() function.
Also see the article here.
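A minimal sketch of such a direct alignment with findTransformECC() (note this needs OpenCV 3.x, unlike the 2.x API used in the question; the affine motion model and iteration counts are only illustrative):
#include <opencv2/opencv.hpp>

int main()
{
    // Load both X-ray images as 8-bit grayscale
    cv::Mat tmp = cv::imread("1.jpg", cv::IMREAD_GRAYSCALE);
    cv::Mat in  = cv::imread("2.jpg", cv::IMREAD_GRAYSCALE);

    // Estimate an affine warp that maps 'in' onto 'tmp'
    // (findTransformECC throws if it fails to converge)
    cv::Mat warp = cv::Mat::eye(2, 3, CV_32F);
    cv::TermCriteria criteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 200, 1e-6);
    cv::findTransformECC(tmp, in, warp, cv::MOTION_AFFINE, criteria);

    // Bring the second image into the first image's frame
    cv::Mat aligned;
    cv::warpAffine(in, aligned, warp, tmp.size(), cv::INTER_LINEAR + cv::WARP_INVERSE_MAP);

    cv::imshow("aligned", aligned);
    cv::waitKey(0);
    return 0;
}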
You may also try ITK; it is designed for image registration of 2D or 3D images.

How to get an efficient result with ORB using OpenCV 2.4.9?

int method = 0;
std::vector<cv::KeyPoint> keypoints_object, keypoints_scene;
cv::Mat descriptors_object, descriptors_scene;
cv::ORB orb;
int minHessian = 500;
//cv::OrbFeatureDetector detector(500);
//ORB orb(25, 1.0f, 2, 10, 0, 2, 0, 10);
cv::OrbFeatureDetector detector(25, 1.0f, 2, 10, 0, 2, 0, 10);
//cv::OrbFeatureDetector detector(500,1.20000004768,8,31,0,2,ORB::HARRIS_SCORE,31);
cv::OrbDescriptorExtractor extractor;
//-- object
if( method == 0 ) { //-- ORB
orb.detect(img_object, keypoints_object);
//cv::drawKeypoints(img_object, keypoints_object, img_object, cv::Scalar(0,255,255));
//cv::imshow("template", img_object);
orb.compute(img_object, keypoints_object, descriptors_object);
} else { //-- SURF test
detector.detect(img_object, keypoints_object);
extractor.compute(img_object, keypoints_object, descriptors_object);
}
// http://stackoverflow.com/a/11798593
//if(descriptors_object.type() != CV_32F)
// descriptors_object.convertTo(descriptors_object, CV_32F);
//for(;;) {
cv::Mat frame = cv::imread("E:\\Projects\\Images\\2-134-2.bmp", 1);
cv::Mat img_scene = cv::Mat(frame.size(), CV_8UC1);
cv::cvtColor(frame, img_scene, cv::COLOR_BGR2GRAY); // imread loads BGR, so convert from BGR
//frame.copyTo(img_scene);
if( method == 0 ) { //-- ORB
orb.detect(img_scene, keypoints_scene);
orb.compute(img_scene, keypoints_scene, descriptors_scene);
} else { //-- SURF
detector.detect(img_scene, keypoints_scene);
extractor.compute(img_scene, keypoints_scene, descriptors_scene);
}
//-- matching descriptor vectors using FLANN matcher
cv::BFMatcher matcher;
std::vector<cv::DMatch> matches;
cv::Mat img_matches;
if(!descriptors_object.empty() && !descriptors_scene.empty()) {
matcher.match (descriptors_object, descriptors_scene, matches);
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distance between keypoints
for( int i = 0; i < descriptors_object.rows; i++)
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
//printf("-- Max dist : %f \n", max_dist );
//printf("-- Min dist : %f \n", min_dist );
//-- Draw only good matches (i.e. whose distance is less than 3*min_dist)
std::vector< cv::DMatch >good_matches;
for( int i = 0; i < descriptors_object.rows; i++ )
{ if( matches[i].distance < (max_dist/1.6) )
{ good_matches.push_back( matches[i]); }
}
cv::drawMatches(img_object, keypoints_object, img_scene, keypoints_scene, \
good_matches, img_matches, cv::Scalar::all(-1), cv::Scalar::all(-1),
std::vector<char>(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
//-- localize the object
std::vector<cv::Point2f> obj;
std::vector<cv::Point2f> scene;
for( size_t i = 0; i < good_matches.size(); i++) {
//-- get the keypoints from the good matches
obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
}
if( !obj.empty() && !scene.empty() && good_matches.size() >= 4) {
cv::Mat H = cv::findHomography( obj, scene, cv::RANSAC );
//-- get the corners from the object to be detected
std::vector<cv::Point2f> obj_corners(4);
obj_corners[0] = cv::Point(0,0);
obj_corners[1] = cv::Point(img_object.cols,0);
obj_corners[2] = cv::Point(img_object.cols,img_object.rows);
obj_corners[3] = cv::Point(0,img_object.rows);
std::vector<cv::Point2f> scene_corners(4);
cv::perspectiveTransform( obj_corners, scene_corners, H);
//-- Draw lines between the corners (the mapped object in the scene - image_2 )
cv::line( img_matches, \
scene_corners[0] + cv::Point2f(img_object.cols, 0), \
scene_corners[1] + cv::Point2f(img_object.cols, 0), \
cv::Scalar(0,255,0), 4 );
cv::line( img_matches, \
scene_corners[1] + cv::Point2f(img_object.cols, 0), \
scene_corners[2] + cv::Point2f(img_object.cols, 0), \
cv::Scalar(0,255,0), 4 );
cv::line( img_matches, \
scene_corners[2] + cv::Point2f(img_object.cols, 0), \
scene_corners[3] + cv::Point2f(img_object.cols, 0), \
cv::Scalar(0,255,0), 4 );
cv::line( img_matches, \
scene_corners[3] + cv::Point2f(img_object.cols, 0), \
scene_corners[0] + cv::Point2f(img_object.cols, 0), \
cv::Scalar(0,255,0), 4 );
}
}
t = (double)getTickCount() - t; // assumes t was set with getTickCount() before matching started (not shown)
printf("Time :%f",(double)(t*1000./getTickFrequency()));
cv::imshow("match result", img_matches );
cv::waitKey();
return 0;
Here I am performing template matching between two images: I extract key points using the ORB algorithm and match them with the BF matcher, but I am not getting a good result. I am adding an image to illustrate the problem.
Here, as you can see, the dark blue line on the teddy is actually the rectangle that should be drawn around the object in the frame image once the object is recognized by matching key points.
I am using OpenCV 2.4.9. What changes should I make to get a good result?
In any feature detection + extraction followed by a homography estimation there are many parameters you can play with. However, the main point to realise is that it is almost always a trade-off between computation time and accuracy.
The most crucial failure point of your code is your ORB initialization:
cv::OrbFeatureDetector detector(25, 1.0f, 2, 10, 0, 2, 0, 10);
The first parameter tells the extractor to only use the top 25 results from the detector. For a reliable estimation of an 8 DOF homography with no constraints on parameters, you should have an order of magnitude more features than parameters, i.e. 80, or just make it an even 100.
The second parameter is for scaling the images down (or the detector patch up) between octaves (or levels). Using 1.0f means you don't change the scale between octaves, which makes no sense, especially since your third parameter, the number of levels, is 2 and not 1. The default is 1.2f for the scale and 8 levels; for fewer calculations, use a scaling of 1.5f and 4 levels (again, just a suggestion; other parameters will work too).
Your fourth and last parameters say that the patch size to calculate on is 10x10. That's pretty small, but if you work at low resolution it's fine.
Your score type (the second-to-last parameter) can change the runtime a bit: you can use ORB::FAST_SCORE instead of ORB::HARRIS_SCORE, but it doesn't matter much.
Last but not least, when you initialise the BruteForce matcher object, remember to use the cv::NORM_HAMMING type, since ORB is a binary feature; this makes the norm calculations in the matching process actually meaningful.
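Putting those suggestions together, a sketch of what the setup for OpenCV 2.4.x might look like (the numbers are suggestions, not tuned values):
// Keep on the order of 100 features, scale by 1.5 between 4 pyramid levels,
// and leave the edge threshold / patch size at their defaults (31).
cv::OrbFeatureDetector detector(100, 1.5f, 4);
cv::OrbDescriptorExtractor extractor;

// ORB descriptors are binary, so match them with the Hamming norm;
// crossCheck = true keeps only mutual best matches.
cv::BFMatcher matcher(cv::NORM_HAMMING, true);
With crossCheck enabled, match() only returns mutual best matches, which already removes many of the outliers the min_dist filter is trying to catch.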

Opencv Image Stitching or Panorama

I am doing image stitching in OpenCV (a panorama), but I have one problem.
I can't use OpenCV's Stitcher class, so I have to build the panorama with only feature points and homographies.
OrbFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Step 2: Calculate descriptors (feature vectors)
OrbDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
cout<<"La distancia es " <<endl;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors with a brute force matcher
BFMatcher matcher(NORM_HAMMING, true);
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
Here I obtain the matches, but I need to filter them:
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < matches.size(); i++ )
{
double dist = matches[i].distance;
//cout<<"La distancia es " << i<<endl;
if( dist < min_dist && dist >3)
{
min_dist = dist;
}
if( dist > max_dist) max_dist = dist;
}
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches;
for( int i = 0; i < matches.size(); i++ )
{
//cout<<matches[i].distance<<endl;
if( matches[i].distance < 3*min_dist && matches[i].distance > 3)
{
good_matches.push_back( matches[i]); }
}
Now I calculate the homography:
vector<Point2f> p1, p2;
for (unsigned int i = 0; i < matches.size(); i++) {
p1.push_back(keypoints_1[matches[i].queryIdx].pt);
p2.push_back(keypoints_2[matches[i].trainIdx].pt);
}
// Homography
vector<unsigned char> match_mask;
Mat h = findHomography(Mat(p1),Mat(p2), match_mask,CV_RANSAC);
And finally, I obtain the transform matrix and apply warpPerspective to join the two images. My problem is that black areas appear around the photo in the final image, and when I loop again the final image becomes illegible.
// Transform perspective for image 2
vector<Point2f> cuatroPuntos;
cuatroPuntos.push_back(Point2f (0,0));
cuatroPuntos.push_back(Point2f (img_1.size().width,0));
cuatroPuntos.push_back(Point2f (0, img_1.size().height));
cuatroPuntos.push_back(Point2f (img_1.size().width, img_1.size().height));
Mat MDestino;
perspectiveTransform(Mat(cuatroPuntos), MDestino, h);
// Compute corners of image 2
double min_x, min_y, tam_x, tam_y;
float min_x1, min_x2, min_y1, min_y2, max_x1, max_x2, max_y1, max_y2;
min_x1 = min(MDestino.at<Point2f>(0).x, MDestino.at<Point2f>(1).x);
min_x2 = min(MDestino.at<Point2f>(2).x, MDestino.at<Point2f>(3).x);
min_y1 = min(MDestino.at<Point2f>(0).y, MDestino.at<Point2f>(1).y);
min_y2 = min(MDestino.at<Point2f>(2).y, MDestino.at<Point2f>(3).y);
max_x1 = max(MDestino.at<Point2f>(0).x, MDestino.at<Point2f>(1).x);
max_x2 = max(MDestino.at<Point2f>(2).x, MDestino.at<Point2f>(3).x);
max_y1 = max(MDestino.at<Point2f>(0).y, MDestino.at<Point2f>(1).y);
max_y2 = max(MDestino.at<Point2f>(2).y, MDestino.at<Point2f>(3).y);
min_x = min(min_x1, min_x2);
min_y = min(min_y1, min_y2);
tam_x = max(max_x1, max_x2);
tam_y = max(max_y1, max_y2);
// Transformation matrix
Mat Htr = Mat::eye(3,3,CV_64F);
if (min_x < 0){
tam_x = img_2.size().width - min_x;
Htr.at<double>(0,2)= -min_x;
}
if (min_y < 0){
tam_y = img_2.size().height - min_y;
Htr.at<double>(1,2)= -min_y;
}
// Build the panorama
Mat Panorama;
Panorama = Mat(Size(tam_x,tam_y), CV_32F);
warpPerspective(img_2, Panorama, Htr, Panorama.size(), INTER_LINEAR, BORDER_CONSTANT, 0);
warpPerspective(img_1, Panorama, (Htr*h), Panorama.size(), INTER_LINEAR, BORDER_TRANSPARENT,0);
Does anyone know how I can eliminate these black areas? Am I doing something wrong? Does anyone know of working code I could look at for comparison?
Thanks for your time
EDIT:
That is my image:
And I want to eliminate the black part.
As Micka suggested, when you do stitching the panorama is usually wavy, because a homography (or other projection method) does not map a rectangle to another rectangle. You can compensate for this effect by using some "straightening"; see this article:
M. Brown and D. G. Lowe. Automatic panoramic image stitching using invariant features. IJCV, 74(1):59–73, 2007
As to cropping the black part, I wrote this class that you can use. This class assumes the image is BGR and the black pixels have value Vec3b(0,0,0). The source code can be accessed here:
https://github.com/chmos/crop-images.git
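If you don't want to pull in that class, a much cruder sketch is to crop the panorama to the bounding box of its non-black pixels (it will still keep any black wedges that fall inside that box):
#include <opencv2/opencv.hpp>
#include <vector>

// Crop a BGR panorama to the bounding box of its non-black pixels.
cv::Mat cropBlackBorder(const cv::Mat& panorama)
{
    cv::Mat gray, mask;
    cv::cvtColor(panorama, gray, cv::COLOR_BGR2GRAY);
    cv::threshold(gray, mask, 0, 255, cv::THRESH_BINARY);   // non-black -> 255

    std::vector<cv::Point> nonZero;
    cv::findNonZero(mask, nonZero);
    if (nonZero.empty())
        return panorama.clone();

    return panorama(cv::boundingRect(nonZero)).clone();
}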
Best,