I wonder why there is no good match between two images using SIFT.
The matched images are shown below.
The original images are:
My program is as follows:
int imagematching(Mat &img1, Mat & img2, std::vector<Point2f> &first_keypoints, std::vector<Point2f> &second_keypoints){
int max_keypoints = 500;               // note: currently unused, so SIFT::create() runs with its defaults
Ptr<SIFT> detector = SIFT::create();   // a single SIFT instance could serve as both detector and extractor
Ptr<SIFT> extractor = SIFT::create();
//--Step 1: Key point detection
std::vector<KeyPoint> keypoints1, keypoints2;
//-- Step 2: Calculate descriptors (feature vectors)
Mat descriptors1, descriptors2;
detector->detect( img1, keypoints1 );
detector->detect( img2, keypoints2 );
extractor->compute(img1, keypoints1, descriptors1);
extractor->compute(img2, keypoints2, descriptors2);
FlannBasedMatcher matcher;
vector<DMatch> matches;
matcher.match(descriptors1, descriptors2, matches);
double max_dist = 0; double min_dist = 999999;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors1.rows; i++ )
{
double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors1.rows; i++ )
{
if( matches[i].distance < 3*min_dist )
{ good_matches.push_back( matches[i]); }
}
matches.clear();
Mat img_matches;
drawMatches( img1, keypoints1, img2, keypoints2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
descriptors1.release();
descriptors2.release();
//-- Localize the object
//std::vector<Point2f> first_keypoints;
//std::vector<Point2f> second_keypoints;
for( int i = 0; i < good_matches.size(); i++ )
{
//cout << i << " :";
//-- Get the keypoints from the good matches
if( keypoints1[ good_matches[i].queryIdx ].pt.x > 0 && keypoints1[ good_matches[i].queryIdx ].pt.y > 0
&& keypoints2[ good_matches[i].trainIdx ].pt.x > 0 && keypoints2[ good_matches[i].trainIdx ].pt.y > 0){
first_keypoints.push_back( keypoints1[ good_matches[i].queryIdx ].pt );
//cout << "first point" << keypoints1[ good_matches[i].queryIdx ].pt << endl;
second_keypoints.push_back( keypoints2[ good_matches[i].trainIdx ].pt );
//cout << "second point" << keypoints2[ good_matches[i].trainIdx ].pt << endl;
}
}
keypoints1.clear();
keypoints2.clear();
good_matches.clear();
//-- Show detected matches
imshow( "Good Matches & Object detection", img_matches );
waitKey(0);
return SUCCESS;
}
SIFT might be rotation invariant, but it is definitely not perspective invariant.
You will need to add some machine learning (maybe an SVM) to be able to match images taken from different perspectives; SIFT features alone are not enough.
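Short of a learned classifier, a geometric consistency check on the raw matches often helps under moderate perspective change. The following is only a sketch, not the asker's code: it assumes the keypoints1/keypoints2 and good_matches variables from the question and that opencv2/calib3d.hpp is available for findHomography.
std::vector<Point2f> pts1, pts2;
for (size_t i = 0; i < good_matches.size(); i++) {
    // collect the coordinates of the tentatively matched keypoints
    pts1.push_back(keypoints1[good_matches[i].queryIdx].pt);
    pts2.push_back(keypoints2[good_matches[i].trainIdx].pt);
}
std::vector<DMatch> inlier_matches;
if (pts1.size() >= 4) {
    // keep only matches consistent with a single homography estimated by RANSAC
    std::vector<uchar> inlier_mask;
    Mat H = findHomography(pts1, pts2, RANSAC, 3.0, inlier_mask);
    for (size_t i = 0; i < inlier_mask.size(); i++) {
        if (inlier_mask[i])
            inlier_matches.push_back(good_matches[i]);
    }
}
Drawing inlier_matches instead of good_matches usually gives a much clearer picture of whether the two views really correspond.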
I need some help trying to match a non-normal blood cell against the original cell.
After some searching I found SURF and SIFT feature extraction.
I have this template:
and this original image:
This is my code:
int main(int argc, char** argv)
{
Mat img_scene = imread("d:\\projimg\\last.jpg", CV_LOAD_IMAGE_GRAYSCALE);
Mat img_object = imread("d:\\projimg\\lasttmp.jpg", CV_LOAD_IMAGE_GRAYSCALE);
//Mat img_object = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
//Mat img_scene = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
if (!img_object.data || !img_scene.data)
{
std::cout << " --(!) Error reading images " << std::endl; return -1;
}
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 200;
SurfFeatureDetector detector(minHessian);
std::vector<KeyPoint> keypoints_object, keypoints_scene;
detector.detect(img_object, keypoints_object);
detector.detect(img_scene, keypoints_scene);
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_object, descriptors_scene;
extractor.compute(img_object, keypoints_object, descriptors_object);
extractor.compute(img_scene, keypoints_scene, descriptors_scene);
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match(descriptors_object, descriptors_scene, matches);
sort(matches.begin(), matches.end());        // sort by distance first...
if (matches.size() > 10) matches.resize(10); // ...then keep only the 10 closest matches
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for (int i = 0; i <matches.size(); i++)
{
double dist = matches[i].distance;
if (dist < min_dist) min_dist = dist;
if (dist > max_dist) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist);
printf("-- Min dist : %f \n", min_dist);
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches;
for (int i = 0; i < matches.size(); i++)
{
if (matches[i].distance < 3 * min_dist)
{
good_matches.push_back(matches[i]);
}
}
Mat img_matches;
drawMatches(img_object, keypoints_object, img_scene, keypoints_scene,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
//-- Localize the object
std::vector<Point2f> obj;
std::vector<Point2f> scene;
for (int i = 0; i < good_matches.size(); i++)
{
//-- Get the keypoints from the good matches
obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
}
imshow("ii", img_matches);
waitKey();
return 0;
}
Can anyone help me match all similar templates?
Thanks all.
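Since the goal is to find every cell that looks like the template rather than a single best match, plain template matching with a score threshold is one alternative worth trying. This is only a sketch under assumptions: it reuses the file paths from the question, expects a reasonably recent OpenCV (IMREAD_GRAYSCALE, TM_CCOEFF_NORMED), and the 0.8 threshold is an arbitrary value that would need tuning; overlapping detections are not suppressed.
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat scene = imread("d:\\projimg\\last.jpg", IMREAD_GRAYSCALE);
    Mat templ = imread("d:\\projimg\\lasttmp.jpg", IMREAD_GRAYSCALE);
    if (scene.empty() || templ.empty())
        return -1;

    // Normalized cross-correlation: scores close to 1.0 indicate a strong match.
    Mat response;
    matchTemplate(scene, templ, response, TM_CCOEFF_NORMED);

    // Mark every location whose score exceeds the (assumed) threshold.
    Mat result = scene.clone();
    double score_threshold = 0.8; // assumption: tune on real data
    for (int y = 0; y < response.rows; y++)
        for (int x = 0; x < response.cols; x++)
            if (response.at<float>(y, x) >= score_threshold)
                rectangle(result, Rect(x, y, templ.cols, templ.rows), Scalar(255), 1);

    imshow("detections", result);
    waitKey(0);
    return 0;
}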
I am trying to perform a video stabilization technique using OpenCV and C++. This technique involves SURF detection and description, calculating a homography, and warping the perspective based on that homography for each subsequent frame. From the articles I have seen, I got the impression that this should produce a stabilized video, but this is not the case. I can't seem to find out what I'm doing wrong. The following is my code:
Mat prev_frame, curr_frame;
int minHessian = 400;
Ptr<SURF> detector = SURF::create( minHessian );
std::vector<KeyPoint> prev_keypoints, curr_keypoints;
Mat prev_descriptors, curr_descriptors;
VideoCapture cap("patio.mp4");
if (!cap.isOpened())
return -1;
cap >> curr_frame;
detector->detectAndCompute( curr_frame, Mat(), curr_keypoints, curr_descriptors );
namedWindow("Original Video");
namedWindow("Modified Video");
moveWindow("Modified Video", 600, 0);
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
for(;;){
prev_frame = curr_frame.clone();
prev_keypoints = curr_keypoints;
prev_descriptors = curr_descriptors.clone();
curr_keypoints.clear();
curr_descriptors = Mat();
cap >> curr_frame;
if( !prev_frame.data || !curr_frame.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
detector->detectAndCompute( curr_frame, Mat(), curr_keypoints, curr_descriptors );
if (curr_keypoints.size() < 20) {
continue;
}
matcher.match( prev_descriptors, curr_descriptors, matches );
double max_dist = 0; double min_dist = 100;
for( int i = 0; i < prev_descriptors.rows; i++ )
{
double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
std::vector< DMatch > good_matches;
for( int i = 0; i < prev_descriptors.rows; i++ )
{
if( matches[i].distance < 3*min_dist )
{
good_matches.push_back( matches[i]);
}
}
std::vector<Point2f> prev;
std::vector<Point2f> curr;
for( size_t i = 0; i < good_matches.size(); i++ )
{
//-- Get the keypoints from the good matches
prev.push_back( prev_keypoints[ good_matches[i].queryIdx ].pt );
curr.push_back( curr_keypoints[ good_matches[i].trainIdx ].pt );
}
if (prev.size() <=4 || curr.size() <=4){
continue;
}
Mat H = findHomography( prev, curr, RANSAC );
Mat modified;
warpPerspective(curr_frame,modified,H,curr_frame.size());
imshow("Original Video", curr_frame);
imshow("Modified Video", modified);
waitKey(30);
}
waitKey(0);
The original and modified videos are in the following links respectively:
https://www.dropbox.com/s/ilqwdocgankfe6n/input.mp4?dl=0
https://www.dropbox.com/s/1kd981j92drkfrn/output.mwv?dl=0
Please help.
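One thing worth checking, offered only as an assumption about the cause: H is estimated as findHomography(prev, curr, RANSAC), so it maps previous-frame points to current-frame points, and warping curr_frame by H pushes the frame further along the motion instead of back into alignment with the previous frame. Warping by the inverse aligns the current frame to the previous one; a full stabilizer would additionally accumulate these transforms over time and smooth the resulting trajectory. A minimal sketch of the aligned warp, reusing the variables from the code above:
// H maps prev -> curr, so the inverse maps the current frame back onto the previous one.
if (!H.empty()) {
    warpPerspective(curr_frame, modified, H.inv(), curr_frame.size());
    imshow("Modified Video", modified);
}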
I'm trying to write a program that uses the ORB algorithm to detect and compute the keypoints of an image and matches the descriptor vectors using the FLANN matcher.
The issue I am facing is that every time I run the program in Visual C++, I get an error that says "vector subscript out of range" (I've also attached an image of the error).
The problem seems to be somewhere in the for loops, because when I start the debugger it stops there and I get the error. When I commented out the first for loop to see if the rest was OK, I got the same error on the second for loop.
Please help me find the problem.
#include <iostream>
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2\core\types.hpp>
#include <opencv2\highgui.hpp>
#include <opencv2\core.hpp>
#include <opencv2\opencv_modules.hpp>
using namespace cv;
using namespace std;
int main()
{
Mat img1 = imread("C:\\Users\\patri\\Desktop\\test.bmp");
Mat img2 = imread("C:\\Users\\patri\\Desktop\\test3.bmp");
/*
if (!img1.data || !img2.data)
{
printf(" --(!) Error reading images \n"); return -1;
}
*/
std::vector<KeyPoint> keypoints_1, keypoints_2;
Mat descriptors_1, descriptors_2;
Ptr<ORB> orb = ORB::create(100, 2, 8, 31, 0, 2, ORB::HARRIS_SCORE, 31, 20);
orb->detectAndCompute(img1, Mat(), keypoints_1, descriptors_1);
orb->detectAndCompute(img2, Mat(), keypoints_2, descriptors_2);
std::cout << "Found " << keypoints_1.size() << " Keypoints " << std::endl;
std::cout << "Found " << keypoints_2.size() << " Keypoints " << std::endl;
Mat out1, out2;
drawKeypoints(img1, keypoints_1, out1, Scalar::all(255));
drawKeypoints(img2, keypoints_2, out2, Scalar::all(255));
imshow("Kpts1", out1);
imshow("Kpts2", out2);
//////////////////////////////////////////////////////////////////////
// Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
//matcher.match(descriptors_1, descriptors_2, matches);
double max_dist = 0; double min_dist = 100;
//calculation of max and min distances between keypoints
for (int i = 0; i < descriptors_1.rows; i++)
{
double dist = matches[i].distance;
if (dist < min_dist) min_dist = dist;
if (dist > max_dist) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist);
printf("-- Min dist : %f \n", min_dist);
std::vector< DMatch > good_matches;
for (int i = 0; i < descriptors_1.rows; i++)
{
if (matches[i].distance <= max(2 * min_dist, 0.02))
{
good_matches.push_back(matches[i]);
}
}
//-- Draw only "good" matches
Mat img_matches;
drawMatches(img1, keypoints_1, img2, keypoints_2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
//-- Show detected matches
imshow("Good Matches", img_matches);
for (int i = 0; i < (int)good_matches.size(); i++)
{
printf("-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx);
}
waitKey(0);
return 0;
}
The error I'm getting:
Your std::vector< DMatch > matches is empty, but you are trying to access its elements in the for loops.
std::vector< DMatch > matches;//this creates an empty vector
//you need to push_back some elements in matches before trying to access it in your loops
......
//calculation of max and min distances between keypoints
for (int i = 0; i < descriptors_1.rows; i++)
{
double dist = matches[i].distance;//this is trying to access the empty vector
......
}
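For completeness, a sketch of a matching step that would actually fill matches; this is an assumption about the intended flow, not the poster's code. Note that FLANN's default KD-tree index expects float descriptors while ORB produces binary ones, so either construct the matcher with LSH index parameters or use a brute-force matcher with NORM_HAMMING.
// FLANN with LSH parameters works on ORB's binary (CV_8U) descriptors.
FlannBasedMatcher matcher(makePtr<flann::LshIndexParams>(12, 20, 2));
// Alternative: BFMatcher matcher(NORM_HAMMING);

std::vector<DMatch> matches;
if (!descriptors_1.empty() && !descriptors_2.empty())
    matcher.match(descriptors_1, descriptors_2, matches);
if (matches.empty())
    return -1; // nothing to compare, so the distance loops must not run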
I think the vector variable good_matches may have zero elements; the problem may hide in this code:
for (int i = 0; i < descriptors_1.rows; i++)
{
double dist = matches[i].distance;
if (dist < min_dist) min_dist = dist;
if (dist > max_dist) max_dist = dist;
}
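As a small defensive addition (a sketch, not from the original post), bounding the loops by the matches that actually exist, rather than by descriptors_1.rows, avoids the subscript error even when the matcher produced nothing:
for (size_t i = 0; i < matches.size(); i++)
{
    double dist = matches[i].distance;
    if (dist < min_dist) min_dist = dist;
    if (dist > max_dist) max_dist = dist;
}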
I have been working on stitching multiple images in OpenCV in order to create a mosaic.
I followed this link on opencv:
Stitch multiple images using OpenCV (Python)
Here's the code I have so far:
// imgstch.cpp :
//#include "stdafx.h"
#include<opencv/cv.h>
#include<opencv/highgui.h>
#include<iostream>
#include<stdio.h>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#define _CRT_SECURE_NO_WARNINGS
using namespace std;
using namespace cv;
int main()
{
//-- Input the two images
cv::Mat img1;
std::vector<cv::KeyPoint> img1_keypoints;
cv::Mat img1_descriptors;
cv::Mat img2;
std::vector<cv::KeyPoint> img2_keypoints;
cv::Mat img2_descriptors;
img1 = cv::imread("/home/ishita/Downloads/ishita/Downloads/Mosaic/b2.JPG");
img2 = cv::imread("/home/ishita/Downloads/ishita/Downloads/Mosaic/b1.JPG");
//-- ORB feature detector, extractor and descriptor
int minHessian = 1800;
OrbFeatureDetector detector( minHessian );
OrbDescriptorExtractor extractor;
detector.detect(img1, img1_keypoints);
detector.detect(img2, img2_keypoints);
extractor.compute(img1, img1_keypoints, img1_descriptors);
extractor.compute(img2, img2_keypoints, img2_descriptors);
//-- Matching descriptor vectors with a brute force matcher
BFMatcher matcher(NORM_HAMMING);
std::vector< DMatch > matches;
matcher.match( img1_descriptors, img2_descriptors, matches );
imshow("image1", img1);
imshow("image2",img2);
//-- Draw matches
Mat img_matches;
drawMatches( img1, img1_keypoints, img2, img2_keypoints, matches, img_matches );
//-- Show detected matches
imshow("Matches", img_matches );imwrite("/home/ishita/img_stitch/img_matches.jpg",img_matches);
double max_dist = 0; double min_dist = 10;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < matches.size(); i++ )
{
double dist = matches[i].distance;
if( dist < min_dist && dist >3)
{
min_dist = dist;
}
if( dist > max_dist) max_dist = dist;
}
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches;
for( int i = 0; i < matches.size(); i++ )
{
//cout<<matches[i].distance<<endl;
if( matches[i].distance < 3*min_dist && matches[i].distance > 3)
{
good_matches.push_back( matches[i]); }
}
//calculate the Homography
vector<Point2f> p1, p2;
for (unsigned int i = 0; i < matches.size(); i++) {
p1.push_back(img1_keypoints[matches[i].queryIdx].pt);
p2.push_back(img2_keypoints[matches[i].trainIdx].pt);
}
// Homography
vector<unsigned char> match_mask;
Mat H = findHomography(p1,p2,CV_RANSAC);
cout << "H = "<< endl << " " << H << endl << endl;
// Use the Homography Matrix to warp the images
cv::Mat result;
result=img1.clone();
warpPerspective(img1,result,H,cv::Size(img1.cols+img2.cols,img1.rows));
cv::Mat half(result,cv::Rect(0,0,img2.cols,img2.rows));
img2.copyTo(half);
imwrite("/home/ishita/img_stitch/result.jpg",result);
imshow( "Result", result );
//for images 2 and 3
cv::Mat img3;
std::vector<cv::KeyPoint> img3_keypoints;
cv::Mat img3_descriptors;
img3 = cv::imread("/home/ishita/Downloads/ishita/Downloads/Mosaic/b3.JPG");
//detector.detect(img2, img2_keypoints);
detector.detect(img3, img3_keypoints);
//extractor.compute(img2, img2_keypoints, img2_descriptors);
extractor.compute(img3, img3_keypoints, img3_descriptors);
matcher.match( img1_descriptors, img3_descriptors, matches );
//imshow("image2", img1);
imshow("image3",img3);
//-- Draw matches
Mat img_matches2;
drawMatches( img1, img1_keypoints, img3, img3_keypoints, matches, img_matches2 );
//-- Show detected matches
imshow("Matches2", img_matches2 );imwrite("/home/ishita/img_stitch/img_matches.jpg",img_matches2);
max_dist = 0; min_dist = 10;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < matches.size(); i++ )
{
double dist = matches[i].distance;
if( dist < min_dist && dist >3)
{
min_dist = dist;
}
if( dist > max_dist) max_dist = dist;
}
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches2;
for( int i = 0; i < matches.size(); i++ )
{
//cout<<matches[i].distance<<endl;
if( matches[i].distance < 3*min_dist && matches[i].distance > 3)
{
good_matches2.push_back( matches[i]); }
}
//calculate the Homography
vector<Point2f> p3, p4;
for (unsigned int i = 0; i < matches.size(); i++) {
p3.push_back(img1_keypoints[matches[i].queryIdx].pt);
p4.push_back(img3_keypoints[matches[i].trainIdx].pt);
}
// Homography
vector<unsigned char> match_mask2;
Mat H2 = findHomography(p3,p4,CV_RANSAC);
Mat H3 = H * H2;
cout << "H2= "<< endl << " " << H2 << endl << endl;
// Use the Homography Matrix to warp the images
cv::Mat result2;
result2 = result.clone();
warpPerspective(result,result2,H3,cv::Size(img3.cols+result.cols,result.rows));
cv::Mat half2(result,cv::Rect(0,0,img3.cols,img3.rows));
img3.copyTo(half2);
imwrite("/home/ishita/img_stitch/result.jpg",result2);
imshow( "Result2", result2 );
waitKey(0);
return 0;
}
The result of stitching the first two images is as required, but the result of stitching the third image is not.
What could possibly be wrong with the logic behind this, or with the method's implementation?
The images and the result can be found here :
https://drive.google.com/folderview?id=0BxXVoeIUgVW7fnFMbExhTzN4QnRueXZpQmpILTZCWFZoTlZEckxfWV83VjkxMmFNSXdLVWM&usp=sharing
This answer is late, but it may be helpful for others.
In some algorithms the images being stitched must have the same dimensions.
(If that is the problem, try the solution below.)
1. Run the program once for (img1, img2) => result1.
2. Then run it for (img2, img3) => result2.
3. Finally, run it for (result1, result2) => final.
Here is another example that takes all the images at once:
https://www.geeksforgeeks.org/stitching-input-images-panorama-using-opencv-c/
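The linked example relies on OpenCV's high-level Stitcher class rather than hand-rolled homographies. A minimal sketch of that route, assuming OpenCV 4.x with the stitching module built; the file names are placeholders for the mosaic frames:
#include <opencv2/opencv.hpp>
#include <opencv2/stitching.hpp>
#include <vector>
using namespace cv;

int main()
{
    // Placeholder input files; replace with the actual mosaic frames.
    std::vector<Mat> imgs;
    imgs.push_back(imread("b1.JPG"));
    imgs.push_back(imread("b2.JPG"));
    imgs.push_back(imread("b3.JPG"));

    Mat pano;
    Ptr<Stitcher> stitcher = Stitcher::create(Stitcher::PANORAMA);
    Stitcher::Status status = stitcher->stitch(imgs, pano);
    if (status != Stitcher::OK)
        return -1; // stitching failed, e.g. too little overlap between frames

    imwrite("result.jpg", pano);
    return 0;
}
The Stitcher handles feature matching, homography estimation, seam finding and blending internally, which sidesteps the chained-homography issue in the manual approach.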
I am using this FLANN matcher algorithm to match interest points in two pictures (the code is displayed below).
At one point the code builds a list of matched points:
std::vector<DMatch> good_matches;
I would like to get the point locations (x, y) in both pictures in order to create a displacement map. How can I access these point locations?
Cheers,
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace cv;
void readme();
/** @function main */
int main(int argc, char** argv) {
if (argc != 3) {
readme();
return -1;
}
// Transform in GrayScale
Mat img_1 = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
Mat img_2 = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
// Checks if the image could be loaded
if (!img_1.data || !img_2.data) {
std::cout << " --(!) Error reading images " << std::endl;
return -1;
}
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
SurfFeatureDetector detector(minHessian);
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect(img_1, keypoints_1);
detector.detect(img_2, keypoints_2);
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute(img_1, keypoints_1, descriptors_1);
extractor.compute(img_2, keypoints_2, descriptors_2);
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector<DMatch> matches;
matcher.match(descriptors_1, descriptors_2, matches);
double max_dist = 0;
double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for (int i = 0; i < descriptors_1.rows; i++) {
double dist = matches[i].distance;
// printf("-- DISTANCE = [%f]\n", dist);
if (dist < min_dist)
min_dist = dist;
if (dist > max_dist)
max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist);
printf("-- Min dist : %f \n", min_dist);
//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist )
//-- PS.- radiusMatch can also be used here.
std::vector<DMatch> good_matches;
for (int i = 0; i < descriptors_1.rows; i++) {
if (matches[i].distance < 2 * min_dist) {
good_matches.push_back(matches[i]);
}
}
//-- Draw only "good" matches
Mat img_matches;
drawMatches(img_1, keypoints_1, img_2, keypoints_2, good_matches,
img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(),
DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
//-- Show detected matches
imshow("Good Matches", img_matches);
for (int i = 0; i < good_matches.size(); i++) {
printf("-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i,
good_matches[i].queryIdx, good_matches[i].trainIdx);
}
waitKey(0);
return 0;
}
/** @function readme */
void readme() {
std::cout << " Usage: ./SURF_FlannMatcher <img1> <img2>" << std::endl;
}
matched_points1 and matched_points2 will be the corresponding points in the left and right images. Since matcher.match(descriptors_1, descriptors_2, matches) was called with the first image as the query, you get the indices from good_matches with idx1=good_matches[i].queryIdx for the left (first) image and idx2=good_matches[i].trainIdx for the right (second) image. Then just push the corresponding keypoint coordinates into your matched_points vectors to obtain the (x, y) point vectors of the matches.
long num_matches = good_matches.size();
vector<Point2f> matched_points1;
vector<Point2f> matched_points2;
for (int i = 0; i < num_matches; i++)
{
    // queryIdx indexes keypoints_1 (first image), trainIdx indexes keypoints_2 (second image)
    int idx1 = good_matches[i].queryIdx;
    int idx2 = good_matches[i].trainIdx;
    matched_points1.push_back(keypoints_1[idx1].pt);
    matched_points2.push_back(keypoints_2[idx2].pt);
}
Now you have two vectors of the matched points. I think that's what you're asking?
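If the end goal is the displacement map mentioned in the question, the per-match displacement follows directly from the two vectors; a small sketch building on the loop above:
// Displacement of each matched point from image 1 to image 2.
std::vector<Point2f> displacements;
for (size_t i = 0; i < matched_points1.size(); i++)
    displacements.push_back(matched_points2[i] - matched_points1[i]);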