I am using this FLANN matcher algorithm to match interest points in 2 pictures (the code is displayed below).
There is a moment when the code finds a list of matched points:
std::vector<DMatch> good_matches;
I would like to get the point locations (x, y) in both pictures, in order to create a displacement map. How can I access these point locations?
Cheers,
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace cv;
void readme();
/** #function main */
int main(int argc, char** argv) {
if (argc != 3) {
readme();
return -1;
}
// Transform in GrayScale
Mat img_1 = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
Mat img_2 = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
// Checks if the image could be loaded
if (!img_1.data || !img_2.data) {
std::cout << " --(!) Error reading images " << std::endl;
return -1;
}
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
SurfFeatureDetector detector(minHessian);
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect(img_1, keypoints_1);
detector.detect(img_2, keypoints_2);
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute(img_1, keypoints_1, descriptors_1);
extractor.compute(img_2, keypoints_2, descriptors_2);
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector<DMatch> matches;
matcher.match(descriptors_1, descriptors_2, matches);
double max_dist = 0;
double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for (int i = 0; i < descriptors_1.rows; i++) {
double dist = matches[i].distance;
// printf("-- DISTANCE = [%f]\n", dist);
if (dist < min_dist)
min_dist = dist;
if (dist > max_dist)
max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist);
printf("-- Min dist : %f \n", min_dist);
//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist )
//-- PS.- radiusMatch can also be used here.
std::vector<DMatch> good_matches;
for (int i = 0; i < descriptors_1.rows; i++) {
if (matches[i].distance < 2 * min_dist) {
good_matches.push_back(matches[i]);
}
}
//-- Draw only "good" matches
Mat img_matches;
drawMatches(img_1, keypoints_1, img_2, keypoints_2, good_matches,
img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(),
DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
//-- Show detected matches
imshow("Good Matches", img_matches);
for (int i = 0; i < good_matches.size(); i++) {
printf("-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i,
good_matches[i].queryIdx, good_matches[i].trainIdx);
}
waitKey(0);
return 0;
}
/** @brief Prints the command-line usage for this program. */
void readme() {
    const char* const kUsage = " Usage: ./SURF_FlannMatcher <img1> <img2>";
    std::cout << kUsage << std::endl;
}
matched_points1 and matched_points2 will be the corresponding points in the first and second images. For each good match, idx1 = good_matches[i].queryIdx indexes the FIRST (query) image's keypoints and idx2 = good_matches[i].trainIdx indexes the SECOND (train) image's keypoints. Then just push the corresponding keypoint coordinates (the .pt member) into your matched_points vectors to obtain the (x, y) point vectors of the matches.
// Collect the (x, y) coordinates of every good match.
// BUG FIX: queryIdx indexes the FIRST (query) image's keypoints and trainIdx
// indexes the SECOND (train) image's keypoints — the original snippet had the
// two swapped, and referenced undefined `points1`/`points2` instead of the
// keypoint vectors from the question's code.
long num_matches = good_matches.size();
vector<Point2f> matched_points1;
vector<Point2f> matched_points2;
for (long i = 0; i < num_matches; i++)
{
    int idx1 = good_matches[i].queryIdx;   // index into keypoints_1 (image 1)
    int idx2 = good_matches[i].trainIdx;   // index into keypoints_2 (image 2)
    matched_points1.push_back(keypoints_1[idx1].pt);
    matched_points2.push_back(keypoints_2[idx2].pt);
}
Now you have two vectors of the matched points. I think that's what you're asking?
Related
I need some help trying to match non-normal blood cell with original cell.
After some searches I found SURF and SIFT feature extraction.
I have this template;
and this original image;
This is my code:
int main(int argc, char** argv)
{
Mat img_scene = imread("d:\\projimg\\last.jpg", CV_LOAD_IMAGE_GRAYSCALE);
Mat img_object = imread("d:\\projimg\\lasttmp.jpg", CV_LOAD_IMAGE_GRAYSCALE);
//Mat img_object = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
//Mat img_scene = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
if (!img_object.data || !img_scene.data)
{
std::cout << " --(!) Error reading images " << std::endl; return -1;
}
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 200;
SurfFeatureDetector detector(minHessian);
std::vector<KeyPoint> keypoints_object, keypoints_scene;
detector.detect(img_object, keypoints_object);
detector.detect(img_scene, keypoints_scene);
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_object, descriptors_scene;
extractor.compute(img_object, keypoints_object, descriptors_object);
extractor.compute(img_scene, keypoints_scene, descriptors_scene);
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match(descriptors_object, descriptors_scene, matches);
matches.resize(10);
sort(matches.begin(), matches.end());
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for (int i = 0; i <matches.size(); i++)
{
double dist = matches[i].distance;
if (dist < min_dist) min_dist = dist;
if (dist > max_dist) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist);
printf("-- Min dist : %f \n", min_dist);
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches;
for (int i = 0; i < matches.size(); i++)
{
if (matches[i].distance < 3 * min_dist)
{
good_matches.push_back(matches[i]);
}
}
Mat img_matches;
drawMatches(img_object, keypoints_object, img_scene, keypoints_scene,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
//-- Localize the object
std::vector<Point2f> obj;
std::vector<Point2f> scene;
for (int i = 0; i < good_matches.size(); i++)
{
//-- Get the keypoints from the good matches
obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
}
imshow("ii", img_matches);
waitKey();
return 0;
}
Can anyone help me to match all similar templates ?
Thanks all
I'm trying to write a program that uses ORB algorithm to detect and compute the keypoints of an image and matches descriptor vectors using FLANN matcher.
The issue I am facing is that every time I run the program in Visual C++, I get an error that says "vector subscript out of range" (I've also attached an image of the error).
The problem seems to be somewhere in the for because when I start the debugger it stops there and I get the error. When I commented the first for to see if the rest is ok, I've got the same error on the second for.
Please help me find the problem.
#include <iostream>
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2\core\types.hpp>
#include <opencv2\highgui.hpp>
#include <opencv2\core.hpp>
#include <opencv2\opencv_modules.hpp>
using namespace cv;
using namespace std;
int main()
{
Mat img1 = imread("C:\\Users\\patri\\Desktop\\test.bmp");
Mat img2 = imread("C:\\Users\\patri\\Desktop\\test3.bmp");
/*
if (!img1.data || !img2.data)
{
printf(" --(!) Error reading images \n"); return -1;
}
*/
std::vector<KeyPoint> keypoints_1, keypoints_2;
Mat descriptors_1, descriptors_2;
Ptr<ORB> orb = ORB::create(100, 2, 8, 31, 0, 2, ORB::HARRIS_SCORE, 31, 20);
orb->detectAndCompute(img1, Mat(), keypoints_1, descriptors_1);
orb->detectAndCompute(img2, Mat(), keypoints_2, descriptors_2);
std::cout << "Found " << keypoints_1.size() << " Keypoints " << std::endl;
std::cout << "Found " << keypoints_2.size() << " Keypoints " << std::endl;
Mat out1, out2;
drawKeypoints(img1, keypoints_1, out1, Scalar::all(255));
drawKeypoints(img2, keypoints_2, out2, Scalar::all(255));
imshow("Kpts1", out1);
imshow("Kpts2", out2);
//////////////////////////////////////////////////////////////////////
// Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
//matcher.match(descriptors_1, descriptors_2, matches);
double max_dist = 0; double min_dist = 100;
//calculation of max and min distances between keypoints
for (int i = 0; i < descriptors_1.rows; i++)
{
double dist = matches[i].distance;
if (dist < min_dist) min_dist = dist;
if (dist > max_dist) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist);
printf("-- Min dist : %f \n", min_dist);
std::vector< DMatch > good_matches;
for (int i = 0; i < descriptors_1.rows; i++)
{
if (matches[i].distance <= max(2 * min_dist, 0.02))
{
good_matches.push_back(matches[i]);
}
}
//-- Draw only "good" matches
Mat img_matches;
drawMatches(img1, keypoints_1, img2, keypoints_2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
//-- Show detected matches
imshow("Good Matches", img_matches);
for (int i = 0; i < (int)good_matches.size(); i++)
{
printf("-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx);
}
waitKey(0);
return 0;
}
the error I'm getting
std::vector< DMatch > matches; is empty but you are trying to access its elements in the for loop.
std::vector< DMatch > matches;//this creates an empty vector
//you need to push_back some elements in matches before trying to access it in your loops
......
//calculation of max and min distances between keypoints
for (int i = 0; i < descriptors_1.rows; i++)
{
double dist = matches[i].distance;//this is trying to access the empty vector
......
}
I think the vector variable good_matches may have 0 size elements, the problem code may hide :
// NOTE(review): if `matches` is empty (match() was never called), then
// matches[i] below is an out-of-range access for every i — this loop is
// exactly where the "vector subscript out of range" error is raised.
for (int i = 0; i < descriptors_1.rows; i++)
{
double dist = matches[i].distance;
if (dist < min_dist) min_dist = dist;
if (dist > max_dist) max_dist = dist;
}
I have a stereo camera setup and I am trying to match features between the two frames so that I can triangulate the corresponding points into a 3d point cloud.
It kind of works, using SURF, but is too slow for real-time use. Is there a faster way? Or, a way around the problem?
This is my code:
/**
 * @brief Matches SURF features between a stereo pair and stores the matched
 *        2-D point locations in the globals points_2D_left / points_2D_right
 *        (assumed to be std::vector<Point2f> declared elsewhere — TODO
 *        confirm against the rest of the file).
 * @param img_1 Left camera frame.
 * @param img_2 Right camera frame.
 * @return false when either frame yields no descriptors (nothing to match),
 *         true otherwise.
 */
bool matchFeatures(Mat img_1, Mat img_2)
{
    points_2D_left.clear();
    points_2D_right.clear();

    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 400; SurfFeatureDetector detector(minHessian);
    std::vector<KeyPoint> keypoints_1, keypoints_2;
    detector.detect(img_1, keypoints_1);
    detector.detect(img_2, keypoints_2);

    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat descriptors_1, descriptors_2;
    extractor.compute(img_1, keypoints_1, descriptors_1);
    extractor.compute(img_2, keypoints_2, descriptors_2);

    // BUG FIX: FLANN asserts on empty descriptor matrices (e.g. a
    // textureless frame); bail out instead of crashing mid-stream.
    if (descriptors_1.empty() || descriptors_2.empty())
        return false;

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match(descriptors_1, descriptors_2, matches);

    //-- Quick calculation of max and min distances between keypoints
    double max_dist = 0; double min_dist = 100;
    for (size_t i = 0; i < matches.size(); i++)
    {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }

    //-- Keep matches whose distance is at most max(2*min_dist, 0.02).
    std::vector< DMatch > good_matches;
    for (size_t i = 0; i < matches.size(); i++)
    {
        if (matches[i].distance <= max(2 * min_dist, 0.02))
        {
            good_matches.push_back(matches[i]);
        }
    }

    // queryIdx indexes keypoints_1 (left frame), trainIdx keypoints_2 (right).
    for (size_t i = 0; i < good_matches.size(); i++)
    {
        points_2D_left.push_back(keypoints_1[good_matches[i].queryIdx].pt);
        points_2D_right.push_back(keypoints_2[good_matches[i].trainIdx].pt);
    }
    return true;
}
SURF is slow. Try to use ORB, which operates in real time.
OrbFeatureDetector
I have been working on stitching multiple images in opencv in order to create a mosaic.
I followed this link on opencv:
Stitch multiple images using OpenCV (Python)
Here's the code that I have got so far :
// imgstch.cpp :
// imgstch.cpp :
//#include "stdafx.h"
#include<opencv/cv.h>
#include<opencv/highgui.h>
#include<iostream>
#include<stdio.h>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#define _CRT_SECURE_NO_WARNINGS
using namespace std;
using namespace cv;
int main()
{
//-- Input the two images
cv::Mat img1;
std::vector<cv::KeyPoint> img1_keypoints;
cv::Mat img1_descriptors;
cv::Mat img2;
std::vector<cv::KeyPoint> img2_keypoints;
cv::Mat img2_descriptors;
img1 = cv::imread("/home/ishita/Downloads/ishita/Downloads/Mosaic/b2.JPG");
img2 = cv::imread("/home/ishita/Downloads/ishita/Downloads/Mosaic/b1.JPG");
//-- ORB feature detector, extractor and descriptor
int minHessian = 1800;
OrbFeatureDetector detector( minHessian );
OrbDescriptorExtractor extractor;
detector.detect(img1, img1_keypoints);
detector.detect(img2, img2_keypoints);
extractor.compute(img1, img1_keypoints, img1_descriptors);
extractor.compute(img2, img2_keypoints, img2_descriptors);
//-- Matching descriptor vectors with a brute force matcher
BFMatcher matcher(NORM_HAMMING);
std::vector< DMatch > matches;
matcher.match( img1_descriptors, img2_descriptors, matches );
imshow("image1", img1);
imshow("image2",img2);
//-- Draw matches
Mat img_matches;
drawMatches( img1, img1_keypoints, img2, img2_keypoints, matches, img_matches );
//-- Show detected matches
imshow("Matches", img_matches );imwrite("/home/ishita/img_stitch/img_matches.jpg",img_matches);
double max_dist = 0; double min_dist = 10;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < matches.size(); i++ )
{
double dist = matches[i].distance;
if( dist < min_dist && dist >3)
{
min_dist = dist;
}
if( dist > max_dist) max_dist = dist;
}
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches;
for( int i = 0; i < matches.size(); i++ )
{
//cout<<matches[i].distance<<endl;
if( matches[i].distance < 3*min_dist && matches[i].distance > 3)
{
good_matches.push_back( matches[i]); }
}
//calculate the Homography
vector<Point2f> p1, p2;
for (unsigned int i = 0; i < matches.size(); i++) {
p1.push_back(img1_keypoints[matches[i].queryIdx].pt);
p2.push_back(img2_keypoints[matches[i].trainIdx].pt);
}
// HomografĂa
vector<unsigned char> match_mask;
Mat H = findHomography(p1,p2,CV_RANSAC);
cout << "H = "<< endl << " " << H << endl << endl;
// Use the Homography Matrix to warp the images
cv::Mat result;
result=img1.clone();
warpPerspective(img1,result,H,cv::Size(img1.cols+img2.cols,img1.rows));
cv::Mat half(result,cv::Rect(0,0,img2.cols,img2.rows));
img2.copyTo(half);
imwrite("/home/ishita/img_stitch/result.jpg",result);
imshow( "Result", result );
//for images 2 and 3
cv::Mat img3;
std::vector<cv::KeyPoint> img3_keypoints;
cv::Mat img3_descriptors;
img3 = cv::imread("/home/ishita/Downloads/ishita/Downloads/Mosaic/b3.JPG");
//detector.detect(img2, img2_keypoints);
detector.detect(img3, img3_keypoints);
//extractor.compute(img2, img2_keypoints, img2_descriptors);
extractor.compute(img3, img3_keypoints, img3_descriptors);
matcher.match( img1_descriptors, img3_descriptors, matches );
//imshow("image2", img1);
imshow("image3",img3);
//-- Draw matches
Mat img_matches2;
drawMatches( img1, img1_keypoints, img3, img3_keypoints, matches, img_matches2 );
//-- Show detected matches
imshow("Matches2", img_matches2 );imwrite("/home/ishita/img_stitch/img_matches.jpg",img_matches2);
max_dist = 0; min_dist = 10;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < matches.size(); i++ )
{
double dist = matches[i].distance;
if( dist < min_dist && dist >3)
{
min_dist = dist;
}
if( dist > max_dist) max_dist = dist;
}
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches2;
for( int i = 0; i < matches.size(); i++ )
{
//cout<<matches[i].distance<<endl;
if( matches[i].distance < 3*min_dist && matches[i].distance > 3)
{
good_matches2.push_back( matches[i]); }
}
//calculate the Homography
vector<Point2f> p3, p4;
for (unsigned int i = 0; i < matches.size(); i++) {
p3.push_back(img1_keypoints[matches[i].queryIdx].pt);
p4.push_back(img3_keypoints[matches[i].trainIdx].pt);
}
// HomografĂa
vector<unsigned char> match_mask2;
Mat H2 = findHomography(p3,p4,CV_RANSAC);
Mat H3 = H * H2;
cout << "H2= "<< endl << " " << H2 << endl << endl;
// Use the Homography Matrix to warp the images
cv::Mat result2;
result2 = result.clone();
warpPerspective(result,result2,H3,cv::Size(img3.cols+result.cols,result.rows));
cv::Mat half2(result,cv::Rect(0,0,img3.cols,img3.rows));
img3.copyTo(half2);
imwrite("/home/ishita/img_stitch/result.jpg",result2);
imshow( "Result2", result2 );
waitKey(0);
return 0;
}
The result of stitching first two images is as required but the result of stitching the third image is not appropriate.
What could be possibly wrong with the logic behind this or in the method's implementation?
The images and the result can be found here :
https://drive.google.com/folderview?id=0BxXVoeIUgVW7fnFMbExhTzN4QnRueXZpQmpILTZCWFZoTlZEckxfWV83VjkxMmFNSXdLVWM&usp=sharing
This answer is late, but it may be helpful for others.
In some algorithms, the dimensions of the images to be stitched must be the same.
(If that is the problem, try the solution below.)
1- you could run program once for (img1 , img2)=>result1.
2- then for (img2 , img3)=>result2.
3- (result1 , result2) => final
here is other example that get all images at once:
https://www.geeksforgeeks.org/stitching-input-images-panorama-using-opencv-c/
here is the code for Features2D + Homography to find a known object from open cv documentation
#include<opencv\cv.h>
#include <opencv2\core\core.hpp>
#include <opencv2\features2d\features2d.hpp>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\nonfree\nonfree.hpp>
#include <opencv2\calib3d\calib3d.hpp>
#include <opencv2\imgproc\imgproc.hpp>
#include <iostream>
using namespace std;
using namespace cv;
/**
 * @brief Features2D + Homography example: matches SURF features between two
 *        images with a FLANN matcher, estimates a RANSAC homography from the
 *        good matches, and warps the first image onto the second.
 * @return 0 on success, -1 when the images cannot be read.
 */
int main(){
    /*-- Load the images --*/
    Mat image1= imread("C:\\panL.jpg");
    Mat image2 = imread("C:\\panR.jpg");
    if (!image1.data || !image2.data)
    {
        cout << " --(!) Error reading images " << endl; return -1;
    }
    imshow("first image", image2);
    imshow("second image", image1);

    /*-- Detecting the keypoints using SURF Detector --*/
    int minHessian = 400;
    SurfFeatureDetector detector(minHessian);
    vector<KeyPoint> keypoints_1, keypoints_2;
    detector.detect(image1, keypoints_1);
    detector.detect(image2, keypoints_2);

    /*-- Calculating descriptors (feature vectors) --*/
    SurfDescriptorExtractor extractor;
    Mat descriptors_1, descriptors_2;
    extractor.compute(image1, keypoints_1, descriptors_1);
    extractor.compute(image2, keypoints_2, descriptors_2);

    /*-- Step 3: Matching descriptor vectors using FLANN matcher --*/
    FlannBasedMatcher matcher;
    vector< DMatch > matches;
    matcher.match(descriptors_1, descriptors_2, matches);

    //-- Quick calculation of max and min distances between keypoints
    // Loop over matches.size() (== descriptors_1.rows after a successful
    // match) with size_t to avoid signed/unsigned comparison warnings.
    double max_dist = 0; double min_dist = 100;
    for (size_t i = 0; i < matches.size(); i++)
    {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }
    cout << "-- Max dist :" << max_dist << endl;
    cout << "-- Min dist :" << min_dist << endl;

    /*-- Keeping matches whose distance is less than 2*min_dist,
     *-- or a small arbitrary value (0.02) in the event that min_dist is very small)
     */
    vector< DMatch > good_matches;
    for (size_t i = 0; i < matches.size(); i++)
    {
        if (matches[i].distance <= max(2 * min_dist, 0.02))
        {
            good_matches.push_back(matches[i]);
        }
    }

    /*-- Draw only good matches --*/
    Mat img_matches;
    drawMatches(image1, keypoints_1, image2, keypoints_2,
        good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
        vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

    /*-- Show detected matches --*/
    imshow("Good Matches", img_matches);
    for (int i = 0; i < (int)good_matches.size(); i++)
    {
        // BUG FIX: the original printed the literal text "[i]" instead of
        // the actual match index.
        cout << "-- Good Match [" << i << "] Keypoint 1: " << good_matches[i].queryIdx << " -- Keypoint 2:" << good_matches[i].trainIdx << endl;
    }

    vector< Point2f > obj;
    vector< Point2f > scene;
    // findHomography requires at least 4 point correspondences.
    if (good_matches.size() >= 4)
    {
        for (size_t i = 0; i < good_matches.size(); i++)
        {
            //-- Get the keypoints from the good matches
            obj.push_back(keypoints_1[good_matches[i].queryIdx].pt);
            scene.push_back(keypoints_2[good_matches[i].trainIdx].pt);
        }
        // Find the Homography Matrix with RANSAC outlier rejection.
        Mat H = findHomography(obj, scene, CV_RANSAC);
        // Use the Homography Matrix to warp image1 and paste image2 on top.
        Mat result;
        warpPerspective(image1, result, H, Size(image1.cols + image2.cols, image1.rows));
        Mat half(result, Rect(0, 0, image2.cols, image2.rows));
        image2.copyTo(half);
        imshow("Result", result);
    }
    waitKey(0);
    return 0;
}
on compilation it gives 2 error:
Error 5 error LNK2019: unresolved external symbol "class cv::Mat __cdecl cv::findHomography(class cv::_InputArray const &,class cv::_InputArray const &,int,double,class cv::_OutputArray const &)" (?findHomography#cv##YA?AVMat#1#AEBV_InputArray#1#0HNAEBV_OutputArray#1##Z) referenced in function main C:\Users\Paradox\Documents\Visual Studio 2013\Projects\Stiching~1\Stiching~1\Source.obj Stiching~1
Error 6 error LNK1120: 1 unresolved externals C:\Users\Paradox\Documents\Visual Studio 2013\Projects\Stiching~1\x64\Debug\Stiching~1.exe 1 1 Stiching~1
You get the link errors because you don't link the OpenCV libs. You can add the following libs to VS2013 Project's Properties > Linker > Input > Additional Dependencies (assume you're using OpenCV-2.4.8 in Debug mode):
opencv_videostab248d.lib
opencv_video248d.lib
opencv_ts248d.lib
opencv_superres248d.lib
opencv_stitching248d.lib
opencv_photo248d.lib
opencv_ocl248d.lib
opencv_objdetect248d.lib
opencv_nonfree248d.lib
opencv_ml248d.lib
opencv_legacy248d.lib
opencv_imgproc248d.lib
opencv_highgui248d.lib
opencv_gpu248d.lib
opencv_flann248d.lib
opencv_features2d248d.lib
opencv_core248d.lib
opencv_contrib248d.lib
opencv_calib3d248d.lib
It will be much easier if you're using CMake, which can be simply done by:
target_link_libraries(yourProject ${OpenCV_LIBS})