OpenCV - How to match multi-instance of templates in image using C++ - c++

I need some help trying to match abnormal blood cells in an image against a template cell.
After some searches I found SURF and SIFT feature extraction.
I have this template;
and this original image;
This is my code:
int main(int argc, char** argv)
{
Mat img_scene = imread("d:\\projimg\\last.jpg", CV_LOAD_IMAGE_GRAYSCALE);
Mat img_object = imread("d:\\projimg\\lasttmp.jpg", CV_LOAD_IMAGE_GRAYSCALE);
//Mat img_object = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
//Mat img_scene = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
if (!img_object.data || !img_scene.data)
{
std::cout << " --(!) Error reading images " << std::endl; return -1;
}
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 200;
SurfFeatureDetector detector(minHessian);
std::vector<KeyPoint> keypoints_object, keypoints_scene;
detector.detect(img_object, keypoints_object);
detector.detect(img_scene, keypoints_scene);
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_object, descriptors_scene;
extractor.compute(img_object, keypoints_object, descriptors_object);
extractor.compute(img_scene, keypoints_scene, descriptors_scene);
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match(descriptors_object, descriptors_scene, matches);
matches.resize(10);
sort(matches.begin(), matches.end());
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for (int i = 0; i <matches.size(); i++)
{
double dist = matches[i].distance;
if (dist < min_dist) min_dist = dist;
if (dist > max_dist) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist);
printf("-- Min dist : %f \n", min_dist);
//-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
std::vector< DMatch > good_matches;
for (int i = 0; i < matches.size(); i++)
{
if (matches[i].distance < 3 * min_dist)
{
good_matches.push_back(matches[i]);
}
}
Mat img_matches;
drawMatches(img_object, keypoints_object, img_scene, keypoints_scene,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
//-- Localize the object
std::vector<Point2f> obj;
std::vector<Point2f> scene;
for (int i = 0; i < good_matches.size(); i++)
{
//-- Get the keypoints from the good matches
obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
}
imshow("ii", img_matches);
waitKey();
return 0;
}
Can anyone help me to match all similar templates ?
Thanks all

Related

Feature points matching using SIFT between two images

I wonder why there is no good matching between the two images using SIFT.
The matched images is shown below.
Original images are
My program is as follow.
// Detects SIFT keypoints in img1/img2, matches their descriptors with FLANN,
// draws the "good" matches, and returns matched pixel coordinates through the
// output parameters first_keypoints (from img1) / second_keypoints (from img2).
// Returns SUCCESS (project-defined status code).
int imagematching(Mat &img1, Mat & img2, std::vector<Point2f> &first_keypoints, std::vector<Point2f> &second_keypoints){
    // One SIFT instance can both detect and compute; the original created two
    // identical instances plus an unused max_keypoints variable.
    Ptr<SIFT> sift = SIFT::create();
    //--Step 1: Key point detection
    std::vector<KeyPoint> keypoints1, keypoints2;
    //-- Step 2: Calculate descriptors (feature vectors)
    Mat descriptors1, descriptors2;
    sift->detect( img1, keypoints1 );
    sift->detect( img2, keypoints2 );
    sift->compute(img1, keypoints1, descriptors1);
    sift->compute(img2, keypoints2, descriptors2);
    // FLANN aborts on empty descriptor matrices; return with empty outputs
    // instead of crashing. NOTE(review): no distinct error code is visible
    // here — confirm whether a failure status exists in the project.
    if (descriptors1.empty() || descriptors2.empty()){
        printf("-- No descriptors found \n");
        return SUCCESS;
    }
    FlannBasedMatcher matcher;
    vector<DMatch> matches;
    matcher.match(descriptors1, descriptors2, matches);
    // BUG FIX: the original scanned `matches` with a bound of
    // descriptors1.rows; use matches.size() so the bound always agrees with
    // the container actually being indexed.
    double max_dist = 0; double min_dist = 999999;
    //-- Quick calculation of max and min distances between keypoints
    for( size_t i = 0; i < matches.size(); i++ )
    {
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }
    printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );
    //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
    std::vector< DMatch > good_matches;
    for( size_t i = 0; i < matches.size(); i++ )
    {
        if( matches[i].distance < 3*min_dist )
        { good_matches.push_back( matches[i]); }
    }
    Mat img_matches;
    drawMatches( img1, keypoints1, img2, keypoints2,
        good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
        vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
    //-- Localize the object: collect matched coordinates, skipping points on
    //-- the image border at x==0 or y==0 (as the original did).
    for( size_t i = 0; i < good_matches.size(); i++ )
    {
        const Point2f &p1 = keypoints1[ good_matches[i].queryIdx ].pt;
        const Point2f &p2 = keypoints2[ good_matches[i].trainIdx ].pt;
        if( p1.x > 0 && p1.y > 0 && p2.x > 0 && p2.y > 0 ){
            first_keypoints.push_back( p1 );
            second_keypoints.push_back( p2 );
        }
    }
    // Locals (descriptors, keypoints, match lists) are released automatically
    // at scope exit; the original's manual release()/clear() calls were noise.
    //-- Show detected matches
    imshow( "Good Matches & Object detection", img_matches );
    waitKey(0);
    return SUCCESS;
}
SIFT might be rotation invariant, but it is definitely not perspective invariant.
You will need to add some machine learning - maybe SVM - to be able to match images with different perspective, SIFT features only are not enough.

Vector subscript out of range error in c++ and opencv

I'm trying to write a program that uses ORB algorithm to detect and compute the keypoints of an image and matches descriptor vectors using FLANN matcher.
The issue I am facing is that every time I run the program in Visual C++, I get an error that says "vector subscript out of range" (I've also attached an image of the error).
The problem seems to be somewhere in the for because when I start the debugger it stops there and I get the error. When I commented the first for to see if the rest is ok, I've got the same error on the second for.
Please help me find the problem.
#include <iostream>
#include <stdio.h>
#include <opencv2/opencv.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2\core\types.hpp>
#include <opencv2\highgui.hpp>
#include <opencv2\core.hpp>
#include <opencv2\opencv_modules.hpp>
using namespace cv;
using namespace std;
int main()
{
Mat img1 = imread("C:\\Users\\patri\\Desktop\\test.bmp");
Mat img2 = imread("C:\\Users\\patri\\Desktop\\test3.bmp");
/*
if (!img1.data || !img2.data)
{
printf(" --(!) Error reading images \n"); return -1;
}
*/
std::vector<KeyPoint> keypoints_1, keypoints_2;
Mat descriptors_1, descriptors_2;
Ptr<ORB> orb = ORB::create(100, 2, 8, 31, 0, 2, ORB::HARRIS_SCORE, 31, 20);
orb->detectAndCompute(img1, Mat(), keypoints_1, descriptors_1);
orb->detectAndCompute(img2, Mat(), keypoints_2, descriptors_2);
std::cout << "Found " << keypoints_1.size() << " Keypoints " << std::endl;
std::cout << "Found " << keypoints_2.size() << " Keypoints " << std::endl;
Mat out1, out2;
drawKeypoints(img1, keypoints_1, out1, Scalar::all(255));
drawKeypoints(img2, keypoints_2, out2, Scalar::all(255));
imshow("Kpts1", out1);
imshow("Kpts2", out2);
//////////////////////////////////////////////////////////////////////
// Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
//matcher.match(descriptors_1, descriptors_2, matches);
double max_dist = 0; double min_dist = 100;
//calculation of max and min distances between keypoints
for (int i = 0; i < descriptors_1.rows; i++)
{
double dist = matches[i].distance;
if (dist < min_dist) min_dist = dist;
if (dist > max_dist) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist);
printf("-- Min dist : %f \n", min_dist);
std::vector< DMatch > good_matches;
for (int i = 0; i < descriptors_1.rows; i++)
{
if (matches[i].distance <= max(2 * min_dist, 0.02))
{
good_matches.push_back(matches[i]);
}
}
//-- Draw only "good" matches
Mat img_matches;
drawMatches(img1, keypoints_1, img2, keypoints_2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
//-- Show detected matches
imshow("Good Matches", img_matches);
for (int i = 0; i < (int)good_matches.size(); i++)
{
printf("-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx);
}
waitKey(0);
return 0;
}
the error I'm getting
std::vector< DMatch > matches; is empty but you are trying to access its elements in the for loop.
std::vector< DMatch > matches;//this creates an empty vector
//you need to push_back some elements in matches before trying to access it in your loops
......
//calculation of max and min distances between keypoints
for (int i = 0; i < descriptors_1.rows; i++)
{
double dist = matches[i].distance;//this is trying to access the empty vector
......
}
I think the vector variable good_matches may have 0 size elements, the problem code may hide :
for (int i = 0; i < descriptors_1.rows; i++)
{
double dist = matches[i].distance;
if (dist < min_dist) min_dist = dist;
if (dist > max_dist) max_dist = dist;
}

Real time feature matching with stereo camera

I have a stereo camera setup and I am trying to match features between the two frames so that I can triangulate the corresponding points into a 3d point cloud.
It kind of works, using SURF, but is too slow for real-time use. Is there a faster way? Or, a way around the problem?
This is my code:
// Matches SURF features between a stereo pair and fills the globals
// points_2D_left / points_2D_right with corresponding pixel coordinates.
// Returns false when no descriptors could be extracted (e.g. a textureless
// frame), true otherwise. NOTE: SURF is slow; for real-time use, ORB with a
// Hamming-distance matcher is the usual drop-in replacement.
bool matchFeatures(Mat img_1, Mat img_2)
{
    points_2D_left.clear();
    points_2D_right.clear();
    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 400;
    SurfFeatureDetector detector(minHessian);
    std::vector<KeyPoint> keypoints_1, keypoints_2;
    detector.detect(img_1, keypoints_1);
    detector.detect(img_2, keypoints_2);
    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat descriptors_1, descriptors_2;
    extractor.compute(img_1, keypoints_1, descriptors_1);
    extractor.compute(img_2, keypoints_2, descriptors_2);
    // FLANN aborts on empty descriptor matrices; report failure instead of
    // crashing mid-stream.
    if (descriptors_1.empty() || descriptors_2.empty())
        return false;
    //-- Step 3: Matching descriptor vectors using FLANN matcher
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match(descriptors_1, descriptors_2, matches);
    // BUG FIX: scan matches.size() entries, not descriptors_1.rows — the two
    // bounds can disagree and the original could index out of range.
    double max_dist = 0; double min_dist = 100;
    //-- Quick calculation of max and min distances between keypoints
    for (size_t i = 0; i < matches.size(); i++)
    {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }
    std::vector< DMatch > good_matches;
    for (size_t i = 0; i < matches.size(); i++)
    {
        if (matches[i].distance <= max(2 * min_dist, 0.02))
        {
            good_matches.push_back(matches[i]);
        }
    }
    for (size_t i = 0; i < good_matches.size(); i++)
    {
        //-- Get the keypoints from the good matches.
        // queryIdx indexes the left (query) keypoints, trainIdx the right.
        points_2D_left.push_back(keypoints_1[good_matches[i].queryIdx].pt);
        points_2D_right.push_back(keypoints_2[good_matches[i].trainIdx].pt);
    }
    return true;
}
SURF is slow. Try to use ORB, which operates in real time.
OrbFeatureDetector

Having some difficulty in using findHomography -compilation error

here is the code for Features2D + Homography to find a known object from open cv documentation
#include<opencv\cv.h>
#include <opencv2\core\core.hpp>
#include <opencv2\features2d\features2d.hpp>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\nonfree\nonfree.hpp>
#include <opencv2\calib3d\calib3d.hpp>
#include <opencv2\imgproc\imgproc.hpp>
#include <iostream>
using namespace std;
using namespace cv;
/** #function main */
// Matches SURF features between two images with FLANN and, when at least 4
// good matches exist, estimates a homography and stitches the pair.
int main(){
    /*-- Load the images --*/
    Mat image1= imread("C:\\panL.jpg");
    Mat image2 = imread("C:\\panR.jpg");
    if (!image1.data || !image2.data)
    {
        cout << " --(!) Error reading images " << endl; return -1;
    }
    imshow("first image", image2);
    imshow("second image", image1);
    /*-- Detecting the keypoints using SURF Detector --*/
    int minHessian = 400;
    SurfFeatureDetector detector(minHessian);
    vector<KeyPoint> keypoints_1, keypoints_2;
    detector.detect(image1, keypoints_1);
    detector.detect(image2, keypoints_2);
    /*-- Calculating descriptors (feature vectors) --*/
    SurfDescriptorExtractor extractor;
    Mat descriptors_1, descriptors_2;
    extractor.compute(image1, keypoints_1, descriptors_1);
    extractor.compute(image2, keypoints_2, descriptors_2);
    // FLANN aborts on empty descriptor matrices, so fail gracefully.
    if (descriptors_1.empty() || descriptors_2.empty())
    {
        cout << " --(!) No descriptors found " << endl; return -1;
    }
    /*-- Step 3: Matching descriptor vectors using FLANN matcher --*/
    FlannBasedMatcher matcher;
    vector< DMatch > matches;
    matcher.match(descriptors_1, descriptors_2, matches);
    //-- Quick calculation of max and min distances between keypoints.
    // BUG FIX: iterate matches.size(), not descriptors_1.rows, so the loop
    // bound always agrees with the container being indexed.
    double max_dist = 0; double min_dist = 100;
    for (size_t i = 0; i < matches.size(); i++)
    {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }
    cout << "-- Max dist :" << max_dist << endl;
    cout << "-- Min dist :" << min_dist << endl;
    /*-- Drawing matches whose distance is less than 2*min_dist,
     *-- or a small arbitrary value ( 0.02 ) in the event that min_dist is very small)
     */
    vector< DMatch > good_matches;
    for (size_t i = 0; i < matches.size(); i++)
    {
        if (matches[i].distance <= max(2 * min_dist, 0.02))
        {
            good_matches.push_back(matches[i]);
        }
    }
    /*-- Draw only good matches --*/
    Mat img_matches;
    drawMatches(image1, keypoints_1, image2, keypoints_2,
        good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
        vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
    /*-- Show detected matches --*/
    imshow("Good Matches", img_matches);
    // BUG FIX: the original printed the literal string "[i]" instead of the
    // actual match index.
    for (size_t i = 0; i < good_matches.size(); i++)
    {
        cout << "-- Good Match [" << i << "] Keypoint 1: " << good_matches[i].queryIdx << " -- Keypoint 2:" << good_matches[i].trainIdx << endl;
    }
    vector< Point2f > obj;
    vector< Point2f > scene;
    // findHomography needs at least 4 point correspondences.
    if (good_matches.size() >= 4)
    {
        for (size_t i = 0; i < good_matches.size(); i++)
        {
            //-- Get the keypoints from the good matches
            obj.push_back(keypoints_1[good_matches[i].queryIdx].pt);
            scene.push_back(keypoints_2[good_matches[i].trainIdx].pt);
        }
        // Find the Homography Matrix (RANSAC rejects outlier matches)
        Mat H = findHomography(obj, scene, CV_RANSAC);
        // Use the Homography Matrix to warp the images
        Mat result;
        warpPerspective(image1, result, H, Size(image1.cols + image2.cols, image1.rows));
        Mat half(result, Rect(0, 0, image2.cols, image2.rows));
        image2.copyTo(half);
        imshow("Result", result);
    }
    waitKey(0);
    return 0;
}
on compilation it gives 2 error:
Error 5 error LNK2019: unresolved external symbol "class cv::Mat __cdecl cv::findHomography(class cv::_InputArray const &,class cv::_InputArray const &,int,double,class cv::_OutputArray const &)" (?findHomography#cv##YA?AVMat#1#AEBV_InputArray#1#0HNAEBV_OutputArray#1##Z) referenced in function main C:\Users\Paradox\Documents\Visual Studio 2013\Projects\Stiching~1\Stiching~1\Source.obj Stiching~1
Error 6 error LNK1120: 1 unresolved externals C:\Users\Paradox\Documents\Visual Studio 2013\Projects\Stiching~1\x64\Debug\Stiching~1.exe 1 1 Stiching~1
You get the link errors because you don't link the OpenCV libs. You can add the following libs to VS2013 Project's Properties > Linker > Input > Additional Dependencies (assume you're using OpenCV-2.4.8 in Debug mode):
opencv_videostab248d.lib
opencv_video248d.lib
opencv_ts248d.lib
opencv_superres248d.lib
opencv_stitching248d.lib
opencv_photo248d.lib
opencv_ocl248d.lib
opencv_objdetect248d.lib
opencv_nonfree248d.lib
opencv_ml248d.lib
opencv_legacy248d.lib
opencv_imgproc248d.lib
opencv_highgui248d.lib
opencv_gpu248d.lib
opencv_flann248d.lib
opencv_features2d248d.lib
opencv_core248d.lib
opencv_contrib248d.lib
opencv_calib3d248d.lib
It will be much easier if you're using CMake, which can be simply done by:
target_link_libraries(yourProject ${OpenCV_LIBS})

How to Access Points location on OpenCV Matcher?

I am using this FLANN matcher algorithm to match interest points in 2 pictures the code is displayed below).
There is a moment when the code finds a list of matched points:
std::vector<DMatch> good_matches;
I would like to get the points localization (x,y) in both pictures. To create a displacement map. How could I access these points localization?
Cheers,
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace cv;
void readme();
/** #function main */
// Loads two grayscale images from argv, matches their SURF features with
// FLANN, draws the good matches, and prints each match's keypoint indices.
int main(int argc, char** argv) {
    if (argc != 3) {
        readme();
        return -1;
    }
    // Transform in GrayScale
    Mat img_1 = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
    Mat img_2 = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
    // Checks if the image could be loaded
    if (!img_1.data || !img_2.data) {
        std::cout << " --(!) Error reading images " << std::endl;
        return -1;
    }
    //-- Step 1: SURF keypoint detection
    const int hessianThreshold = 400;
    SurfFeatureDetector detector(hessianThreshold);
    std::vector<KeyPoint> keypoints_1, keypoints_2;
    detector.detect(img_1, keypoints_1);
    detector.detect(img_2, keypoints_2);
    //-- Step 2: descriptor extraction
    Mat descriptors_1, descriptors_2;
    SurfDescriptorExtractor extractor;
    extractor.compute(img_1, keypoints_1, descriptors_1);
    extractor.compute(img_2, keypoints_2, descriptors_2);
    //-- Step 3: FLANN matching
    std::vector<DMatch> matches;
    FlannBasedMatcher matcher;
    matcher.match(descriptors_1, descriptors_2, matches);
    //-- Min/max match distance across all query descriptors
    double max_dist = 0;
    double min_dist = 100;
    for (int idx = 0; idx < descriptors_1.rows; idx++) {
        const double d = matches[idx].distance;
        min_dist = (d < min_dist) ? d : min_dist;
        max_dist = (d > max_dist) ? d : max_dist;
    }
    printf("-- Max dist : %f \n", max_dist);
    printf("-- Min dist : %f \n", min_dist);
    //-- Keep matches closer than 2*min_dist (radiusMatch could be used too)
    std::vector<DMatch> good_matches;
    for (int idx = 0; idx < descriptors_1.rows; idx++) {
        if (matches[idx].distance < 2 * min_dist)
            good_matches.push_back(matches[idx]);
    }
    //-- Render only the good matches
    Mat img_matches;
    drawMatches(img_1, keypoints_1, img_2, keypoints_2, good_matches,
        img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(),
        DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
    imshow("Good Matches", img_matches);
    // queryIdx / trainIdx are indices into keypoints_1 / keypoints_2.
    for (int i = 0; i < good_matches.size(); i++) {
        printf("-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i,
            good_matches[i].queryIdx, good_matches[i].trainIdx);
    }
    waitKey(0);
    return 0;
}
/** #function readme */
// Prints the command-line usage string for this sample.
void readme() {
    const char* kUsage = " Usage: ./SURF_FlannMatcher <img1> <img2>";
    std::cout << kUsage << std::endl;
}
matched_points1 and matched_points2 will be the corresponding points in the left and right images. For each good match, idx1=good_matches[i].queryIdx is the index into the first (query/left) image's keypoints, and idx2=good_matches[i].trainIdx is the index into the second (train/right) image's keypoints. Then just add the corresponding points to your matched_points vectors to obtain the x,y point vector of the matches.
// Collects the (x, y) coordinates of every good match into two parallel
// vectors: matched_points1 from the first image, matched_points2 from the
// second.
long num_matches = good_matches.size();
vector<Point2f> matched_points1;
vector<Point2f> matched_points2;
for (long i = 0; i < num_matches; i++)
{
    // BUG FIX: queryIdx indexes the FIRST (query) image's keypoints and
    // trainIdx indexes the SECOND (train) image's; the original assigned
    // them the wrong way round, pairing each point with the wrong image.
    int idx1 = good_matches[i].queryIdx;
    int idx2 = good_matches[i].trainIdx;
    matched_points1.push_back(points1[idx1]);
    matched_points2.push_back(points2[idx2]);
}
Now you have two vectors of the matched points. I think that's what you're asking?