Find good keypoint matches - C++

I found this code inside LATCH_match.cpp in the xfeatures2d module:
BFMatcher matcher(NORM_HAMMING);
vector< vector<DMatch> > nn_matches;
matcher.knnMatch(desc1, desc2, nn_matches, 2);
vector<KeyPoint> matched1, matched2, inliers1, inliers2;
vector<DMatch> good_matches;
// ratio test from the original sample (this loop fills matched1/matched2):
// keep a match only when the best distance is clearly smaller than the second best
for (size_t i = 0; i < nn_matches.size(); i++) {
    DMatch first = nn_matches[i][0];
    float dist1 = nn_matches[i][0].distance;
    float dist2 = nn_matches[i][1].distance;
    if (dist1 < nn_match_ratio * dist2) {
        matched1.push_back(kpts1[first.queryIdx]);
        matched2.push_back(kpts2[first.trainIdx]);
    }
}
for (unsigned i = 0; i < matched1.size(); i++) {
    // project the point from image 1 into image 2 with the known homography
    Mat col = Mat::ones(3, 1, CV_64F);
    col.at<double>(0) = matched1[i].pt.x;
    col.at<double>(1) = matched1[i].pt.y;
    col = homography * col;
    col /= col.at<double>(2);
    // reprojection error: distance between the projected point and its match
    double dist = sqrt(pow(col.at<double>(0) - matched2[i].pt.x, 2) +
                       pow(col.at<double>(1) - matched2[i].pt.y, 2));
    if (dist < inlier_threshold) {
        int new_i = static_cast<int>(inliers1.size());
        inliers1.push_back(matched1[i]);
        inliers2.push_back(matched2[i]);
        good_matches.push_back(DMatch(new_i, new_i, 0));
    }
}
Mat res;
drawMatches(img1, inliers1, img2, inliers2, good_matches, res);
imwrite("latch_res.png", res);
I see that this is a kind of ratio test, but I don't get the point of the inliers and the homography.

Related

Stitch two images in OpenCV C++

I want to make a mosaic image by stitching some images together, using SIFT features.
When I stitch the following two images:
img1
img2
I get this result. As you can see, the images are not completely aligned and there is a gap between them. Can you tell me what the problem is and how I can solve it?
Here is the function I use for stitching:
Mat StitchVertical(Mat& imGrayImg1, Mat& imGrayImg2)
{
    Mat GrayImg1(imGrayImg1, Rect(0, 0, imGrayImg1.cols, imGrayImg1.rows)); // init ROI
    Mat GrayImg2(imGrayImg2, Rect(0, 0, imGrayImg2.cols, imGrayImg2.rows));
    cvtColor(GrayImg1, GrayImg1, COLOR_BGR2GRAY);
    cvtColor(GrayImg2, GrayImg2, COLOR_BGR2GRAY);
    vector<cv::KeyPoint> keypointsImg1, keypointsImg2;
    Mat descriptorImg1, descriptorImg2;
    BFMatcher matcher;
    vector<cv::DMatch> matches, good_matches;
    cv::Ptr<cv::SIFT> sift = cv::SIFT::create();
    int i, dist = 80;
    sift->detectAndCompute(GrayImg1, cv::Mat(), keypointsImg1, descriptorImg1); /* keypoints of the first image */
    sift->detectAndCompute(GrayImg2, cv::Mat(), keypointsImg2, descriptorImg2); /* keypoints of the second image */
    matcher.match(descriptorImg1, descriptorImg2, matches); // match the descriptors
    double max_dist = 0; double min_dist = 5000;
    for (int i = 0; i < descriptorImg1.rows; i++)
    {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }
    for (i = 0; i < descriptorImg1.rows; i++)
    {
        if (matches[i].distance < 3 * min_dist)
        {
            good_matches.push_back(matches[i]);
        }
    }
    Mat img_matches;
    // Draw matches
    drawMatches(imGrayImg1, keypointsImg1, imGrayImg2, keypointsImg2,
                good_matches, img_matches, Scalar::all(-1),
                Scalar::all(-1), vector<char>(),
                DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
    //imshow("matches", img_matches);
    vector<Point2f> keypoints1, keypoints2;
    for (i = 0; i < good_matches.size(); i++)
    {
        keypoints1.push_back(keypointsImg2[good_matches[i].trainIdx].pt);
        keypoints2.push_back(keypointsImg1[good_matches[i].queryIdx].pt);
    }
    Mat H, H2;
    H = findHomography(keypoints1, keypoints2, RANSAC);
    H2 = findHomography(keypoints2, keypoints1, RANSAC);
    Mat stitchedImage;
    int mRows = imGrayImg2.cols;
    if (imGrayImg1.cols > imGrayImg2.cols)
    {
        mRows = imGrayImg1.cols;
    }
    int count = 0;
    for (int i = 0; i < keypoints2.size(); i++)
    {
        if (keypoints2[i].y >= imGrayImg2.rows / 3) { count++; }
    }
    int minx, miny;
    if (count / float(keypoints2.size()) >= 0.5) // imGrayImg2 is to be spliced on the right
    {
        cout << "imGrayImg1 should be left" << endl;
        vector<Point2f> corners(4);
        vector<Point2f> corners2(4);
        corners[0] = Point(0, 0);
        corners[1] = Point(0, imGrayImg2.rows);
        corners[2] = Point(imGrayImg2.cols, imGrayImg2.rows);
        corners[3] = Point(imGrayImg2.cols, 0);
        stitchedImage = Mat::zeros(mRows, imGrayImg2.rows + imGrayImg1.rows, CV_8UC3);
        warpPerspective(imGrayImg2, stitchedImage, H, Size(mRows, imGrayImg1.cols + imGrayImg2.cols));
        perspectiveTransform(corners, corners2, H);
        Mat half(stitchedImage, Rect(0, 0, imGrayImg1.cols, imGrayImg1.rows));
        imGrayImg1.copyTo(half);
        minx = stitchedImage.size().width;
        miny = stitchedImage.size().height;
        if ((int(corners2[2].x) - 10) < stitchedImage.size().width) {
            minx = (int(corners2[2].x) - 10);
        }
        if ((int(corners2[2].y) - 10) < stitchedImage.size().height) {
            miny = (int(corners2[2].y) - 10);
        }
        Rect crop_region(0, 0, minx, miny);
        Mat cropped_image = stitchedImage(crop_region);
        return cropped_image;
    }
    else
    {
        cout << "imGrayImg2 should be up" << endl;
        stitchedImage = Mat::zeros(mRows, imGrayImg2.rows + imGrayImg1.rows, CV_8UC3);
        warpPerspective(imGrayImg1, stitchedImage, H2, Size(mRows, imGrayImg1.cols + imGrayImg2.cols));
        //imshow("temp", stitchedImage);
        vector<Point2f> corners(4);
        vector<Point2f> corners2(4);
        corners[0] = Point(0, 0);
        corners[1] = Point(0, imGrayImg1.rows);
        corners[2] = Point(imGrayImg1.cols, imGrayImg1.rows);
        corners[3] = Point(imGrayImg1.cols, 0);
        cout << "second if in up and down" << endl;
        perspectiveTransform(corners, corners2, H2); // map the corner points with the homography
        Mat half(stitchedImage, Rect(0, 0, imGrayImg2.cols, imGrayImg2.rows));
        imGrayImg2.copyTo(half);
        minx = stitchedImage.size().width;
        miny = stitchedImage.size().height;
        if ((int(corners2[2].x) - 10) < stitchedImage.size().width) {
            minx = (int(corners2[2].x) - 10);
        }
        if ((int(corners2[2].y) - 10) < stitchedImage.size().height) {
            miny = (int(corners2[2].y) - 10);
        }
        Rect crop_region(0, 0, minx, miny);
        Mat cropped_image = stitchedImage(crop_region);
        return cropped_image;
    }
    imwrite("result.bmp", stitchedImage); // note: unreachable, both branches above return
    return stitchedImage;
}

Getting a book image using OpenCV and C++

I'm trying to detect the following book using findContours, but it cannot be detected at all and I get an exception because there is no convex hull.
I tried blurring, dilating, and Canny detection, with no success at all.
I hope to get a solution for finding a rectangular paper/book using OpenCV.
Please let me know if you have further questions or need resources.
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
using namespace cv;
using namespace std;
double angle(cv::Point pt1, cv::Point pt2, cv::Point pt0) {
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1*dx2 + dy1*dy2) / sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}
void find_squares(Mat& image, vector<vector<Point> >& squares)
{
// blur will enhance edge detection
Mat blurred(image);
Mat dst;
medianBlur(image, dst, 9);
Mat gray0(dst.size(), CV_8U), gray;
vector<vector<Point> > contours;
// find squares in every color plane of the image
for (int c = 0; c < 3; c++)
{
int ch[] = { c, 0 };
mixChannels(&dst, 1, &gray0, 1, ch, 1);
// try several threshold levels
const int threshold_level = 2;
for (int l = 0; l < threshold_level; l++)
{
// Use Canny instead of zero threshold level!
// Canny helps to catch squares with gradient shading
if (l == 0)
{
Canny(gray0, gray, 10, 20, 3); //
// Dilate helps to remove potential holes between edge segments
dilate(gray, gray, Mat(), Point(-1, -1));
}
else
{
gray = gray0 >= (l + 1) * 255 / threshold_level;
}
// Find contours and store them in a list
findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
// Test contours
vector<Point> approx;
for (size_t i = 0; i < contours.size(); i++)
{
// approximate contour with accuracy proportional
// to the contour perimeter
approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
if (approx.size() == 4 &&
fabs(contourArea(Mat(approx))) > 1000 &&
isContourConvex(Mat(approx)))
{
double maxCosine = 0;
for (int j = 2; j < 5; j++)
{
double cosine = fabs(angle(approx[j % 4], approx[j - 2], approx[j - 1]));
maxCosine = MAX(maxCosine, cosine);
}
if (maxCosine < 0.3)
squares.push_back(approx);
}
}
}
}
}
cv::Mat debugSquares(std::vector<std::vector<cv::Point> > squares, cv::Mat image)
{
    for (int i = 0; i < squares.size(); i++) {
        // draw contour
        cv::drawContours(image, squares, i, cv::Scalar(255, 0, 0), 1, 8, std::vector<cv::Vec4i>(), 0, cv::Point());
        // draw bounding rect
        cv::Rect rect = boundingRect(cv::Mat(squares[i]));
        cv::rectangle(image, rect.tl(), rect.br(), cv::Scalar(0, 255, 0), 2, 8, 0);
        // draw rotated rect
        cv::RotatedRect minRect = minAreaRect(cv::Mat(squares[i]));
        cv::Point2f rect_points[4];
        minRect.points(rect_points);
        for (int j = 0; j < 4; j++) {
            cv::line(image, rect_points[j], rect_points[(j + 1) % 4], cv::Scalar(0, 0, 255), 1, 8); // red
        }
    }
    return image;
}
// return the leftmost, rightmost, topmost and bottommost points of pts
static std::vector<cv::Point> extremePoints(std::vector<cv::Point> pts)
{
    int xmin = 0, ymin = 0, xmax = -1, ymax = -1;
    Point ptxmin, ptymin, ptxmax, ptymax;
    Point pt = pts[0];
    ptxmin = ptymin = ptxmax = ptymax = pt;
    xmin = xmax = pt.x;
    ymin = ymax = pt.y;
    for (size_t i = 1; i < pts.size(); i++)
    {
        pt = pts[i];
        if (xmin > pt.x)
        {
            xmin = pt.x;
            ptxmin = pt;
        }
        if (xmax < pt.x)
        {
            xmax = pt.x;
            ptxmax = pt;
        }
        if (ymin > pt.y)
        {
            ymin = pt.y;
            ptymin = pt;
        }
        if (ymax < pt.y)
        {
            ymax = pt.y;
            ptymax = pt;
        }
    }
    std::vector<cv::Point> res;
    res.push_back(ptxmin);
    res.push_back(ptxmax);
    res.push_back(ptymin);
    res.push_back(ptymax);
    return res;
}
// sort 4 corners into top-left, top-right, bottom-right, bottom-left order
void sortCorners(std::vector<cv::Point2f>& corners)
{
    std::vector<cv::Point2f> top, bot;
    cv::Point2f center;
    // Get mass center
    for (int i = 0; i < corners.size(); i++)
        center += corners[i];
    center *= (1. / corners.size());
    for (int i = 0; i < corners.size(); i++)
    {
        if (corners[i].y < center.y)
            top.push_back(corners[i]);
        else
            bot.push_back(corners[i]);
    }
    corners.clear();
    if (top.size() == 2 && bot.size() == 2) {
        cv::Point2f tl = top[0].x > top[1].x ? top[1] : top[0];
        cv::Point2f tr = top[0].x > top[1].x ? top[0] : top[1];
        cv::Point2f bl = bot[0].x > bot[1].x ? bot[1] : bot[0];
        cv::Point2f br = bot[0].x > bot[1].x ? bot[0] : bot[1];
        corners.push_back(tl);
        corners.push_back(tr);
        corners.push_back(br);
        corners.push_back(bl);
    }
}
int main(int, char**)
{
    int largest_area = 0;
    int largest_contour_index = 0;
    cv::Rect bounding_rect;
    Mat src, edges;
    src = imread("20628991_10159154614610574_1244594322_o.jpg");
    cvtColor(src, edges, COLOR_BGR2GRAY);
    GaussianBlur(edges, edges, Size(5, 5), 1.5, 1.5);
    erode(edges, edges, Mat()); // these lines may need to be optimized
    dilate(edges, edges, Mat());
    dilate(edges, edges, Mat());
    erode(edges, edges, Mat());
    Canny(edges, edges, 150, 150, 3); // Canny parameters may need to be optimized
    imshow("edges", edges);
    vector<Point> selected;
    vector<vector<Point> > contours;
    findContours(edges, contours, RETR_LIST, CHAIN_APPROX_SIMPLE);
    for (size_t i = 0; i < contours.size(); i++)
    {
        Rect minRect = boundingRect(contours[i]);
        if (minRect.width > 150 && minRect.height > 150) // this threshold may also need tuning
        {
            selected.insert(selected.end(), contours[i].begin(), contours[i].end());
        }
    }
    convexHull(selected, selected);
    RotatedRect minRect = minAreaRect(selected);
    std::vector<cv::Point> corner_points = extremePoints(selected);
    std::vector<cv::Point2f> corners;
    corners.push_back(corner_points[0]);
    corners.push_back(corner_points[1]);
    corners.push_back(corner_points[2]);
    corners.push_back(corner_points[3]);
    sortCorners(corners);
    cv::Mat quad = cv::Mat::zeros(norm(corners[1] - corners[2]), norm(corners[2] - corners[3]), CV_8UC3);
    std::vector<cv::Point2f> quad_pts;
    quad_pts.push_back(cv::Point2f(0, 0));
    quad_pts.push_back(cv::Point2f(quad.cols, 0));
    quad_pts.push_back(cv::Point2f(quad.cols, quad.rows));
    quad_pts.push_back(cv::Point2f(0, quad.rows));
    cv::Mat transmtx = cv::getPerspectiveTransform(corners, quad_pts);
    cv::warpPerspective(src, quad, transmtx, quad.size());
    resize(quad, quad, Size(), 0.25, 0.25); // remove this line to keep the original image size
    imshow("quad", quad);
    polylines(src, selected, true, Scalar(0, 0, 255), 2);
    resize(src, src, Size(), 0.5, 0.5); // remove this line to keep the original image size
    imshow("result", src);
    waitKey(0);
    return 0;
}
Strange, I did it with exactly that (blur, dilate, Canny):
The code is in Python, but it's nothing but OpenCV function calls, so it should be easy to follow. As one of the references I used this answer, which is in C++; it also shows how to correct the perspective and turn it into a rectangle:
import numpy as np
import cv2

img = cv2.imread('sngo1.jpg')

# resize and create a copy for future drawing
resize_coeff = 0.5
h, w, c = img.shape  # shape is (rows, cols, channels)
img_in = cv2.resize(img, (int(resize_coeff*w), int(resize_coeff*h)))
img_out = img_in.copy()

# median blur and Canny
img_in = cv2.medianBlur(img_in, 5)
img_in = cv2.Canny(img_in, 100, 200)

# morphological close for our edges
kernel = np.ones((17, 17), np.uint8)
img_in = cv2.morphologyEx(img_in, cv2.MORPH_CLOSE, kernel, iterations=1)

# find contours, get max by area
# (OpenCV 3.x API; in 4.x findContours returns only (contours, hierarchy))
img_in, contours, hierarchy = cv2.findContours(img_in, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
max_index, max_area = max(enumerate([cv2.contourArea(x) for x in contours]), key=lambda x: x[1])
max_contour = contours[max_index]

# approximate it with a quadrangle
approx = cv2.approxPolyDP(max_contour, 0.1*cv2.arcLength(max_contour, True), True)
approx = approx[:, 0, :]
cv2.drawContours(img_out, [approx], 0, (255, 0, 0), 2)
cv2.imwrite("result.png", img_out)
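The perspective-correction step that the referenced C++ answer performs can be sketched as follows. This is a minimal illustration, not code from either post; warpQuad is a hypothetical helper and it assumes the four corners are already ordered top-left, top-right, bottom-right, bottom-left (e.g. by a routine like sortCorners above):
#include <opencv2/imgproc.hpp>
#include <vector>
using namespace cv;

// Warp the quadrilateral spanned by `corners` (ordered TL, TR, BR, BL)
// into an axis-aligned rectangle sized from the quad's edge lengths.
Mat warpQuad(const Mat& src, const std::vector<Point2f>& corners)
{
    int w = (int)norm(corners[1] - corners[0]); // top edge length
    int h = (int)norm(corners[3] - corners[0]); // left edge length
    std::vector<Point2f> dst_pts = {
        Point2f(0, 0), Point2f((float)w, 0),
        Point2f((float)w, (float)h), Point2f(0, (float)h)
    };
    Mat M = getPerspectiveTransform(corners, dst_pts);
    Mat out;
    warpPerspective(src, out, M, Size(w, h));
    return out;
}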

OpenCV find homography using feature matching

I am calculating a homography with this reference:
http://docs.opencv.org/2.4/doc/tutorials/features2d/feature_homography/feature_homography.html
However, there is an error even though it is the reference code: the min_dist value is always 0, so the size of good_matches is 0.
What is wrong and how can I solve it?
int main(int argc, char **argv)
{
    cv::VideoCapture cam(0);
    cv::Mat img(480, 640, CV_8UC3, cv::Scalar(0, 0, 0));
    cv::Mat img2(480, 640, CV_8UC3, cv::Scalar(0, 0, 0));
    cv::Ptr<cv::AKAZE> akaze = cv::AKAZE::create();
    std::vector<cv::KeyPoint> kp;
    cv::Mat descr;
    while (1)
    {
        cam >> img;
        cv::Mat aka;
        akaze->detectAndCompute(img, cv::noArray(), kp, descr);
        cv::drawKeypoints(img, kp, aka);
        cv::imshow("AKAZE", aka);
        if (cv::waitKey(10) == 's')
        {
            break;
        }
        kp.clear();
    }
    akaze.release();
    std::vector<cv::KeyPoint> kp_query;
    cv::Mat descr_query;
    cv::Ptr<cv::AKAZE> akaze2 = cv::AKAZE::create();
    while (1)
    {
        cv::Mat aka;
        cam >> img2;
        akaze2->detectAndCompute(img2, cv::noArray(), kp_query, descr_query);
        cv::BFMatcher matcher(cv::NORM_HAMMING);
        std::vector<cv::DMatch> matches;
        matcher.match(descr_query, descr, matches);
        double max_dist = 0, min_dist = 100.0;
        for (int i = 0; i < descr_query.rows; i++)
        {
            double dist = matches[i].distance;
            if (dist < min_dist) min_dist = dist;
            if (dist > max_dist) max_dist = dist;
        }
        printf("-- Max dist : %f \n", max_dist);
        printf("-- Min dist : %f \n", min_dist);
        std::vector<cv::DMatch> good_matches;
        for (int i = 0; i < descr_query.rows; i++)
        {
            if (matches[i].distance < 3 * min_dist)
                good_matches.push_back(matches[i]);
        }
        std::vector<cv::Point2f> train, query;
        for (int i = 0; i < good_matches.size(); i++)
        {
            // note: descr_query was passed as the query set above, so
            // queryIdx indexes kp_query and trainIdx indexes kp
            train.push_back(kp[good_matches[i].queryIdx].pt);
            query.push_back(kp_query[good_matches[i].trainIdx].pt);
        }
        cv::Mat output_mask;
        cv::Mat H = cv::findHomography(train, query, CV_RANSAC, 3, output_mask);
        cv::Mat m;
        cv::drawMatches(img, kp, img2, kp_query, good_matches, m, cv::Scalar::all(-1), cv::Scalar::all(-1),
                        std::vector<char>(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
        cv::imshow("match", m);
        cv::drawKeypoints(img2, kp_query, aka);
        cv::waitKey(10);
        kp_query.clear();
    }
}
So far I couldn't see any wrong procedure in your code, but you have a 3-channel 8-bit image there. Try using a 1-channel 8-bit image, because that is the format of the images in your linked tutorial. So try changing cv::Mat img2(480, 640, CV_8UC3, cv::Scalar(0, 0, 0)); to cv::Mat img2(480, 640, CV_8UC1, cv::Scalar(0, 0, 0)); for both images. If this does not work for cam >> img, then you have to convert the image to CV_8UC1 after you read it from your cam.
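A minimal sketch of that conversion (assuming the camera delivers BGR frames, as cv::VideoCapture usually does; grabGray is just an illustrative helper name):
#include <opencv2/opencv.hpp>

// Read one camera frame and return it as a single-channel 8-bit image.
cv::Mat grabGray(cv::VideoCapture& cam)
{
    cv::Mat frame, gray;
    cam >> frame;                                  // typically CV_8UC3 (BGR)
    cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY); // now CV_8UC1
    return gray;
}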

OpenCV - Assertion failed in SVM train

I can't find why this is happening. I have 5 classes and made the labels and trainData, but it gives an assertion failure. Why? The labels are 1x1x884 (I tried 1x884x1 as well, it is the same), and fmat is 1 x 884 x 680400, all float32. The error is:
and the code:
HOGDescriptor hog;
vector<vector<float>> features;
int labels[884] = {};
int brojac = 0;
cv::String path1("C:/Users/Kristina/Desktop/Kompjuterska vizija/PrviProjekat/PrviProjekat/Chihuahua/*.jpg"); // select only jpg
vector<cv::String> fn;
vector<cv::Mat> data;
cv::glob(path1, fn, true); // recurse
for (size_t k = 0; k < fn.size(); ++k)
{
    cv::Mat im = cv::imread(fn[k]);
    if (im.empty()) continue; // only proceed if successful
    // you probably want to do some preprocessing
    cvtColor(im, im, CV_RGB2GRAY);
    resize(im, im, Size(200, 200)); // resize image
    vector<float> descriptorsValues;
    hog.compute(im, descriptorsValues);
    features.push_back(descriptorsValues);
    labels[brojac] = 1;
    brojac++;
    data.push_back(im);
}
cv::String path2("C:/Users/Kristina/Desktop/Kompjuterska vizija/PrviProjekat/PrviProjekat/ShihTzu/*.jpg"); // select only jpg
vector<cv::String> fn2;
vector<cv::Mat> data2;
cv::glob(path2, fn2, true); // recurse
for (size_t k = 0; k < fn2.size(); ++k)
{
    cv::Mat im = cv::imread(fn2[k]);
    vector<float> descriptorsValues;
    if (im.empty()) continue; // only proceed if successful
    // you probably want to do some preprocessing
    cvtColor(im, im, CV_RGB2GRAY);
    resize(im, im, Size(200, 200)); // resize image
    hog.compute(im, descriptorsValues);
    features.push_back(descriptorsValues);
    labels[brojac] = 2;
    brojac++;
    data.push_back(im);
}
cv::String path3("C:/Users/Kristina/Desktop/Kompjuterska vizija/PrviProjekat/PrviProjekat/Yorkshireterrier/*.jpg"); // select only jpg
vector<cv::String> fn3;
vector<cv::Mat> data3;
cv::glob(path3, fn3, true); // recurse
for (size_t k = 0; k < fn3.size(); ++k)
{
    vector<float> descriptorsValues;
    cv::Mat im = cv::imread(fn3[k]);
    if (im.empty()) continue; // only proceed if successful
    // you probably want to do some preprocessing
    cvtColor(im, im, CV_RGB2GRAY);
    resize(im, im, Size(200, 200)); // resize image
    hog.compute(im, descriptorsValues);
    features.push_back(descriptorsValues);
    labels[brojac] = 3;
    brojac++;
    data.push_back(im);
}
cv::String path4("C:/Users/Kristina/Desktop/Kompjuterska vizija/PrviProjekat/PrviProjekat/vizsla/*.jpg"); // select only jpg
vector<cv::String> fn4;
vector<cv::Mat> data4;
cv::glob(path4, fn4, true); // recurse
for (size_t k = 0; k < fn4.size(); ++k)
{
    cv::Mat im = cv::imread(fn4[k]);
    vector<float> descriptorsValues;
    if (im.empty()) continue; // only proceed if successful
    // you probably want to do some preprocessing
    cvtColor(im, im, CV_RGB2GRAY);
    resize(im, im, Size(200, 200)); // resize image
    hog.compute(im, descriptorsValues);
    features.push_back(descriptorsValues);
    labels[brojac] = 4;
    brojac++;
    data.push_back(im);
}
cv::String path5("C:/Users/Kristina/Desktop/Kompjuterska vizija/PrviProjekat/PrviProjekat/pug/*.jpg"); // select only jpg
vector<cv::String> fn5;
vector<cv::Mat> data5;
cv::glob(path5, fn5, true); // recurse
for (size_t k = 0; k < fn5.size(); ++k)
{
    cv::Mat im = cv::imread(fn5[k]);
    vector<float> descriptorsValues;
    if (im.empty()) continue; // only proceed if successful
    // you probably want to do some preprocessing
    cvtColor(im, im, CV_RGB2GRAY);
    resize(im, im, Size(200, 200)); // resize image
    hog.compute(im, descriptorsValues);
    features.push_back(descriptorsValues);
    labels[brojac] = 5;
    brojac++;
    data.push_back(im);
}
Mat labelsMat(884, 1, CV_32FC1, labels);
Mat fmat(features[0].size(), features.size(), CV_32FC1, features.data());
Ptr<SVM> svm = SVM::create();
svm->setType(SVM::C_SVC);
svm->setKernel(SVM::LINEAR);
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6));
svm->train(fmat, ml::SampleTypes::ROW_SAMPLE, labelsMat);
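Without the exact assertion text this is only a guess, but two details stand out: cv::ml::SVM expects CV_32S labels for classification, and a vector<vector<float>> is not one contiguous block of floats, so Mat(..., features.data()) does not actually point at the descriptor values. A sketch of packing the descriptors row by row (one sample per row; assumes all descriptors have the same length):
#include <opencv2/opencv.hpp>
#include <cstring>
#include <vector>
using namespace cv;

// Copy each HOG descriptor into its own row of a CV_32F training matrix.
Mat packFeatures(const std::vector<std::vector<float>>& features)
{
    Mat fmat((int)features.size(), (int)features[0].size(), CV_32FC1);
    for (int r = 0; r < fmat.rows; r++)
        std::memcpy(fmat.ptr<float>(r), features[r].data(),
                    features[r].size() * sizeof(float));
    return fmat;
}

// usage sketch:
// Mat fmat = packFeatures(features);
// Mat labelsMat((int)features.size(), 1, CV_32SC1, labels); // CV_32S, not CV_32F
// svm->train(fmat, cv::ml::ROW_SAMPLE, labelsMat);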

OpenCV stitching multiple images

I have been working on stitching multiple images in OpenCV in order to create a mosaic.
I followed this link on OpenCV:
Stitch multiple images using OpenCV (Python)
Here's the code I have so far:
// imgstch.cpp :
//#include "stdafx.h"
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <iostream>
#include <stdio.h>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#define _CRT_SECURE_NO_WARNINGS
using namespace std;
using namespace cv;

int main()
{
    //-- Input the two images
    cv::Mat img1;
    std::vector<cv::KeyPoint> img1_keypoints;
    cv::Mat img1_descriptors;
    cv::Mat img2;
    std::vector<cv::KeyPoint> img2_keypoints;
    cv::Mat img2_descriptors;
    img1 = cv::imread("/home/ishita/Downloads/ishita/Downloads/Mosaic/b2.JPG");
    img2 = cv::imread("/home/ishita/Downloads/ishita/Downloads/Mosaic/b1.JPG");

    //-- ORB feature detector, extractor and descriptor
    int minHessian = 1800; // note: ORB takes nfeatures here, not a Hessian threshold
    OrbFeatureDetector detector(minHessian);
    OrbDescriptorExtractor extractor;
    detector.detect(img1, img1_keypoints);
    detector.detect(img2, img2_keypoints);
    extractor.compute(img1, img1_keypoints, img1_descriptors);
    extractor.compute(img2, img2_keypoints, img2_descriptors);

    //-- Matching descriptor vectors with a brute force matcher
    BFMatcher matcher(NORM_HAMMING);
    std::vector<DMatch> matches;
    matcher.match(img1_descriptors, img2_descriptors, matches);
    imshow("image1", img1);
    imshow("image2", img2);

    //-- Draw matches
    Mat img_matches;
    drawMatches(img1, img1_keypoints, img2, img2_keypoints, matches, img_matches);
    //-- Show detected matches
    imshow("Matches", img_matches);
    imwrite("/home/ishita/img_stitch/img_matches.jpg", img_matches);

    double max_dist = 0; double min_dist = 10;
    //-- Quick calculation of max and min distances between keypoints
    for (int i = 0; i < matches.size(); i++)
    {
        double dist = matches[i].distance;
        if (dist < min_dist && dist > 3)
        {
            min_dist = dist;
        }
        if (dist > max_dist) max_dist = dist;
    }
    //-- Keep only "good" matches (i.e. whose distance is less than 3*min_dist)
    std::vector<DMatch> good_matches;
    for (int i = 0; i < matches.size(); i++)
    {
        //cout << matches[i].distance << endl;
        if (matches[i].distance < 3 * min_dist && matches[i].distance > 3)
        {
            good_matches.push_back(matches[i]);
        }
    }

    //-- Calculate the homography
    vector<Point2f> p1, p2;
    for (unsigned int i = 0; i < matches.size(); i++) {
        p1.push_back(img1_keypoints[matches[i].queryIdx].pt);
        p2.push_back(img2_keypoints[matches[i].trainIdx].pt);
    }
    vector<unsigned char> match_mask;
    Mat H = findHomography(p1, p2, CV_RANSAC);
    cout << "H = " << endl << " " << H << endl << endl;

    //-- Use the homography matrix to warp the images
    cv::Mat result;
    result = img1.clone();
    warpPerspective(img1, result, H, cv::Size(img1.cols + img2.cols, img1.rows));
    cv::Mat half(result, cv::Rect(0, 0, img2.cols, img2.rows));
    img2.copyTo(half);
    imwrite("/home/ishita/img_stitch/result.jpg", result);
    imshow("Result", result);

    //-- For images 2 and 3
    cv::Mat img3;
    std::vector<cv::KeyPoint> img3_keypoints;
    cv::Mat img3_descriptors;
    img3 = cv::imread("/home/ishita/Downloads/ishita/Downloads/Mosaic/b3.JPG");
    //detector.detect(img2, img2_keypoints);
    detector.detect(img3, img3_keypoints);
    //extractor.compute(img2, img2_keypoints, img2_descriptors);
    extractor.compute(img3, img3_keypoints, img3_descriptors);
    matcher.match(img1_descriptors, img3_descriptors, matches);
    //imshow("image2", img1);
    imshow("image3", img3);

    //-- Draw matches
    Mat img_matches2;
    drawMatches(img1, img1_keypoints, img3, img3_keypoints, matches, img_matches2);
    //-- Show detected matches
    imshow("Matches2", img_matches2);
    imwrite("/home/ishita/img_stitch/img_matches.jpg", img_matches2);

    max_dist = 0; min_dist = 10;
    //-- Quick calculation of max and min distances between keypoints
    for (int i = 0; i < matches.size(); i++)
    {
        double dist = matches[i].distance;
        if (dist < min_dist && dist > 3)
        {
            min_dist = dist;
        }
        if (dist > max_dist) max_dist = dist;
    }
    //-- Keep only "good" matches (i.e. whose distance is less than 3*min_dist)
    std::vector<DMatch> good_matches2;
    for (int i = 0; i < matches.size(); i++)
    {
        //cout << matches[i].distance << endl;
        if (matches[i].distance < 3 * min_dist && matches[i].distance > 3)
        {
            good_matches2.push_back(matches[i]);
        }
    }

    //-- Calculate the homography
    vector<Point2f> p3, p4;
    for (unsigned int i = 0; i < matches.size(); i++) {
        p3.push_back(img1_keypoints[matches[i].queryIdx].pt);
        p4.push_back(img3_keypoints[matches[i].trainIdx].pt);
    }
    vector<unsigned char> match_mask2;
    Mat H2 = findHomography(p3, p4, CV_RANSAC);
    Mat H3 = H * H2;
    cout << "H2 = " << endl << " " << H2 << endl << endl;

    //-- Use the homography matrix to warp the images
    cv::Mat result2;
    result2 = result.clone();
    warpPerspective(result, result2, H3, cv::Size(img3.cols + result.cols, result.rows));
    cv::Mat half2(result, cv::Rect(0, 0, img3.cols, img3.rows));
    img3.copyTo(half2);
    imwrite("/home/ishita/img_stitch/result.jpg", result2);
    imshow("Result2", result2);

    waitKey(0);
    return 0;
}
The result of stitching the first two images is as required, but the result of stitching the third image is not appropriate.
What could be wrong with the logic behind this, or with the method's implementation?
The images and the result can be found here:
https://drive.google.com/folderview?id=0BxXVoeIUgVW7fnFMbExhTzN4QnRueXZpQmpILTZCWFZoTlZEckxfWV83VjkxMmFNSXdLVWM&usp=sharing
This answer is too late, but it may be helpful for others.
In some algorithms, the images being stitched must have the same dimensions.
(If that is the problem, test the solution below.)
1. Run the program once for (img1, img2) => result1.
2. Then for (img2, img3) => result2.
3. Finally, (result1, result2) => final.
Here is another example that takes all the images at once:
https://www.geeksforgeeks.org/stitching-input-images-panorama-using-opencv-c/
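Along those lines, a minimal sketch of the all-at-once approach using OpenCV's high-level cv::Stitcher (OpenCV 3.x/4.x API; the file names are placeholders):
#include <opencv2/opencv.hpp>
#include <opencv2/stitching.hpp>
#include <iostream>
#include <vector>

int main()
{
    // load the input images (placeholder names)
    std::vector<cv::Mat> imgs;
    for (const char* name : {"b1.JPG", "b2.JPG", "b3.JPG"}) {
        cv::Mat img = cv::imread(name);
        if (img.empty()) { std::cerr << "could not read " << name << "\n"; return 1; }
        imgs.push_back(img);
    }
    // let Stitcher handle matching, homography estimation and blending
    cv::Mat pano;
    cv::Ptr<cv::Stitcher> stitcher = cv::Stitcher::create(cv::Stitcher::PANORAMA);
    cv::Stitcher::Status status = stitcher->stitch(imgs, pano);
    if (status != cv::Stitcher::OK) {
        std::cerr << "stitching failed, status = " << int(status) << "\n";
        return 1;
    }
    cv::imwrite("pano.jpg", pano);
    return 0;
}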