I'm writing an algorithm to stabilize a video. My current process is:
Get features with SURF
Build a feature kd-tree, then match each pair of consecutive frames. I'm using knnMatch from OpenCV, but I'm not sure whether it uses kd-trees (see the sketch after this list).
Use findHomography to compute the homography
Warp with the inverse of the homography matrix
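For the kd-tree question: FlannBasedMatcher's default index for float descriptors such as SURF is a randomized kd-tree forest, so knnMatch does go through kd-trees by default. A minimal sketch making that choice explicit (the tree and check counts here are illustrative, and descriptors1/descriptors2 are assumed to come from the code below):

#include <opencv2/features2d.hpp>
#include <opencv2/flann.hpp>

// Sketch: FLANN matcher with an explicit kd-tree index for float descriptors.
cv::FlannBasedMatcher matcher(
    cv::makePtr<cv::flann::KDTreeIndexParams>(4),  // 4 randomized kd-trees
    cv::makePtr<cv::flann::SearchParams>(32));     // leaf checks per query
std::vector<std::vector<cv::DMatch>> knn_matches;
matcher.knnMatch(descriptors1, descriptors2, knn_matches, 2);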
The code right now is as follows:
void Stab::process()
{
    Mat gray, prev_gray;
    cvtColor(frame, gray, COLOR_BGR2GRAY);

    // On the very first frame there is no previous frame yet, so the frame
    // is matched against itself (identity homography).
    if (!first)
    {
        prev_frame = frame.clone();
        first = true;
    }
    cvtColor(prev_frame, prev_gray, COLOR_BGR2GRAY);

    //-- Step 1: Detect the keypoints using the SURF detector, compute the descriptors
    int minHessian = 400;
    Ptr<SURF> detector = SURF::create( minHessian );
    std::vector<KeyPoint> keypoints1, keypoints2;
    Mat descriptors1, descriptors2;
    detector->detectAndCompute( prev_gray, noArray(), keypoints1, descriptors1 );
    detector->detectAndCompute( gray, noArray(), keypoints2, descriptors2 );

    //-- Step 2: Matching descriptor vectors with a FLANN-based matcher
    // Since SURF is a floating-point descriptor, NORM_L2 is used
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::FLANNBASED);
    std::vector< std::vector<DMatch> > knn_matches;
    matcher->knnMatch( descriptors1, descriptors2, knn_matches, 2 );

    //-- Filter matches using Lowe's ratio test
    const float ratio_thresh = 0.7f;
    std::vector<DMatch> good_matches;
    for (size_t i = 0; i < knn_matches.size(); i++)
    {
        if (knn_matches[i][0].distance < ratio_thresh * knn_matches[i][1].distance)
        {
            good_matches.push_back(knn_matches[i][0]);
        }
    }

    //-- Collect the matched point pairs
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;
    for( size_t i = 0; i < good_matches.size(); i++ )
    {
        obj.push_back( keypoints1[ good_matches[i].queryIdx ].pt );
        scene.push_back( keypoints2[ good_matches[i].trainIdx ].pt );
    }

    // H maps the previous frame onto the current one; warping the current
    // frame with its inverse aligns it back to the previous frame.
    Mat H = findHomography( obj, scene, RANSAC );
    if (H.empty())  // not enough good matches to estimate a homography
    {
        im_out = frame.clone();
    }
    else
    {
        Mat HInv = H.inv();
        warpPerspective(frame, im_out, HInv, prev_frame.size());
    }

    prev_frame = frame.clone();
    //waitKey(30);
}
But the results I'm getting are pretty bad. Is there any optimization that can be done, or should I switch to some other method?
Related
I am trying to stitch two images. My tech stack is OpenCV with C++ on Visual Studio 2017.
The images I considered are image1 and image2. (images omitted)
I have found the homography matrix using this code, with image1 and image2 as given above.
int minHessian = 400;
Ptr<SURF> detector = SURF::create(minHessian);
vector< KeyPoint > keypoints_object, keypoints_scene;
// gray_image1 / gray_image2 are assumed to be the grayscale versions of
// image1 / image2, prepared earlier.
detector->detect(gray_image1, keypoints_object);
detector->detect(gray_image2, keypoints_scene);

Mat img_keypoints;
drawKeypoints(gray_image1, keypoints_object, img_keypoints);
imshow("SURF Keypoints", img_keypoints);
Mat img_keypoints1;
drawKeypoints(gray_image2, keypoints_scene, img_keypoints1);
imshow("SURF Keypoints1", img_keypoints1);

//-- Step 2: Calculate descriptors (feature vectors)
Mat descriptors_object, descriptors_scene;
detector->compute(gray_image1, keypoints_object, descriptors_object);
detector->compute(gray_image2, keypoints_scene, descriptors_scene);

//-- Step 3: Matching descriptor vectors using the FLANN matcher
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::FLANNBASED);
vector< DMatch > matches;
matcher->match(descriptors_object, descriptors_scene, matches);

double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for (int i = 0; i < descriptors_object.rows; i++)
{
    double dist = matches[i].distance;
    if (dist < min_dist) min_dist = dist;
    if (dist > max_dist) max_dist = dist;
}
printf("-- Max dist: %f \n", max_dist);
printf("-- Min dist: %f \n", min_dist);

//-- Use only "good" matches (i.e. whose distance is less than 3*min_dist)
vector< DMatch > good_matches;
for (int i = 0; i < descriptors_object.rows; i++)
{
    if (matches[i].distance < 3 * min_dist)
    {
        good_matches.push_back(matches[i]);
    }
}

Mat img_matches;
drawMatches(gray_image1, keypoints_object, gray_image2, keypoints_scene, good_matches, img_matches,
            Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
imshow("Good Matches", img_matches);

std::vector< Point2f > obj;
std::vector< Point2f > scene;
cout << "Good matches detected: " << good_matches.size() << endl;
for (size_t i = 0; i < good_matches.size(); i++)
{
    //-- Get the keypoints from the good matches
    obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
    scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
}

// Find the homography matrix for image1 and image2
Mat H = findHomography(obj, scene, RANSAC);
The next step would be to warp these. I used the perspectiveTransform function to find the corners of image1 in the stitched image, and took that as the number of columns for the result Mat. This is the code I wrote:
vector<Point2f> imageCorners(4);
imageCorners[0] = Point(0, 0);
imageCorners[1] = Point(image1.cols, 0);
imageCorners[2] = Point(image1.cols, image1.rows);
imageCorners[3] = Point(0, image1.rows);
vector<Point2f> projectedCorners(4);
perspectiveTransform(imageCorners, projectedCorners, H);
Mat result;
warpPerspective(image1, result, H, Size(projectedCorners[2].x, image1.rows));
Mat half(result, Rect(0, 0, image2.cols, image2.rows));
image2.copyTo(half);
imshow("result", result);
I am getting a stitched output of these images, but the issue is the size of the result. I compared the result of the above code against the two original images combined manually, and the code's result is larger. What should I do to make it the perfect size? The ideal size should be image1.cols + image2.cols - the overlapping length.
warpPerspective(image1, result, H, Size(projectedCorners[2].x, image1.rows));
This line seems problematic.
You should take the extremum points of the projected corners for the size.
Rect rec = boundingRect(projectedCorners);
warpPerspective(image1, result, H, rec.size());
But you will lose parts of the warp if rec.tl() falls into negative coordinates, so you should shift the homography matrix so the result lands in the first quadrant, as in the sketch below.
See Warping to perspective section of my answer to Fast and Robust Image Stitching Algorithm for many images in Python.
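A minimal sketch of that shift, assuming H and projectedCorners from the code above: premultiply H with a translation that moves the bounding box's top-left corner to the origin.

// Sketch: shift the warp into the first quadrant so nothing is clipped.
Rect rec = boundingRect(projectedCorners);
Mat T = (Mat_<double>(3, 3) << 1, 0, -rec.x,
                               0, 1, -rec.y,
                               0, 0, 1);
Mat shifted;
warpPerspective(image1, shifted, T * H, rec.size());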
I am trying to construct a panoramic view from different images.
Initially I tried to stitch two images as part of the panorama construction.
The two input images I am trying to stitch are: (images omitted)
I used the ORB feature descriptor to find features in the images, then computed the homography matrix between the two images.
My code is:
int main(int argc, char **argv)
{
    Mat img1 = imread(argv[1], 1);
    Mat img2 = imread(argv[2], 1);

    //-- Step 1: Detect the keypoints using the ORB detector
    std::vector<KeyPoint> kp1, kp2;
    // Default parameters of ORB
    int nfeatures = 500;
    float scaleFactor = 1.2f;
    int nlevels = 8;
    int edgeThreshold = 15; // Changed default (31)
    int firstLevel = 0;
    int WTA_K = 2;
    int scoreType = ORB::HARRIS_SCORE;
    int patchSize = 31;
    int fastThreshold = 20;

    Ptr<ORB> detector = ORB::create(
        nfeatures, scaleFactor, nlevels, edgeThreshold,
        firstLevel, WTA_K, scoreType, patchSize, fastThreshold);

    // detect() takes an optional mask as its third argument, not a descriptor
    // matrix, so the descriptors are computed separately below.
    detector->detect(img1, kp1);
    detector->detect(img2, kp2);

    //-- Step 2: Calculate descriptors (feature vectors)
    Mat descriptors_img1, descriptors_img2;
    Ptr<DescriptorExtractor> extractor = ORB::create();
    extractor->compute(img1, kp1, descriptors_img1);
    extractor->compute(img2, kp2, descriptors_img2);

    //-- Step 3: Matching descriptor vectors using the FLANN matcher
    if (descriptors_img1.empty())
        CV_Error(Error::StsError, "1st descriptor empty");
    if (descriptors_img2.empty())
        CV_Error(Error::StsError, "2nd descriptor empty");

    // FLANN's default kd-tree index expects float descriptors, so the binary
    // ORB descriptors are converted here (a workaround; Hamming matching with
    // BFMatcher is the usual choice for ORB).
    descriptors_img1.convertTo(descriptors_img1, CV_32F);
    descriptors_img2.convertTo(descriptors_img2, CV_32F);

    FlannBasedMatcher matcher;
    std::vector<DMatch> matches;
    matcher.match(descriptors_img1, descriptors_img2, matches);

    double max_dist = 0; double min_dist = 100;
    //-- Quick calculation of max and min distances between keypoints
    for (int i = 0; i < descriptors_img1.rows; i++)
    {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }
    printf("-- Max dist : %f \n", max_dist);
    printf("-- Min dist : %f \n", min_dist);

    //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist)
    std::vector< DMatch > good_matches;
    for (int i = 0; i < descriptors_img1.rows; i++)
    {
        if (matches[i].distance < 3 * min_dist)
        {
            good_matches.push_back(matches[i]);
        }
    }

    Mat img_matches;
    drawMatches(img1, kp1, img2, kp2, good_matches, img_matches, Scalar::all(-1),
                Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

    std::vector<Point2f> obj;
    std::vector<Point2f> scene;
    for (size_t i = 0; i < good_matches.size(); i++)
    {
        //-- Get the keypoints from the good matches
        obj.push_back(kp1[good_matches[i].queryIdx].pt);
        scene.push_back(kp2[good_matches[i].trainIdx].pt);
    }
    Mat H = findHomography(obj, scene, RANSAC);
Afterwards, some people told me to include the following code:
cv::Mat result;
warpPerspective( img1, result, H, cv::Size( img1.cols+img2.cols, img1.rows) );
cv::Mat half(result, cv::Rect(0, 0, img2.cols, img2.rows) );
img2.copyTo(half);
imshow("result",result);
The result I got is: (image omitted)
I also tried using OpenCV's built-in stitching function, and got this result: (image omitted)
I am trying to implement the stitching myself, so I don't want to use the built-in OpenCV function.
Can anyone tell me where I went wrong and correct my code? Thanks in advance.
Image stitching includes the following steps:
Feature finding
Find camera parameters
Warping
Exposure compensation
Seam Finding
Blending
You have to do all of these steps to get a clean result.
In your code you have only done the first part: feature finding.
You can find a detailed explanation of how image stitching works on Learn OpenCV.
Also I have the code on Github
Hope this helps.
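For reference, a minimal sketch of OpenCV's high-level cv::Stitcher, which runs all of the stages above internally; it is useful as a baseline to compare a hand-rolled pipeline against (img1/img2 as in the question):

#include <opencv2/stitching.hpp>
#include <iostream>
#include <vector>

// Sketch: Stitcher performs feature finding, camera estimation, warping,
// exposure compensation, seam finding and blending in one call.
std::vector<cv::Mat> imgs = { img1, img2 };
cv::Mat pano;
cv::Ptr<cv::Stitcher> stitcher = cv::Stitcher::create(cv::Stitcher::PANORAMA);
cv::Stitcher::Status status = stitcher->stitch(imgs, pano);
if (status != cv::Stitcher::OK)
    std::cerr << "Stitching failed, status " << int(status) << std::endl;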
Using this code to find matches between images:
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/xfeatures2d/nonfree.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <vector>

using namespace std;
using namespace cv;

int main(int argc, char *argv[]) {
    //cv::initModule_nonfree();
    //initModule_features2d();
    Mat img_1 = imread("C:/Users/Dan/Desktop/0.jpg", 1);
    Mat img_2 = imread("C:/Users/Dan/Desktop/0.jpg", 1);

    cv::Ptr<Feature2D> f2d = xfeatures2d::SIFT::create();

    //-- Step 1: Detect the keypoints
    std::vector<KeyPoint> keypoints_1, keypoints_2;
    f2d->detect(img_1, keypoints_1);
    f2d->detect(img_2, keypoints_2);

    //-- Step 2: Calculate descriptors (feature vectors)
    Mat descriptors_1, descriptors_2;
    f2d->compute(img_1, keypoints_1, descriptors_1);
    f2d->compute(img_2, keypoints_2, descriptors_2);

    Mat out0;
    drawKeypoints(img_1, keypoints_1, out0);
    imshow("KeyPoint0.jpg", out0);

    //-- Step 3: Matching descriptor vectors using BFMatcher
    BFMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match(descriptors_1, descriptors_2, matches);

    Mat img_matches;
    drawMatches(img_1, keypoints_1, img_2, keypoints_2, matches, img_matches);
    imshow("matches", img_matches);

    waitKey(0); // Wait for a key press before exiting
    return 0;
}
Since the functions changed in OpenCV 3.1, I looked for example code using SURF or SIFT but could not find any.
How can I modify this code so it draws contours around detected objects, as the samples for earlier OpenCV versions did?
You will need to use findHomography to get the transformation that relates your training image (img_1) to the image in which it is to be detected (img_2).
Then you can simply apply perspectiveTransform to the bounding box of your training image (at the origin), using the homography obtained, to place the correct bounding box on the detected image, as in the sketch below.
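A minimal sketch of those two steps, reusing names from the question's code; H is assumed to come from findHomography on the matched points, and the box is drawn on the right half of the side-by-side img_matches visualization (offset by img_1's width):

#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>

// Sketch: project the training image's corners through H and draw the box.
std::vector<Point2f> obj_corners = {
    Point2f(0, 0),
    Point2f((float)img_1.cols, 0),
    Point2f((float)img_1.cols, (float)img_1.rows),
    Point2f(0, (float)img_1.rows) };
std::vector<Point2f> scene_corners;
perspectiveTransform(obj_corners, scene_corners, H);

Point2f offset((float)img_1.cols, 0); // img_2 starts after img_1 in img_matches
for (int i = 0; i < 4; i++)
    line(img_matches, scene_corners[i] + offset,
         scene_corners[(i + 1) % 4] + offset, Scalar(0, 255, 0), 2);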
Related code, taken from an ORB detection example:
// matched1/matched2, Points(), stats, object_bb, ransac_thresh, bb_min_inliers
// and drawBoundingBox() come from the surrounding example code.
Mat inlier_mask, homography;
vector<KeyPoint> inliers1, inliers2;
vector<DMatch> inlier_matches;
if(matched1.size() >= 4) {
    homography = findHomography(Points(matched1), Points(matched2),
                                RANSAC, ransac_thresh, inlier_mask);
}
// Keep only the RANSAC inliers and renumber the matches accordingly
for(unsigned i = 0; i < matched1.size(); i++) {
    if(inlier_mask.at<uchar>(i)) {
        int new_i = static_cast<int>(inliers1.size());
        inliers1.push_back(matched1[i]);
        inliers2.push_back(matched2[i]);
        inlier_matches.push_back(DMatch(new_i, new_i, 0));
    }
}
stats.inliers = (int)inliers1.size();
stats.ratio = stats.inliers * 1.0 / stats.matches;

vector<Point2f> new_bb;
perspectiveTransform(object_bb, new_bb, homography);
Mat frame_with_bb = frame.clone();
if(stats.inliers >= bb_min_inliers) {
    drawBoundingBox(frame_with_bb, new_bb);
}
Mat res;
drawMatches(first_frame, inliers1, frame_with_bb, inliers2,
            inlier_matches, res,
            Scalar(255, 0, 0), Scalar(255, 0, 0));
I'm trying to extract and match features with OpenCV, using ORB for detecting and FLANN for matching, and I get a really weird result. After loading my two images and converting them to grayscale, here's my code:
// Initiate ORB detector
Ptr<FeatureDetector> detector = ORB::create();
std::vector<KeyPoint> keypoints_object, keypoints_scene;
Mat descriptors_object, descriptors_scene;

// find the keypoints and descriptors with ORB
detector->detect(gray_image1, keypoints_object);
detector->detect(gray_image2, keypoints_scene);
Ptr<DescriptorExtractor> extractor = ORB::create();
extractor->compute(gray_image1, keypoints_object, descriptors_object);
extractor->compute(gray_image2, keypoints_scene, descriptors_scene);

// FLANN's default index needs the descriptors to be of type CV_32F
descriptors_scene.convertTo(descriptors_scene, CV_32F);
descriptors_object.convertTo(descriptors_object, CV_32F);

FlannBasedMatcher matcher;
vector<DMatch> matches;
matcher.match(descriptors_object, descriptors_scene, matches);

double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for (int i = 0; i < descriptors_object.rows; i++)
{
    double dist = matches[i].distance;
    if (dist < min_dist) min_dist = dist;
    if (dist > max_dist) max_dist = dist;
}

//-- Use only "good" matches (i.e. whose distance is less than 3*min_dist)
vector< DMatch > good_matches;
for (int i = 0; i < descriptors_object.rows; i++)
{
    if (matches[i].distance < 3 * min_dist)
    {
        good_matches.push_back(matches[i]);
    }
}

vector< Point2f > obj;
vector< Point2f > scene;
for (size_t i = 0; i < good_matches.size(); i++)
{
    //-- Get the keypoints from the good matches
    obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
    scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
}

// Find the Homography Matrix
Mat H = findHomography(obj, scene, RANSAC);

// Use the Homography Matrix to warp the images
cv::Mat result;
warpPerspective(image1, result, H, Size(image1.cols + image2.cols, image1.rows));
cv::Mat half(result, cv::Rect(0, 0, image2.cols, image2.rows));
image2.copyTo(half);
imshow("Result", result);
And this is a screenshot of the weird result I'm getting: (screenshot omitted)
What might be the problem?
Thanks!
You are seeing the results of bad matching: the homography that fits the data is not "realistic" and thus distorts the image.
You can debug your matching with imshow( "Good Matches", img_matches ); as done in the example.
There are multiple approaches to improve your matches:
Use the crossCheck option
Use the SIFT ratio test
Use the OutputArray mask in cv::findHomography to identify totally wrong homography computations
... and so on...
ORB descriptors are binary feature vectors, which don't work with FLANN's default kd-tree index. Use brute force (BFMatcher with NORM_HAMMING) instead; a sketch follows.
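A minimal sketch of that swap, reusing the descriptor names from the question and applied to the raw ORB descriptors (no CV_32F conversion needed; enabling crossCheck also gives the mutual-consistency filtering mentioned above):

// Sketch: match binary ORB descriptors with Hamming distance; crossCheck
// keeps only pairs that are each other's best match in both directions.
BFMatcher matcher(NORM_HAMMING, /*crossCheck=*/true);
vector<DMatch> matches;
matcher.match(descriptors_object, descriptors_scene, matches);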
I am applying warpPerspective with OpenCV to build a mosaic from several images, but I have run into a problem: when I apply the warp, the generated image doesn't show fully in the window.
Only part of the image appears, and I need to know how to find the coordinates of the (0,0) corner of my image after applying warpPerspective.
As you can see, part of the first image is cut off compared with the second image presented here.
So my problem is: how do I find the starting coordinates after applying warpPerspective? How can I solve this using OpenCV?
This is my code:
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"

using namespace cv;

void readme();

/** @function main */
int main( int argc, char** argv )
{
    // Load the images
    Mat image1 = imread( "f.jpg" );
    Mat image2 = imread( "e.jpg" );
    Mat gray_image1;
    Mat gray_image2;

    // Convert to grayscale (imread loads BGR, hence CV_BGR2GRAY)
    cvtColor( image1, gray_image1, CV_BGR2GRAY );
    cvtColor( image2, gray_image2, CV_BGR2GRAY );

    imshow("first image", image2);
    imshow("second image", image1);

    //-- Step 1: Detect the keypoints using the SURF detector
    int minHessian = 100;
    SurfFeatureDetector detector( minHessian );
    std::vector< KeyPoint > keypoints_object, keypoints_scene;
    detector.detect( gray_image1, keypoints_object );
    detector.detect( gray_image2, keypoints_scene );

    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat descriptors_object, descriptors_scene;
    extractor.compute( gray_image1, keypoints_object, descriptors_object );
    extractor.compute( gray_image2, keypoints_scene, descriptors_scene );

    //-- Step 3: Matching descriptor vectors using the FLANN matcher
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match( descriptors_object, descriptors_scene, matches );

    double max_dist = 0; double min_dist = 100;
    //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < descriptors_object.rows; i++ )
    {
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }
    printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );

    //-- Use only "good" matches (i.e. whose distance is less than 3*min_dist )
    std::vector< DMatch > good_matches;
    for( int i = 0; i < descriptors_object.rows; i++ )
    {
        if( matches[i].distance < 3*min_dist )
        {
            good_matches.push_back( matches[i] );
        }
    }

    std::vector< Point2f > obj;
    std::vector< Point2f > scene;
    for( size_t i = 0; i < good_matches.size(); i++ )
    {
        //-- Get the keypoints from the good matches
        obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
        scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
    }

    // Find the homography matrix
    Mat H = findHomography( obj, scene, CV_RANSAC );

    // Use the homography matrix to warp the first image
    cv::Mat result;
    warpPerspective( image1, result, H, cv::Size() );
    imshow("WARP", result);

    cv::Mat half( result, cv::Rect(0, 0, image2.cols, image2.rows) );
    image2.copyTo(half);

    Mat key;
    //drawKeypoints(image1, keypoints_scene, key, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
    //drawMatches(image2, keypoints_scene, image1, keypoints_object, matches, result);

    imshow( "Result", result );
    imwrite( "teste.jpg", result );
    waitKey(0);
    return 0;
}

/** @function readme */
void readme()
{
    std::cout << " Usage: Panorama < img1 > < img2 >" << std::endl;
}
In this image, the second image appears cut off: (output image omitted)
I want my image to appear in this form: (desired result omitted)
The modification below should solve your problem of the stitched image being cut off.
Try changing this line:
warpPerspective(image1,result,H,cv::Size());
to
warpPerspective(image1,result,H,cv::Size(image1.cols+image2.cols,image1.rows));
This creates the result matrix with enough columns for both images and with the number of rows equal to that of image1, avoiding unwanted extra rows.
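If you also want to find where image1's origin lands after the warp, which was the original question, here is a minimal sketch assuming H from the code above: project the corners with perspectiveTransform and take their bounding box.

// Sketch: locate image1's corners after warping with H.
std::vector<Point2f> corners(4);
corners[0] = Point2f(0, 0);
corners[1] = Point2f((float)image1.cols, 0);
corners[2] = Point2f((float)image1.cols, (float)image1.rows);
corners[3] = Point2f(0, (float)image1.rows);
std::vector<Point2f> warped;
perspectiveTransform(corners, warped, H);
Rect box = boundingRect(warped);
// warped[0] is where the old (0,0) lands; if box.tl() has negative
// coordinates, shift H by that translation before warping (see the
// first-quadrant shift sketched earlier).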