OpenCV image frames not stabilizing - C++

I am trying to perform a video stabilization technique using OpenCV and C++. The technique involves SURF detection and description, computing a homography between consecutive frames, and warping the perspective of each subsequent frame based on that homography. From the articles I have seen, I got the impression that this should produce a stabilized video, but that is not the case, and I can't work out what I'm doing wrong. The following is my code:
Mat prev_frame, curr_frame;
int minHessian = 400;
Ptr<SURF> detector = SURF::create( minHessian );
std::vector<KeyPoint> prev_keypoints, curr_keypoints;
Mat prev_descriptors, curr_descriptors;

VideoCapture cap("patio.mp4");
if (!cap.isOpened())
    return -1;

cap >> curr_frame;
detector->detectAndCompute( curr_frame, Mat(), curr_keypoints, curr_descriptors );

namedWindow("Original Video");
namedWindow("Modified Video");
moveWindow("Modified Video", 600, 0);

FlannBasedMatcher matcher;
std::vector< DMatch > matches;

for(;;){
    // Shift the current frame and its features into the "previous" slots.
    prev_frame = curr_frame.clone();
    prev_keypoints = curr_keypoints;
    prev_descriptors = curr_descriptors.clone();
    curr_keypoints.clear();
    curr_descriptors = Mat();

    cap >> curr_frame;
    if( !prev_frame.data || !curr_frame.data )
    {
        std::cout << " --(!) Error reading images " << std::endl;
        return -1;
    }

    detector->detectAndCompute( curr_frame, Mat(), curr_keypoints, curr_descriptors );
    if (curr_keypoints.size() < 20) {
        continue;
    }

    matcher.match( prev_descriptors, curr_descriptors, matches );

    //-- Quick calculation of max and min distances between matches
    double max_dist = 0; double min_dist = 100;
    for( int i = 0; i < prev_descriptors.rows; i++ )
    {
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }

    //-- Keep only "good" matches (distance less than 3*min_dist)
    std::vector< DMatch > good_matches;
    for( int i = 0; i < prev_descriptors.rows; i++ )
    {
        if( matches[i].distance < 3*min_dist )
        {
            good_matches.push_back( matches[i] );
        }
    }

    std::vector<Point2f> prev;
    std::vector<Point2f> curr;
    for( size_t i = 0; i < good_matches.size(); i++ )
    {
        //-- Get the keypoints from the good matches
        prev.push_back( prev_keypoints[ good_matches[i].queryIdx ].pt );
        curr.push_back( curr_keypoints[ good_matches[i].trainIdx ].pt );
    }
    if (prev.size() <= 4 || curr.size() <= 4){
        continue;
    }

    Mat H = findHomography( prev, curr, RANSAC );
    Mat modified;
    warpPerspective(curr_frame, modified, H, curr_frame.size());

    imshow("Original Video", curr_frame);
    imshow("Modified Video", modified);
    waitKey(30);
}
waitKey(0);
The original and modified videos are in the following links respectively:
https://www.dropbox.com/s/ilqwdocgankfe6n/input.mp4?dl=0
https://www.dropbox.com/s/1kd981j92drkfrn/output.mwv?dl=0
Please help.
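A note on the likely cause: findHomography(prev, curr, RANSAC) estimates the motion from the previous frame to the current one, so warping curr_frame by H applies that motion a second time instead of undoing it, and the per-frame motions are never accumulated. A minimal sketch of the usual correction, assuming H maps prev to curr as above (not a drop-in fix; H_accum must be declared before the loop):

// Sketch only: cancel the accumulated inter-frame motion.
// H_accum is the total motion from the first frame to curr_frame.
Mat H_accum = Mat::eye(3, 3, CV_64F);   // before the for(;;) loop

// ... inside the loop, after H = findHomography(prev, curr, RANSAC):
H_accum = H * H_accum;                  // compose frame-to-frame motions
Mat modified;
warpPerspective(curr_frame, modified, H_accum.inv(),   // inverse undoes the motion
                curr_frame.size());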

Related

How to stitch an above and a below image together after finding the homography matrix?

I am trying to stitch an above image and a below image together after finding the homography matrix, but I can't get both images into the final result. This is my code:
#include <stdio.h>
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/gpu/gpu.hpp>
#include <opencv2/stitching/detail/matchers.hpp>
#include <opencv2/nonfree/features2d.hpp>
using namespace cv;
using namespace std;
Mat TwoInOneOut(Mat Left, Mat Right);
void findHomo(Mat cameraFrame1, Mat cameraFrame2, Mat &H, Mat &result);

int main(){
    Mat img3 = imread("33.jpg");
    Mat imgFRL = imread("resultFRL.jpg");
    Mat H3;
    Mat result3;
    findHomo(imgFRL, img3, H3, result3);
    cout << "homography matrix3: " << H3 << endl;
    imshow("result3", result3);
    //imwrite("result3.jpg", result3);
    waitKey(0);
    return 0;
}

void findHomo(Mat cameraFrame1, Mat cameraFrame2, Mat &H, Mat &result)
{
    int mode = 2;
    if(mode == 0)
    {
        imshow("Left", cameraFrame1);
        imshow("Right", cameraFrame2);
    }
    Mat Left(cameraFrame1.rows, cameraFrame1.cols, CV_8U);
    Mat Right(cameraFrame1.rows, cameraFrame1.cols, CV_8U);
    cvtColor(cameraFrame1, Left, CV_RGB2GRAY, CV_8U);
    cvtColor(cameraFrame2, Right, CV_RGB2GRAY, CV_8U);
    if (mode == 2)
    {
        printf("Homography Matrix Processing\n");
        H = TwoInOneOut(Left, Right);
        mode = 1;
        destroyWindow("Left");
        destroyWindow("Right");
    }
    if(waitKey(30) == 'r')
    {
        printf("normal mode\n");
        destroyWindow("Processing");
        mode = 0;
    }
    if(H.cols == 3 && H.rows == 3)
    {
        Mat Warping( (Left.rows+Right.rows), max(Left.cols,Right.cols)/*Left.cols*3*/, cameraFrame1.depth() );
        result = Warping;
        warpPerspective(cameraFrame2, result, H, Size(result.cols, result.rows));
        imshow("R1", result);
        /*imwrite("R1.jpg", result);*/
        Mat tempWarpImg1(result, Rect(0, 0, Left.cols, Left.rows));
        cameraFrame1.copyTo(tempWarpImg1);
        //cameraFrame2.copyTo(tempWarpImg2);
        //if(mode == 1)
        //    imshow("Processing", result);
    }
    //if (waitKey(27) == 'q') destroyAllWindows();
}
Mat TwoInOneOut(Mat Left, Mat Right)
{
    Mat H;
    if(Left.channels() != 1 || Right.channels() != 1)
    {
        printf("Channel Error\n");
        return H;
    }

    //-- Detect the keypoints using SURF Detector
    int minHessian = 100; //1500;
    SurfFeatureDetector detector( minHessian );
    SurfDescriptorExtractor extractor;

    //-- A
    std::vector<KeyPoint> kp_Left;
    detector.detect( Left, kp_Left );
    Mat des_Left;
    extractor.compute( Left, kp_Left, des_Left );

    //-- B
    std::vector<KeyPoint> kp_Right;
    detector.detect( Right, kp_Right );
    Mat des_Right;
    extractor.compute( Right, kp_Right, des_Right );

    //-- Match
    std::vector< vector< DMatch > > matches;
    FlannBasedMatcher matcher;
    matcher.knnMatch(des_Left, des_Right, matches, 2);
    //matcher.knnMatch(des_Right, des_Left, matches, 2);

    std::vector< DMatch > good_matches;
    good_matches.reserve(matches.size());
    for (size_t i = 0; i < matches.size(); ++i)
    {
        if (matches[i].size() < 2)
            continue;
        const DMatch &m1 = matches[i][0];
        const DMatch &m2 = matches[i][1];
        if(m1.distance <= 0.7 * m2.distance)
            good_matches.push_back(m1);
    }

    //-- Draw only "good" matches
    Mat img_matches;
    drawMatches( Left, kp_Left, Right, kp_Right, good_matches,
                 img_matches, Scalar::all(-1), Scalar::all(-1),
                 vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
    imshow("Match", img_matches);

    //-- Find H
    if(good_matches.size() > 20 )
    {
        vector<Point2f> LeftMatchPT;
        vector<Point2f> RightMatchPT;
        for( unsigned int i = 0; i < good_matches.size(); i++ )
        {
            LeftMatchPT.push_back( kp_Left[ good_matches[i].queryIdx ].pt );
            RightMatchPT.push_back( kp_Right[ good_matches[i].trainIdx ].pt );
        }
        H = findHomography( RightMatchPT, LeftMatchPT, CV_RANSAC );
    }
    return H;
}
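One likely issue: warpPerspective only renders pixels that land inside the destination rectangle, so any part of the warped image that H maps to negative coordinates is silently cropped. A common remedy is to size the canvas from the warped corners and shift everything into view with a translation. Below is a sketch under the same assumptions as TwoInOneOut above (H maps Right into Left's frame); the corner/offset handling is the standard trick, not the original poster's code:

// Sketch: compute where Right's corners land, then build a canvas that
// contains both the warped Right image and the unwarped Left image.
std::vector<Point2f> corners, warped;
corners.push_back(Point2f(0, 0));
corners.push_back(Point2f((float)Right.cols, 0));
corners.push_back(Point2f((float)Right.cols, (float)Right.rows));
corners.push_back(Point2f(0, (float)Right.rows));
perspectiveTransform(corners, warped, H);

// Bounding box of the warped corners, unioned with Left's rectangle.
Rect box = boundingRect(warped) | Rect(0, 0, Left.cols, Left.rows);

// Translation that shifts the whole box to start at (0, 0).
Mat T = (Mat_<double>(3,3) << 1, 0, -box.x,  0, 1, -box.y,  0, 0, 1);

Mat canvas;
warpPerspective(cameraFrame2, canvas, T * H, box.size());
cameraFrame1.copyTo(canvas(Rect(-box.x, -box.y, cameraFrame1.cols, cameraFrame1.rows)));
result = canvas;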

OpenCV - How to match multiple instances of a template in an image using C++

I need some help trying to match abnormal blood cells against an original cell template.
After some searching I found SURF and SIFT feature extraction.
I have a template image and an original (scene) image.
This is my code:
int main(int argc, char** argv)
{
    Mat img_scene = imread("d:\\projimg\\last.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    Mat img_object = imread("d:\\projimg\\lasttmp.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    //Mat img_object = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
    //Mat img_scene = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
    if (!img_object.data || !img_scene.data)
    {
        std::cout << " --(!) Error reading images " << std::endl;
        return -1;
    }

    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 200;
    SurfFeatureDetector detector(minHessian);
    std::vector<KeyPoint> keypoints_object, keypoints_scene;
    detector.detect(img_object, keypoints_object);
    detector.detect(img_scene, keypoints_scene);

    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat descriptors_object, descriptors_scene;
    extractor.compute(img_object, keypoints_object, descriptors_object);
    extractor.compute(img_scene, keypoints_scene, descriptors_scene);

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match(descriptors_object, descriptors_scene, matches);
    sort(matches.begin(), matches.end());  // sort by distance first...
    matches.resize(10);                    // ...then keep the 10 best

    double max_dist = 0; double min_dist = 100;
    //-- Quick calculation of max and min distances between keypoints
    for (int i = 0; i < matches.size(); i++)
    {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }
    printf("-- Max dist : %f \n", max_dist);
    printf("-- Min dist : %f \n", min_dist);

    //-- Keep only "good" matches (i.e. whose distance is less than 3*min_dist )
    std::vector< DMatch > good_matches;
    for (int i = 0; i < matches.size(); i++)
    {
        if (matches[i].distance < 3 * min_dist)
        {
            good_matches.push_back(matches[i]);
        }
    }

    Mat img_matches;
    drawMatches(img_object, keypoints_object, img_scene, keypoints_scene,
                good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

    //-- Localize the object
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;
    for (int i = 0; i < good_matches.size(); i++)
    {
        //-- Get the keypoints from the good matches
        obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
        scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
    }

    imshow("ii", img_matches);
    waitKey();
    return 0;
}
Can anyone help me match all similar instances of the template?
Thanks, all.
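Feature matching with SURF/SIFT plus a homography localizes a single instance of the object; to find many near-identical cells, sliding-window template matching with a score threshold is often the simpler tool. A minimal sketch using the same OpenCV 2.4-style API as the question (the 0.75 threshold and the peak-suppression rectangle are assumptions to tune):

// Sketch: multi-instance template matching with matchTemplate.
Mat scene = imread("d:\\projimg\\last.jpg", CV_LOAD_IMAGE_GRAYSCALE);
Mat templ = imread("d:\\projimg\\lasttmp.jpg", CV_LOAD_IMAGE_GRAYSCALE);
Mat score;
matchTemplate(scene, templ, score, CV_TM_CCOEFF_NORMED);
for (;;)
{
    double maxVal; Point maxLoc;
    minMaxLoc(score, 0, &maxVal, 0, &maxLoc);
    if (maxVal < 0.75) break;                           // no more instances above threshold
    rectangle(scene, Rect(maxLoc, templ.size()), Scalar(255), 2);
    // Blank out this peak so minMaxLoc finds the next-best instance.
    rectangle(score, Rect(maxLoc.x - templ.cols/2, maxLoc.y - templ.rows/2,
                          templ.cols, templ.rows), Scalar(-1), CV_FILLED);
}
imshow("instances", scene);
waitKey();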

Feature point matching between two images using SIFT

I wonder why the matching between the two images using SIFT is not good.
The matched image pair and the original images are shown in the post.
My program is as follows:
int imagematching(Mat &img1, Mat &img2, std::vector<Point2f> &first_keypoints, std::vector<Point2f> &second_keypoints){
    int max_keypoints = 500;
    Ptr<SIFT> detector = SIFT::create();
    Ptr<SIFT> extractor = SIFT::create();

    //-- Step 1: Key point detection
    std::vector<KeyPoint> keypoints1, keypoints2;
    //-- Step 2: Calculate descriptors (feature vectors)
    Mat descriptors1, descriptors2;
    detector->detect( img1, keypoints1 );
    detector->detect( img2, keypoints2 );
    extractor->compute(img1, keypoints1, descriptors1);
    extractor->compute(img2, keypoints2, descriptors2);

    FlannBasedMatcher matcher;
    vector<DMatch> matches;
    matcher.match(descriptors1, descriptors2, matches);

    double max_dist = 0; double min_dist = 999999;
    //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < descriptors1.rows; i++ )
    {
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }
    printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );

    //-- Keep only "good" matches (i.e. whose distance is less than 3*min_dist )
    std::vector< DMatch > good_matches;
    for( int i = 0; i < descriptors1.rows; i++ )
    {
        if( matches[i].distance < 3*min_dist )
        { good_matches.push_back( matches[i] ); }
    }
    matches.clear();

    Mat img_matches;
    drawMatches( img1, keypoints1, img2, keypoints2,
                 good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                 vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
    descriptors1.release();
    descriptors2.release();

    //-- Localize the object
    //std::vector<Point2f> first_keypoints;
    //std::vector<Point2f> second_keypoints;
    for( int i = 0; i < good_matches.size(); i++ )
    {
        //cout << i << " :";
        //-- Get the keypoints from the good matches
        if( keypoints1[ good_matches[i].queryIdx ].pt.x > 0 && keypoints1[ good_matches[i].queryIdx ].pt.y > 0
            && keypoints2[ good_matches[i].trainIdx ].pt.x > 0 && keypoints2[ good_matches[i].trainIdx ].pt.y > 0 ){
            first_keypoints.push_back( keypoints1[ good_matches[i].queryIdx ].pt );
            //cout << "first point" << keypoints1[ good_matches[i].queryIdx ].pt << endl;
            second_keypoints.push_back( keypoints2[ good_matches[i].trainIdx ].pt );
            //cout << "second point" << keypoints2[ good_matches[i].trainIdx ].pt << endl;
        }
    }
    keypoints1.clear();
    keypoints2.clear();
    good_matches.clear();

    //-- Show detected matches
    imshow( "Good Matches & Object detection", img_matches );
    waitKey(0);
    return SUCCESS;  // SUCCESS assumed to be defined elsewhere in the project
}
SIFT might be rotation invariant, but it is definitely not perspective invariant.
You will need to add some machine learning - maybe an SVM - to be able to match images taken from different perspectives; SIFT features alone are not enough.
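Before reaching for machine learning, it may also be worth replacing the 3*min_dist filter with Lowe's ratio test, which usually discards ambiguous SIFT matches far more reliably. A minimal sketch, assuming the same descriptors1/descriptors2 as above:

// Sketch: Lowe's ratio test on the two nearest neighbours per descriptor.
FlannBasedMatcher matcher;
std::vector< std::vector<DMatch> > knn_matches;
matcher.knnMatch(descriptors1, descriptors2, knn_matches, 2);

std::vector<DMatch> good_matches;
for( size_t i = 0; i < knn_matches.size(); i++ )
{
    if( knn_matches[i].size() == 2 &&
        knn_matches[i][0].distance < 0.75f * knn_matches[i][1].distance )
        good_matches.push_back( knn_matches[i][0] );   // best match is unambiguous
}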

OpenCV stitching multiple images

I have been working on stitching multiple images in OpenCV in order to create a mosaic.
I followed this link on OpenCV:
Stitch multiple images using OpenCV (Python)
Here's the code that I have so far:
// imgstch.cpp :
//#include "stdafx.h"
#include <opencv/cv.h>
#include <opencv/highgui.h>
#include <iostream>
#include <stdio.h>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#define _CRT_SECURE_NO_WARNINGS
using namespace std;
using namespace cv;
int main()
{
    //-- Input the two images
    cv::Mat img1;
    std::vector<cv::KeyPoint> img1_keypoints;
    cv::Mat img1_descriptors;
    cv::Mat img2;
    std::vector<cv::KeyPoint> img2_keypoints;
    cv::Mat img2_descriptors;
    img1 = cv::imread("/home/ishita/Downloads/ishita/Downloads/Mosaic/b2.JPG");
    img2 = cv::imread("/home/ishita/Downloads/ishita/Downloads/Mosaic/b1.JPG");

    //-- ORB feature detector, extractor and descriptor
    int minHessian = 1800; // note: ORB's first parameter is nfeatures, not a Hessian threshold
    OrbFeatureDetector detector( minHessian );
    OrbDescriptorExtractor extractor;
    detector.detect(img1, img1_keypoints);
    detector.detect(img2, img2_keypoints);
    extractor.compute(img1, img1_keypoints, img1_descriptors);
    extractor.compute(img2, img2_keypoints, img2_descriptors);

    //-- Matching descriptor vectors with a brute force matcher
    BFMatcher matcher(NORM_HAMMING);
    std::vector< DMatch > matches;
    matcher.match( img1_descriptors, img2_descriptors, matches );
    imshow("image1", img1);
    imshow("image2", img2);

    //-- Draw matches
    Mat img_matches;
    drawMatches( img1, img1_keypoints, img2, img2_keypoints, matches, img_matches );
    //-- Show detected matches
    imshow("Matches", img_matches);
    imwrite("/home/ishita/img_stitch/img_matches.jpg", img_matches);

    double max_dist = 0; double min_dist = 10;
    //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < matches.size(); i++ )
    {
        double dist = matches[i].distance;
        if( dist < min_dist && dist > 3 )
        {
            min_dist = dist;
        }
        if( dist > max_dist ) max_dist = dist;
    }

    //-- Keep only "good" matches (i.e. whose distance is less than 3*min_dist )
    std::vector< DMatch > good_matches;
    for( int i = 0; i < matches.size(); i++ )
    {
        //cout << matches[i].distance << endl;
        if( matches[i].distance < 3*min_dist && matches[i].distance > 3 )
        {
            good_matches.push_back( matches[i] );
        }
    }

    //-- Calculate the homography
    vector<Point2f> p1, p2;
    for (unsigned int i = 0; i < matches.size(); i++) {
        p1.push_back(img1_keypoints[matches[i].queryIdx].pt);
        p2.push_back(img2_keypoints[matches[i].trainIdx].pt);
    }
    // Homography
    vector<unsigned char> match_mask;
    Mat H = findHomography(p1, p2, CV_RANSAC);
    cout << "H = " << endl << " " << H << endl << endl;

    //-- Use the homography matrix to warp the images
    cv::Mat result;
    result = img1.clone();
    warpPerspective(img1, result, H, cv::Size(img1.cols+img2.cols, img1.rows));
    cv::Mat half(result, cv::Rect(0, 0, img2.cols, img2.rows));
    img2.copyTo(half);
    imwrite("/home/ishita/img_stitch/result.jpg", result);
    imshow( "Result", result );

    //-- For images 2 and 3
    cv::Mat img3;
    std::vector<cv::KeyPoint> img3_keypoints;
    cv::Mat img3_descriptors;
    img3 = cv::imread("/home/ishita/Downloads/ishita/Downloads/Mosaic/b3.JPG");
    //detector.detect(img2, img2_keypoints);
    detector.detect(img3, img3_keypoints);
    //extractor.compute(img2, img2_keypoints, img2_descriptors);
    extractor.compute(img3, img3_keypoints, img3_descriptors);
    matcher.match( img1_descriptors, img3_descriptors, matches );
    //imshow("image2", img1);
    imshow("image3", img3);

    //-- Draw matches
    Mat img_matches2;
    drawMatches( img1, img1_keypoints, img3, img3_keypoints, matches, img_matches2 );
    //-- Show detected matches
    imshow("Matches2", img_matches2);
    imwrite("/home/ishita/img_stitch/img_matches.jpg", img_matches2);

    max_dist = 0; min_dist = 10;
    //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < matches.size(); i++ )
    {
        double dist = matches[i].distance;
        if( dist < min_dist && dist > 3 )
        {
            min_dist = dist;
        }
        if( dist > max_dist ) max_dist = dist;
    }

    //-- Keep only "good" matches (i.e. whose distance is less than 3*min_dist )
    std::vector< DMatch > good_matches2;
    for( int i = 0; i < matches.size(); i++ )
    {
        //cout << matches[i].distance << endl;
        if( matches[i].distance < 3*min_dist && matches[i].distance > 3 )
        {
            good_matches2.push_back( matches[i] );
        }
    }

    //-- Calculate the homography
    vector<Point2f> p3, p4;
    for (unsigned int i = 0; i < matches.size(); i++) {
        p3.push_back(img1_keypoints[matches[i].queryIdx].pt);
        p4.push_back(img3_keypoints[matches[i].trainIdx].pt);
    }
    // Homography
    vector<unsigned char> match_mask2;
    Mat H2 = findHomography(p3, p4, CV_RANSAC);
    Mat H3 = H * H2;
    cout << "H2 = " << endl << " " << H2 << endl << endl;

    //-- Use the homography matrix to warp the images
    cv::Mat result2;
    result2 = result.clone();
    warpPerspective(result, result2, H3, cv::Size(img3.cols+result.cols, result.rows));
    cv::Mat half2(result, cv::Rect(0, 0, img3.cols, img3.rows));
    img3.copyTo(half2);
    imwrite("/home/ishita/img_stitch/result.jpg", result2);
    imshow( "Result2", result2 );

    waitKey(0);
    return 0;
}
The result of stitching the first two images is as required, but the result of stitching the third image is not appropriate.
What could be wrong with the logic or with the implementation?
The images and the result can be found here:
https://drive.google.com/folderview?id=0BxXVoeIUgVW7fnFMbExhTzN4QnRueXZpQmpILTZCWFZoTlZEckxfWV83VjkxMmFNSXdLVWM&usp=sharing
This answer is late, but it may be helpful for others.
In some algorithms the images being stitched must have the same dimensions.
If that is the problem, try the workaround below:
1. Run the program once for (img1, img2) => result1.
2. Then run it for (img2, img3) => result2.
3. Finally, stitch (result1, result2) => final.
Here is another example that takes all the images at once:
https://www.geeksforgeeks.org/stitching-input-images-panorama-using-opencv-c/
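For completeness, a minimal sketch of OpenCV's built-in high-level stitcher, which handles feature matching, homography estimation, seam finding and blending internally. This assumes OpenCV 3.3 or later, where Stitcher::create takes a mode (the question's code targets the older 2.4 API), and the image paths are placeholders:

#include <opencv2/stitching.hpp>
#include <opencv2/highgui.hpp>
#include <vector>

int main()
{
    std::vector<cv::Mat> imgs;
    imgs.push_back(cv::imread("b1.JPG"));   // hypothetical paths
    imgs.push_back(cv::imread("b2.JPG"));
    imgs.push_back(cv::imread("b3.JPG"));

    cv::Mat pano;
    cv::Ptr<cv::Stitcher> stitcher = cv::Stitcher::create(cv::Stitcher::PANORAMA);
    cv::Stitcher::Status status = stitcher->stitch(imgs, pano);
    if (status != cv::Stitcher::OK)
        return -1;                          // stitching failed (e.g. too few matches)

    cv::imwrite("pano.jpg", pano);
    return 0;
}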

How to access point locations from an OpenCV matcher?

I am using the FLANN matcher algorithm to match interest points in two pictures (the code is displayed below).
At one point the code builds a list of matched points:
std::vector<DMatch> good_matches;
I would like to get each matched point's location (x, y) in both pictures so that I can create a displacement map. How can I access these point locations?
Cheers,
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace cv;
void readme();
/** @function main */
int main(int argc, char** argv) {
    if (argc != 3) {
        readme();
        return -1;
    }

    // Load the images in grayscale
    Mat img_1 = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
    Mat img_2 = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);

    // Check that the images could be loaded
    if (!img_1.data || !img_2.data) {
        std::cout << " --(!) Error reading images " << std::endl;
        return -1;
    }

    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 400;
    SurfFeatureDetector detector(minHessian);
    std::vector<KeyPoint> keypoints_1, keypoints_2;
    detector.detect(img_1, keypoints_1);
    detector.detect(img_2, keypoints_2);

    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat descriptors_1, descriptors_2;
    extractor.compute(img_1, keypoints_1, descriptors_1);
    extractor.compute(img_2, keypoints_2, descriptors_2);

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    FlannBasedMatcher matcher;
    std::vector<DMatch> matches;
    matcher.match(descriptors_1, descriptors_2, matches);

    double max_dist = 0;
    double min_dist = 100;
    //-- Quick calculation of max and min distances between keypoints
    for (int i = 0; i < descriptors_1.rows; i++) {
        double dist = matches[i].distance;
        // printf("-- DISTANCE = [%f]\n", dist);
        if (dist < min_dist)
            min_dist = dist;
        if (dist > max_dist)
            max_dist = dist;
    }
    printf("-- Max dist : %f \n", max_dist);
    printf("-- Min dist : %f \n", min_dist);

    //-- Keep only "good" matches (i.e. whose distance is less than 2*min_dist )
    //-- PS.- radiusMatch can also be used here.
    std::vector<DMatch> good_matches;
    for (int i = 0; i < descriptors_1.rows; i++) {
        if (matches[i].distance < 2 * min_dist) {
            good_matches.push_back(matches[i]);
        }
    }

    //-- Draw only "good" matches
    Mat img_matches;
    drawMatches(img_1, keypoints_1, img_2, keypoints_2, good_matches,
                img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(),
                DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

    //-- Show detected matches
    imshow("Good Matches", img_matches);
    for (int i = 0; i < good_matches.size(); i++) {
        printf("-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i,
               good_matches[i].queryIdx, good_matches[i].trainIdx);
    }
    waitKey(0);
    return 0;
}

/** @function readme */
void readme() {
    std::cout << " Usage: ./SURF_FlannMatcher <img1> <img2>" << std::endl;
}
matched_points1 and matched_points2 will hold the corresponding points in the first and second images. Because the match was run as matcher.match(descriptors_1, descriptors_2, matches), good_matches[i].queryIdx indexes keypoints_1 (the first image) and good_matches[i].trainIdx indexes keypoints_2 (the second image). Push the corresponding KeyPoint::pt values into the matched-point vectors to obtain the (x, y) point vectors of the matches.
long num_matches = good_matches.size();
vector<Point2f> matched_points1;
vector<Point2f> matched_points2;
for (int i = 0; i < num_matches; i++)
{
    int idx1 = good_matches[i].queryIdx;   // index into keypoints_1 (first image)
    int idx2 = good_matches[i].trainIdx;   // index into keypoints_2 (second image)
    matched_points1.push_back(keypoints_1[idx1].pt);
    matched_points2.push_back(keypoints_2[idx2].pt);
}
Now you have two vectors of the matched points. I think that's what you're asking?
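From there, the displacement map the question mentions can be sketched as the per-match offset between the two point sets (a hypothetical follow-up, not part of the original answer):

// Sketch: per-match displacement vectors (dx, dy) between the two images.
std::vector<Point2f> displacement;
displacement.reserve(num_matches);
for (int i = 0; i < num_matches; i++)
    displacement.push_back(matched_points2[i] - matched_points1[i]);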