Automatic image stitching from video with OpenCV - C++

Hi, I get an error when I stitch frames from a video.
Here's my code:
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
using namespace std;
using namespace cv;
Mat Stitching(Mat image1, Mat image2){
    Mat gray_image1;
    Mat gray_image2;

    // Convert to Grayscale
    cvtColor( image1, gray_image1, CV_RGB2GRAY );
    cvtColor( image2, gray_image2, CV_RGB2GRAY );

    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 10;
    SurfFeatureDetector detector( minHessian );
    std::vector< KeyPoint > keypoints_object, keypoints_scene;
    detector.detect( gray_image1, keypoints_object );
    detector.detect( gray_image2, keypoints_scene );

    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat descriptors_object, descriptors_scene;
    extractor.compute( gray_image1, keypoints_object, descriptors_object );
    extractor.compute( gray_image2, keypoints_scene, descriptors_scene );

    //-- Step 3: Match descriptor vectors using the FLANN matcher
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match( descriptors_object, descriptors_scene, matches );

    double max_dist = 0; double min_dist = 100;

    //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < descriptors_object.rows; i++ )
    {
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }

    printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );

    //-- Use only "good" matches (i.e. whose distance is less than 3*min_dist)
    std::vector< DMatch > good_matches;
    for( int i = 0; i < descriptors_object.rows; i++ )
    {
        if( matches[i].distance < 3*min_dist )
        { good_matches.push_back( matches[i] ); }
    }

    std::vector< Point2f > obj;
    std::vector< Point2f > scene;
    for( int i = 0; i < good_matches.size(); i++ )
    {
        //-- Get the keypoints from the good matches
        obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
        scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
    }

    // Find the homography matrix
    Mat H = findHomography( obj, scene, CV_RANSAC );

    // Use the homography matrix to warp the images
    cv::Mat result;
    warpPerspective( image1, result, H, cv::Size(800, 600) );
    cv::Mat half( result, cv::Rect(0, 0, image2.cols, image2.rows) );
    image2.copyTo(half);
    //imshow( "Result", result );

    return result;
}
/** @function main */
int main( int argc, char** argv )
{
    // Load the images
    //Mat image1 = imread( "E:\\Tugas Akhir\\image\\city2.jpg" );
    //Mat image2 = imread( "E:\\Tugas Akhir\\image\\city1.jpg" );
    char *fileName = "E:\\Tugas Akhir\\Video Master\\indv_img_3a.avi";

    /* Create a window */
    cvNamedWindow("Stitching", CV_WINDOW_AUTOSIZE);

    /* Capture frames from the video file */
    CvCapture* capture = cvCreateFileCapture(fileName);

    /* Create an IplImage to point to each frame */
    IplImage* frame;
    IplImage before_frame;
    Mat image1;
    Mat image2;
    cv::Mat result;

    /* Loop until the frames end or ESC is pressed */
    int loop = 0;
    //imshow( "Result", Stitching(image1, image2) );
    while(1) {
        frame = cvQueryFrame(capture);
        if(loop > 0){
            if(!frame) break;
            image2 = Mat(frame, false);
            result = Stitching(image1, image2);
            before_frame = result;
            frame = &before_frame;
            image1 = result;
            image2.release();
            //imshow("Stitching", frame);
            cvShowImage("Stitching", frame);
            //break;
        }else if(loop == 0){
            //Mat aimage1(frame);
            image1 = Mat(frame, false);
        }
        loop++;
        char c = cvWaitKey(33);
        if(c == 27) break;
    }

    cvReleaseCapture(&capture);
    /* Delete the window */
    // cvDestroyWindow("Stitching");
    // return EXIT_SUCCESS;
    waitKey(0);
    return 0;
}
If I load from image files it works and the images are stitched, but when I try to stitch images from successive video frames it shows this error:
First-chance exception at 0x000007f886dd64a8 in matchingHomography.exe: Microsoft C++ exception: cv::Exception at memory location 0x0080e3b0..
Unhandled exception at 0x000007f886dd64a8 in matchingHomography.exe: Microsoft C++ exception: cv::Exception at memory location 0x0080e3b0..
The error points to this line:
Mat H = findHomography( obj, scene, CV_RANSAC );
What does the error mean, and how do I solve it?
Thanks.

First off, you seem to be mixing the C and C++ interfaces of OpenCV (see the OpenCV VideoCapture doc). For better readability, stick to one of them; since you are using C++, just stick to the C++ functions.
Since loading from an image works but video doesn't, your video loading is probably the problem.
Try using cv::imshow("testWindow", frame) to show the frame loaded from the video. Most likely no frame was loaded.
One possible cause is that the video file is encoded in a format not supported by OpenCV. To check, you can also call grab() and then retrieve(). The grab() function returns whether or not it was successful. Try grabbing a couple of frames; if all of them fail, you probably don't have the codec necessary to decode this video.
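A minimal sketch of that check, using the C++ VideoCapture interface and the file path from your code (adjust as needed; this is a diagnostic snippet, not your stitching program):
#include "opencv2/highgui/highgui.hpp"
#include <iostream>

int main()
{
    // C++ replacement for cvCreateFileCapture
    cv::VideoCapture capture("E:\\Tugas Akhir\\Video Master\\indv_img_3a.avi");
    if (!capture.isOpened()) {
        std::cout << "Could not open the video file" << std::endl;
        return -1;
    }

    // Try to grab a few frames; if every grab fails,
    // the codec needed to decode this video is probably missing.
    for (int i = 0; i < 5; i++) {
        if (!capture.grab()) {
            std::cout << "grab() failed on frame " << i << std::endl;
            continue;
        }
        cv::Mat frame;
        capture.retrieve(frame);
        if (frame.empty()) {
            std::cout << "retrieve() returned an empty frame" << std::endl;
            continue;
        }
        cv::imshow("testWindow", frame); // visually confirm the frame decoded
        cv::waitKey(0);
    }
    return 0;
}
If the frames show up here, the problem is in the stitching; if not, it is in the video loading or codec.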

Related

Fail to load .xml file when trying to run OpenCV objectDetection

I was trying to run the facial detection sample C++ code with OpenCV 3.0 under macOS Sierra version 10.12.5. However, it failed to load the ".xml" files. I tried using both relative and absolute paths to "haarcascade_frontalface_alt.xml" and "haarcascade_eye_tree_eyeglasses.xml", but that did not solve the problem for me. I compiled the code successfully but got this error message:
--(!)Error loading face cascade
I think there's some problem with the load method of the CascadeClassifier. I read some threads, but none gives specific instructions on solving this matter. I'd be grateful for specific instructions. Thank you all so much!
Here is the sample code:
#include "opencv2/objdetect.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
/** Function Headers */
void detectAndDisplay( Mat frame );
/** Global variables */
String face_cascade_name = "/my/path/to/haarcascade_frontalface_alt.xml";
String eyes_cascade_name = "/my/path/to/haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
String window_name = "Capture - Face detection";
/** #function main */
int main( int argc, const char** argv )
{
// CommandLineParser parser(argc, argv,
// "{help h||}"
// "{face_cascade|../../data/haarcascades/haarcascade_frontalface_alt.xml|}"
// "{eyes_cascade|../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml|}");
// cout << "\nThis program demonstrates using the cv::CascadeClassifier class to detect objects (Face + eyes) in a video stream.\n"
// "You can use Haar or LBP features.\n\n";
// parser.printMessage();
// face_cascade_name = parser.get<string>("face_cascade");
// eyes_cascade_name = parser.get<string>("eyes_cascade");
VideoCapture capture;
Mat frame;
//-- 1. Load the cascades
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading face cascade\n"); return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading eyes cascade\n"); return -1; };
//-- 2. Read the video stream
capture.open( 0 );
if ( ! capture.isOpened() ) { printf("--(!)Error opening video capture\n"); return -1; }
while ( capture.read(frame) )
{
if( frame.empty() )
{
printf(" --(!) No captured frame -- Break!");
break;
}
//-- 3. Apply the classifier to the frame
detectAndDisplay( frame );
char c = (char)waitKey(10);
if( c == 27 ) { break; } // escape
}
return 0;
}
/** #function detectAndDisplay */
void detectAndDisplay( Mat frame )
{
std::vector<Rect> faces;
Mat frame_gray;
cvtColor( frame, frame_gray, COLOR_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect faces
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CASCADE_SCALE_IMAGE, Size(30, 30) );
for ( size_t i = 0; i < faces.size(); i++ )
{
Point center( faces[i].x + faces[i].width/2, faces[i].y + faces[i].height/2 );
ellipse( frame, center, Size( faces[i].width/2, faces[i].height/2 ), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
Mat faceROI = frame_gray( faces[i] );
std::vector<Rect> eyes;
//-- In each face, detect eyes
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CASCADE_SCALE_IMAGE, Size(30, 30) );
for ( size_t j = 0; j < eyes.size(); j++ )
{
Point eye_center( faces[i].x + eyes[j].x + eyes[j].width/2, faces[i].y + eyes[j].y + eyes[j].height/2 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( frame, eye_center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
}
}
//-- Show what you got
imshow( window_name, frame );
}

cv::drawMatches(...) not drawing

I've been trying to get a SURF implementation working and I have been having some trouble. Now I finally think I've got it 'right', but I have one small problem.
The problem is pretty straightforward and I am sure it's something simple, but I can't solve it. The image displays fine and everything, but the matches are not drawn. Here is my code.
#include <stdio.h>
#include "opencv2/xfeatures2d.hpp"
#include <opencv2/opencv.hpp>
#include "opencv2/features2d/features2d.hpp"

using namespace cv;
using namespace std;

int main(int argc, char** argv ){
    if ( argc != 3 ) {
        printf("usage: DisplayImage.out <Image1_Path> <Image2_Path>\n");
        return -1;
    }

    Mat image, image2;
    image = imread( argv[1], 0 );
    image2 = imread( argv[2], 0 );
    if ( !image.data ){
        printf("No image data \n");
        return -1;
    }
    if ( !image2.data ){
        printf("No image data \n");
        return -1;
    }

    /*
    Ptr<FeatureDetector> detector = FastFeatureDetector::create(15);
    vector<KeyPoint> keypoints1, keypoints2;
    detector->detect(image, keypoints1);
    detector->detect(image2, keypoints2);

    Ptr<xfeatures2d::SURF> extractor = xfeatures2d::SURF::create();
    Mat descriptors1, descriptors2;
    extractor->compute(image, keypoints1, descriptors1);
    extractor->compute(image2, keypoints2, descriptors2);

    BFMatcher::BFMatcher matcher(L2<float>);
    //BruteForceMatcher<L2<float> > matcher;
    vector<DMatch> matches;
    matcher.match(descriptors1, descriptors2, matches);
    */

    Ptr<xfeatures2d::SURF> surf = cv::xfeatures2d::SURF::create();
    vector<KeyPoint> keypoints1, keypoints2;
    Mat descriptors1, descriptors2;
    surf->detectAndCompute(image, noArray(), keypoints1, descriptors1, true);

    //BFMatcher::BFMatcher matcher(L2<float>);
    BFMatcher::BFMatcher matcher(NORM_HAMMING);
    vector<DMatch> matches;
    Mat res;
    matcher.match(descriptors1, descriptors2, matches);
    drawMatches(image, keypoints1, image2, keypoints2, matches, res);

    namedWindow("Display Image", WINDOW_AUTOSIZE );
    imshow("Display Image", res);
    waitKey(0);
    return 0;
}

Haar detection - save Mat of image in order to get and show previous frame

I am using Haar detection in my hobby project; the detection is done on a video stream. Once Haar detects something, I imshow it. This is how it looks:
Mat faceROI = frame_gray( faces[i] );
imshow( "Detection", faceROI );
While the video is running I am getting detections, and the Mat is getting updated/overwritten with a new image of the object. What I want to do now is to save the Mat so that when a new detection occurs I get both the previous and the current frame. I guess I have to save the Mat in some way and then update it so that current -> previous, and so on. I want to be able to do this:
imshow( "Previous detection", previousROI );
In case you want to see the whole code, I am following this: http://docs.opencv.org/2.4/doc/tutorials/objdetect/cascade_classifier/cascade_classifier.html
Help is much appreciated!
You might have an easier time if you don't split the detect/display step into a separate function. I've modified the OpenCV documentation code below. Keep in mind I haven't compiled or run this, so there may be some errors, but it should give you an idea of a different way to address the issue.
/** @function main */
int main( int argc, const char** argv )
{
    CvCapture* capture;
    Mat frame;

    //-- 1. Load the cascades
    if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
    if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };

    //-- 2. Read the video stream
    capture = cvCaptureFromCAM( -1 );
    if( capture )
    {
        // store faces here
        std::vector<Mat> prev_faces;
        std::vector<Mat> current_faces;

        while( true )
        {
            frame = cvQueryFrame( capture );

            //-- 3. Apply the classifier to the frame
            if( !frame.empty() )
            {
                std::vector<Rect> faces;
                Mat frame_gray;

                cvtColor( frame, frame_gray, CV_BGR2GRAY );
                equalizeHist( frame_gray, frame_gray );

                //-- Detect faces
                face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
                for( size_t i = 0; i < faces.size(); i++ )
                {
                    Mat faceROI = frame_gray( faces[i] );
                    current_faces.push_back(faceROI); // adds all of the current detections to a vector
                }

                if (prev_faces.size() > 0 && current_faces.size() > 0)
                {
                    // do stuff with prev_faces and current_faces
                    // for(size_t i = 0; i < prev_faces.size(); i++){
                    //     imshow("previous", prev_faces[i]);
                    // }
                    // for(size_t i = 0; i < current_faces.size(); i++){
                    //     imshow("current", current_faces[i]);
                    // }
                    // imshow("stuff", other_cool_Mats_I_made_by_analysing_and_comparing_prev_and_current);
                }
                prev_faces = current_faces;
                current_faces.clear();
            }
            else
            { printf(" --(!) No captured frame -- Break!"); break; }

            int c = waitKey(10);
            if( (char)c == 'c' ) { break; }
        }
    }
    return 0;
}

OpenCV Help - Error: no operator "=" Matches these operands. operand types are cv::Mat = IplImage*

I have recently started learning how to use OpenCV and I have been following the tutorials hosted on their website. I am using OpenCV 3.0; however, it seems some of the tutorial information is out of date.
I am on the tutorial "Cascade Classifier", link:
http://www.docs.opencv.org/doc/tutorials/objdetect/cascade_classifier/cascade_classifier.html
The provided example code does not run for me and I cannot understand why. I have included the example code below:
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
/** Function Headers */
void detectAndDisplay( Mat frame );
/** Global variables */
String face_cascade_name = "haarcascade_frontalface_alt.xml";
String eyes_cascade_name = "haarcascade_eye_tree_eyeglasses.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
string window_name = "Capture - Face detection";
RNG rng(12345);
/** #function main */
int main( int argc, const char** argv )
{
CvCapture* capture;
Mat frame;
//-- 1. Load the cascades
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; };
//-- 2. Read the video stream
capture = cvCaptureFromCAM( -1 );
if( capture )
{
while( true )
{
frame = cvQueryFrame( capture );
//-- 3. Apply the classifier to the frame
if( !frame.empty() )
{ detectAndDisplay( frame ); }
else
{ printf(" --(!) No captured frame -- Break!"); break; }
int c = waitKey(10);
if( (char)c == 'c' ) { break; }
}
}
return 0;
}
/** #function detectAndDisplay */
void detectAndDisplay( Mat frame )
{
std::vector<Rect> faces;
Mat frame_gray;
cvtColor( frame, frame_gray, CV_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect faces
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
for( size_t i = 0; i < faces.size(); i++ )
{
Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
Mat faceROI = frame_gray( faces[i] );
std::vector<Rect> eyes;
//-- In each face, detect eyes
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );
for( size_t j = 0; j < eyes.size(); j++ )
{
Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( frame, center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
}
}
//-- Show what you got
imshow( window_name, frame );
}
The error for me arises on line 38, frame = cvQueryFrame( capture );
The "=" is underlined in red and gives the error message displayed in the title of the question.
I'm sorry if the code is not displayed correctly; this is my first time asking a question.
No fear, it's not exactly your fault: you stumbled over outdated tutorial code. The arcane C API is no longer adequate today (and won't work with 3.0).
Please replace:
CvCapture* capture;
capture = cvCaptureFromCAM( -1 );
if( capture )
{
with:
VideoCapture capture(-1);
if (capture.isOpened())
{
and:
frame = cvQueryFrame( capture );
with:
capture.read(frame);
Also, the OpenCV 3.0 docs are here: http://docs.opencv.org/ref/master/
(Your sample code is from 2.4.x.)
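Put together, here is a sketch of how main() could look with the C++ API; it relies on the globals (face_cascade, etc.) and the detectAndDisplay() function already defined in your file:
int main( int argc, const char** argv )
{
    //-- 1. Load the cascades, as before
    if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; }
    if( !eyes_cascade.load( eyes_cascade_name ) ){ printf("--(!)Error loading\n"); return -1; }

    //-- 2. Read the video stream with the C++ interface
    VideoCapture capture(-1);   // opens the default camera
    if( !capture.isOpened() ) { printf("--(!)Error opening video capture\n"); return -1; }

    Mat frame;
    while( capture.read(frame) )   // grabs and decodes the next frame
    {
        if( frame.empty() )
        { printf(" --(!) No captured frame -- Break!"); break; }

        //-- 3. Apply the classifier to the frame
        detectAndDisplay( frame );

        int c = waitKey(10);
        if( (char)c == 'c' ) { break; }
    }
    return 0;
}
Since capture.read() already returns false when no frame could be grabbed, the loop condition doubles as the end-of-stream check.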

SURF Feature extraction and Keypoint match based on FlannBasedMatcher

The following is my code, which extracts features using SURF and matches the keypoints using FlannBasedMatcher.
Mat object = imread("S6E0.bmp", CV_LOAD_IMAGE_GRAYSCALE);
if( !object.data )
{
    // std::cout << "Error reading object " << std::endl;
    return -2;
}

// Detect the keypoints using SURF Detector
int minHessian = 500;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> kp_object;
detector.detect( object, kp_object );

// Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat des_object;
extractor.compute( object, kp_object, des_object );

FlannBasedMatcher matcher;
char key = 'a';

//VideoCapture cap(0);
namedWindow("Good Matches");

std::vector<Point2f> obj_corners(4);
// Get the corners from the object
obj_corners[0] = cvPoint( 0, 0 );
obj_corners[1] = cvPoint( object.cols, 0 );
obj_corners[2] = cvPoint( object.cols, object.rows );
obj_corners[3] = cvPoint( 0, object.rows );

Mat image = imread("S6E0.bmp", CV_LOAD_IMAGE_GRAYSCALE);
Mat des_image, img_matches;
std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch >> matches;
std::vector<std::vector<cv::DMatch>> matches1;
std::vector<std::vector<cv::DMatch>> matches2;
std::vector<cv::DMatch> matches3;
std::vector<DMatch> good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
Mat H;

//cvtColor(frame, image, CV_RGB2GRAY);
detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );
matcher.knnMatch(des_object, des_image, matches, 2);

for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
    if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
    {
        good_matches.push_back(matches[i][0]);
    }
}

// Draw only "good" matches
drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

if (good_matches.size() >= 4)
{
    printf("Images matching %d , %d", good_matches.size(), kp_object.size());
    //return 1;

    for( int i = 0; i < good_matches.size(); i++ )
    {
        // Get the keypoints from the good matches
        obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
        scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
    }

    //H = findHomography( obj, scene, CV_RANSAC );
    //printf("Size : %d", H.size());
    //perspectiveTransform( obj_corners, scene_corners, H);
    //printf("Size : %d --- %d --- %d", H.size(), scene_corners.size());
}else{
    printf("Images matching %d , %d", good_matches.size(), kp_object.size());
}

// Show detected matches
imshow( "Good Matches", img_matches );
waitKey(0);
return 0;
In this code I want to know what exactly happens in this method:
matcher.knnMatch(des_object, des_image, matches, 2);
As I understand it, it is passed the descriptors of the two images to match, and the matches vector is filled with the 2 nearest neighbours. I want to know what exactly happens inside the method, how the matches vector is filled, and which points it is filled with.
In this code segment:
for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
    if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
    {
        good_matches.push_back(matches[i][0]);
    }
}
I'm using a nearest neighbour distance ratio (NNDR) of 0.6, and I wanted to know how the good_matches are found and what effect changing the NNDR value will have.
It would be a great help if I could resolve this code.
Thanks.
The FlannBasedMatcher is based on the paper by Muja et al.; you can find the exact algorithm and how they go about it there.
Regarding the good_matches: as you saw in the code snippet itself, it is the collection of the best matches your result has, based on the criterion, i.e. the NNDR. It is basically a threshold that decides how much closer the best neighbour must be than the second-best before the match is dropped altogether. The higher the threshold, the more points are considered and the higher the number of positive matches (whether they are true positives or not is determined by your dataset and the way you have set the NNDR level).
Hope this helps.
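For illustration only, your ratio-test loop boils down to the following sketch; filterByRatio is a hypothetical helper name (not an OpenCV function), and nndr is the threshold you set to 0.6:
#include "opencv2/features2d/features2d.hpp"
#include <vector>

// Hypothetical helper: keep a match only when the best neighbour is clearly
// closer than the second-best. Raising nndr keeps more matches, but more of
// them will be unreliable; lowering it keeps fewer, safer ones.
std::vector<cv::DMatch> filterByRatio(const std::vector<std::vector<cv::DMatch> >& knnMatches,
                                      float nndr)
{
    std::vector<cv::DMatch> good;
    for (size_t i = 0; i < knnMatches.size(); i++)
    {
        // knnMatch fills each entry with up to k=2 neighbours, sorted by distance
        if (knnMatches[i].size() >= 2 &&
            knnMatches[i][0].distance < nndr * knnMatches[i][1].distance)
        {
            good.push_back(knnMatches[i][0]);
        }
    }
    return good;
}
With this, good_matches = filterByRatio(matches, 0.6f); reproduces your loop, and trying 0.7f or 0.8f should visibly increase the number of matches drawn.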