I tried the tutorial about feature description on opencv.com and the program crashes with the error:
Debug assertion failed , Expression: vector iterator outside range
in the line:
detector.detect(img_1, keypoints_1);
I'm using OpenCV 2.4.11, with Visual Studio 2010, and I'm linking to opencv_<module>2411d.lib libraries in library directory C:\opencv\build\x86\vc10\lib
The code is here :
#include <iostream>
#include <stdio.h>
#include <opencv2\core\core.hpp>
#include <opencv2\features2d\features2d.hpp>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\nonfree\features2d.hpp>
#include <opencv2\legacy\legacy.hpp>
using namespace cv;
using namespace std;
int main(int argc, char ** argv)
{
Mat img_1 = imread("3.jpg", CV_LOAD_IMAGE_GRAYSCALE);
Mat img_2 = imread("2.jpg", CV_LOAD_IMAGE_GRAYSCALE);
if (!img_1.data || !img_2.data)
{
cout << " Nu au fost afisate imaginile" << endl;
return -1;
}
int minHessian = 400;
imshow("1.png", img_1);
imshow("2.png", img_2);
SurfFeatureDetector detector(minHessian);
vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect(img_1, keypoints_1);
detector.detect(img_2, keypoints_2);
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute(img_1, keypoints_1, descriptors_1);
extractor.compute(img_2, keypoints_2, descriptors_2);
//-- Step 3: Matching descriptor vectors with a brute force matcher
BFMatcher matcher(NORM_L2);
vector< DMatch > matches;
matcher.match(descriptors_1, descriptors_2, matches);
//-- Draw matches
Mat img_matches;
drawMatches(img_1, keypoints_1, img_2, keypoints_2, matches, img_matches);
//-- Show detected matches
imshow("Matches", img_matches);
waitKey(0);
return 0;
}
and here is when it starts to crash:
Related
I'm trying to get the matching feature points from two images, for further processing. I wrote the following code by referring to an example of SURF feature matching with FLANN, but using ORB instead.
here is the code:
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/features2D.hpp"
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
Mat im_left, im_right;
Mat descriptor_1, descriptor_2;
vector<KeyPoint> keypoints_1, keypoints_2;
im_left = imread("im_left.png", IMREAD_GRAYSCALE);
im_left = imread("im_right.png", IMREAD_GRAYSCALE);
Ptr<ORB> detector = ORB::create();
vector<DMatch> matches;
FlannBasedMatcher matcher;
Ptr<DescriptorExtractor> extractor;
detector->detect(im_right, keypoints_1, descriptor_1);
detector->detect(im_left, keypoints_2, descriptor_2);
matcher.match(descriptor_1, descriptor_2, matches);
Mat img_match;
drawMatches(im_left, keypoints_1, im_right, keypoints_2, matches, img_match);
imshow("Matches", img_match);
waitKey(10000);
return 0;
}
But this throws an exception error saying:
Unhandled exception at 0x00007FF97D3B9E08 in Project1.exe: Microsoft C++ exception: cv::Exception at memory location 0x0000009E5D4FE3B0. occurred
Maybe my code is full of nonsense; I'd appreciate it if someone could help me out in solving this.
ORB is a binary descriptor and needs a different (Hamming distance) matcher for that:
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
(taken from: https://docs.opencv.org/3.4.1/dc/d16/tutorial_akaze_tracking.html)
im_left = imread("im_left.png", IMREAD_GRAYSCALE);
im_left = imread("im_right.png", IMREAD_GRAYSCALE);
You have read images into the same variable twice.
Using this code to find matches between images:
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/xfeatures2d/nonfree.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <vector>
using namespace std;
using namespace cv;
int main(int argc, char *argv[]) {
//cv::initModule_nonfree();
//initModule_features2d();
Mat img_1 = imread("C:/Users/Dan/Desktop/0.jpg", 1);
Mat img_2 = imread("C:/Users/Dan/Desktop/0.jpg", 1);
cv::Ptr<Feature2D> f2d = xfeatures2d::SIFT::create();
//-- Step 1: Detect the keypoints:
std::vector<KeyPoint> keypoints_1, keypoints_2;
f2d->detect(img_1, keypoints_1);
f2d->detect(img_2, keypoints_2);
//-- Step 2: Calculate descriptors (feature vectors)
Mat descriptors_1, descriptors_2;
f2d->compute(img_1, keypoints_1, descriptors_1);
f2d->compute(img_2, keypoints_2, descriptors_2);
Mat out0;
drawKeypoints(img_1, keypoints_1, out0);
imshow("KeyPoint0.jpg", out0);
//-- Step 3: Matching descriptor vectors using BFMatcher :
BFMatcher matcher;
std::vector< DMatch > matches;
matcher.match(descriptors_1, descriptors_2, matches);
Mat img_matches = Mat::zeros( img_1.size(), CV_8UC3 );
drawMatches(img_1,keypoints_1,img_2,keypoints_2,matches,img_matches);
imshow("matches", img_matches);
waitKey(0); // Keep window there until user presses 'q' to quit.
return 0;
}
Since the functions were changed in OpenCV 3.1, I looked for example code using SURF or SIFT, but could not find any.
How to modify this code so it will draw contours around detected objects similar to OpenCV version?
You will need to use findHomography to get the transformation that relates your training image (img_1) to the image to be detected (img_2)
Then you can simply do a perspectiveTransform on a bounding box of your training image (at origin) using the homography obtained, to place the correct bounding box on the detected image
Related code taken from ORB detection example
// Fragment from the OpenCV ORB/AKAZE tracking example: estimate a homography
// between the matched keypoint sets and keep only RANSAC inliers.
// NOTE(review): relies on surrounding context for matched1/matched2 (matched
// keypoints), Points() (KeyPoint -> Point2f conversion), ransac_thresh, stats,
// object_bb, first_frame, frame, bb_min_inliers and drawBoundingBox() -- all
// defined elsewhere in the full example.
Mat inlier_mask, homography;
vector<KeyPoint> inliers1, inliers2;
vector<DMatch> inlier_matches;
// findHomography needs at least 4 point correspondences.
if(matched1.size() >= 4) {
homography = findHomography(Points(matched1), Points(matched2),
RANSAC, ransac_thresh, inlier_mask);
}
// NOTE(review): with fewer than 4 matches, homography and inlier_mask stay
// empty here and the at<uchar>/perspectiveTransform calls below would fail;
// the full example skips such frames -- confirm the caller does too.
for(unsigned i = 0; i < matched1.size(); i++) {
if(inlier_mask.at<uchar>(i)) {
// Re-index so that DMatch(new_i, new_i) pairs the inliers positionally.
int new_i = static_cast<int>(inliers1.size());
inliers1.push_back(matched1[i]);
inliers2.push_back(matched2[i]);
inlier_matches.push_back(DMatch(new_i, new_i, 0));
}
}
stats.inliers = (int)inliers1.size();
stats.ratio = stats.inliers * 1.0 / stats.matches;
// Project the training image's bounding box into the current frame.
vector<Point2f> new_bb;
perspectiveTransform(object_bb, new_bb, homography);
Mat frame_with_bb = frame.clone();
// Only draw the box when enough inliers support the homography.
if(stats.inliers >= bb_min_inliers) {
drawBoundingBox(frame_with_bb, new_bb);
}
// Draw the inlier matches (blue) between the first frame and current frame.
Mat res;
drawMatches(first_frame, inliers1, frame_with_bb, inliers2,
inlier_matches, res,
Scalar(255, 0, 0), Scalar(255, 0, 0));
I'm trying to use the SURF feature detector, but I always get this error:
The program '[1120] Corner Detection.exe' has exited with code -1073741819 (0xc0000005) 'Access violation'.
Here is the code
#include "stdafx.h"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include <iostream>
#include <stdlib.h>
#include <vector>
#include <stdio.h>
#include "opencv2/core/core.hpp"
#include "opencv2\nonfree\nonfree.hpp"
using namespace cv;
using namespace std;
// Detects SURF features in both images, matches them with FLANN, filters the
// matches by distance, and displays the result. Requires the nonfree module
// (and, in a debug build, the 'd'-suffixed opencv_nonfree lib -- mixing the
// release lib into a debug build is what caused the reported access violation).
void foo(Mat &image1, Mat &image2)
{
    //-- Step 1: detect SURF keypoints (Hessian threshold 400).
    int minHeassian = 400;
    SurfFeatureDetector detector(minHeassian);
    std::vector< KeyPoint > keypoints1, keypoints2;
    // NOTE: no resize() needed beforehand -- detect() clears and fills the
    // vectors itself, so the original resize(1000) calls were dead work.
    detector.detect(image1, keypoints1);
    detector.detect(image2, keypoints2);

    //-- Step 2: compute a descriptor for each keypoint.
    SurfDescriptorExtractor extractor;
    Mat discriptors1, discriptors2;
    extractor.compute(image1, keypoints1, discriptors1);
    extractor.compute(image2, keypoints2, discriptors2);

    //-- Step 3: FLANN matching (valid here: SURF descriptors are float).
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match(discriptors1, discriptors2, matches);

    // Find the smallest match distance.
    double minDist = 100;
    for (size_t i = 0; i < matches.size(); ++i)
        if (matches[i].distance < minDist)
            minDist = matches[i].distance;

    // BUG FIX: the original filter `distance <= max(0.02, distance)` is a
    // tautology, so every match was kept and minDist was never used. Use the
    // standard tutorial heuristic: keep matches within twice the minimum
    // distance (with a small floor for the near-zero case).
    std::vector< DMatch > goodMatches;
    for (size_t i = 0; i < matches.size(); ++i)
        if (matches[i].distance <= max(2 * minDist, 0.02))
            goodMatches.push_back(matches[i]);

    // Draw only the good matches, hiding unmatched keypoints.
    Mat matchImage;
    drawMatches(image1, keypoints1, image2, keypoints2,
        goodMatches, matchImage,
        Scalar::all(-1), Scalar::all(-1), vector<char>(),
        DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
    namedWindow("Matches", WINDOW_AUTOSIZE);
    imshow("Matches", matchImage);
    waitKey(0);
}
int _tmain(int argc, _TCHAR* argv[])
{
    // Required in OpenCV 2.4.x before using SURF/SIFT from the nonfree module.
    cv::initModule_nonfree();

    Mat left, right;
    right = imread("D:\\right.jpg", IMREAD_COLOR);
    left = imread("D:\\left.jpg", IMREAD_COLOR);

    // ROBUSTNESS FIX: fail fast if either file is missing instead of letting
    // detector.detect() fault on an empty Mat inside foo().
    if (left.empty() || right.empty())
        return -1;

    foo(left, right);
    return 0;
}
I get the error at the line
detector.detect(image1, keypoints1);
I have the following lib files specified for the linker:
opencv_core249d.lib
opencv_imgproc249d.lib
opencv_highgui249d.lib
opencv_ml249d.lib
opencv_video249d.lib
opencv_features2d249d.lib
opencv_calib3d249d.lib
opencv_objdetect249d.lib
opencv_contrib249d.lib
opencv_legacy249d.lib
opencv_flann249d.lib
opencv_features2d249.lib
opencv_nonfree249.lib
I have tried everything I found on the Internet but nothing worked. What is wrong with this code?
I'm running VS 2013 on Windows 8.1 and I'm using OpenCV version 2.4.9.
Solved
It was a silly mistake. I used the library opencv_nonfree249.lib, whereas I should have been using opencv_nonfree249d.lib (note the trailing "d"), since I was working in debug mode.
You are using the library opencv_nonfree249.lib in a debug environment which is meant for release environment. Add a d to the lib name to make it opencv_nonfree249d.lib which will work for debug environment.
I want to try the new class FREAK in OpenCV 2.4.2.
I tried to use common interface of feature detector to construct FREAK, but,of course, it doesn't work. How should I revise my code to get result?
#include <stdio.h>
#include <iostream>
#include <opencv\cxcore.h>
#include <opencv2\nonfree\features2d.hpp>
#include <opencv\highgui.h>
#include <opencv2\features2d\features2d.hpp>
#include <vector>
using namespace std;
using namespace cv;
int main(){
Mat mat1;
mat1 = imread("Testimg06.jpg",0);
vector<KeyPoint> P1;
Ptr<FeatureDetector> freakdes;
Ptr<DescriptorExtractor> descriptorExtractor;
freakdes = FeatureDetector::create("FREAK");
freakdes->detect(mat1,P1);
Mat keypoint_img;
drawKeypoints( mat1, P1, keypoint_img, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
imshow("Keypoints 1", keypoint_img );
cvWaitKey(0);
}
FREAK is descriptor only. There is no corresponding feature detector.
So you need to combine it with one of the available detectors: FAST, ORB, SIFT, SURF, MSER or use goodFeaturesToTrack function.
There is an OpenCV example that shows how to use FREAK combined with FAST.
The basic instructions are:
// Minimal FREAK usage (OpenCV 2.4.x): FREAK only *describes* keypoints,
// so detection is done separately with FAST.
// NOTE(review): imgA/imgB must be already-loaded grayscale Mats -- their
// setup is not shown in this fragment.
FREAK extractor;
// FREAK produces binary descriptors, hence the Hamming-distance matcher.
BruteForceMatcher<Hamming> matcher;
std::vector<KeyPoint> keypointsA, keypointsB;
Mat descriptorsA, descriptorsB;
std::vector<DMatch> matches;
// Detect FAST corners (intensity threshold 10) in both images.
FAST(imgA,keypointsA,10);
FAST(imgB,keypointsB,10);
// Compute a FREAK descriptor for every detected keypoint.
extractor.compute( imgA, keypointsA, descriptorsA );
extractor.compute( imgB, keypointsB, descriptorsB );
// Match A's descriptors against B's (one nearest neighbour per query).
matcher.match(descriptorsA, descriptorsB, matches);
I'm trying to stitch together some images to make a sort of panorama. I'm using OpenCV so first thing to do is detect keypoints and descriptors than matching them. To do that I'm following this tutorial: http://opencv.itseez.com/doc/user_guide/ug_features2d.html
But during debug I get a std::bad_alloc exception relative to this line:
matcher.match(descriptors1, descriptors2, matches);
Can somebody help me with that? I copied and pasted the tutorial code, and there are no compilation errors.
Thanks.
G
Complete code:
// Fragment from inside a main(): SURF detect -> describe -> brute-force match
// -> draw, using the OpenCV 2.x API (BruteForceMatcher is pre-2.4 style).
// NOTE(review): the std::bad_alloc reported at match() is typically caused by
// linking release OpenCV libs into a debug build (mismatched CRT heaps), not
// by this code -- see the linker-settings answer further down.
Mat img1 = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
Mat img2 = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
if(img1.empty() || img2.empty())
{
printf("Can't read one of the images\n");
return -1;
}
// detecting keypoints
// (SURF with Hessian threshold 400; requires the nonfree module)
SurfFeatureDetector detector(400);
vector<KeyPoint> keypoints1, keypoints2;
detector.detect(img1, keypoints1);
detector.detect(img2, keypoints2);
// computing descriptors
SurfDescriptorExtractor extractor;
Mat descriptors1, descriptors2;
extractor.compute(img1, keypoints1, descriptors1);
extractor.compute(img2, keypoints2, descriptors2);
// matching descriptors
// (L2 norm is appropriate: SURF descriptors are float vectors)
BruteForceMatcher<L2<float> > matcher;
vector<DMatch> matches;
matcher.match(descriptors1, descriptors2, matches);
// drawing the results
namedWindow("matches", 1);
Mat img_matches;
drawMatches(img1, keypoints1, img2, keypoints2, matches, img_matches);
imshow("matches", img_matches);
waitKey(0);
Update:
if I run this code, I get a:
Run-Time Check Failure #2 - Stack around the variable 'keypoints1' was corrupted.
Code:
#include "opencv\cv.h"
#include "opencv\highgui.h"
using namespace cv;
using namespace std;
int main()
{
Mat img1 = imread("Chessboard1.jpg", CV_LOAD_IMAGE_GRAYSCALE);
Mat img2 = imread("Chessboard3.jpg", CV_LOAD_IMAGE_GRAYSCALE);
if(img1.empty() || img2.empty())
{
printf("Can't read one of the images\n");
return -1;
}
FastFeatureDetector detector(50);
vector<KeyPoint> keypoints1;
detector.detect(img1, keypoints1);
return 0;
}
You need ensure that the following "Additional Dependencies" under the the Properties->Linker->Input are referring to the correct OpenCV libraries with debugger support.
i.e.
C:\OpenCV2.2\lib\opencv_calib3d220d.lib
C:\OpenCV2.2\lib\opencv_core220d.lib
C:\OpenCV2.2\lib\opencv_features2d220d.lib
C:\OpenCV2.2\lib\opencv_highgui220d.lib
C:\OpenCV2.2\lib\opencv_imgproc220d.lib
instead of
C:\OpenCV2.2\lib\opencv_calib3d220.lib
C:\OpenCV2.2\lib\opencv_core220.lib
C:\OpenCV2.2\lib\opencv_features2d220.lib
C:\OpenCV2.2\lib\opencv_highgui220.lib
C:\OpenCV2.2\lib\opencv_imgproc220.lib