I'm trying to use the SURF feature detector, but I always get this error:
The program '[1120] Corner Detection.exe' has exited with code -1073741819 (0xc0000005) 'Access violation'.
Here is the code:
#include "stdafx.h"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include <iostream>
#include <stdlib.h>
#include <vector>
#include <stdio.h>
#include "opencv2/core/core.hpp"
#include "opencv2\nonfree\nonfree.hpp"
using namespace cv;
using namespace std;
void foo(Mat &image1, Mat &image2)
{
int minHeassian = 400;
SurfFeatureDetector detector(minHeassian);
std::vector< KeyPoint > keypoints1, keypoints2;
keypoints1.resize(1000);
keypoints2.resize(1000);
detector.detect(image1, keypoints1); // <--- crashes at this line
detector.detect(image2, keypoints2); // <--- probably will crash here too
SurfDescriptorExtractor extractor;
Mat discriptors1, discriptors2;
extractor.compute(image1, keypoints1, discriptors1);
extractor.compute(image2, keypoints2, discriptors2);
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match(discriptors1, discriptors2, matches);
double minDist = 100;
for (int i = 0; i < matches.size(); ++i)
if (matches[i].distance < minDist)
minDist = matches[i].distance;
std::vector< DMatch > goodMatches;
for (int i = 0; i < matches.size(); ++i)
if (matches[i].distance <= max(0.02, (double)matches[i].distance))
goodMatches.push_back(matches[i]);
Mat matchImage;
drawMatches(image1, keypoints1, image2, keypoints2,
goodMatches, matchImage,
Scalar::all(-1), Scalar::all(-1), vector<char>(),
DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
namedWindow("Matches", WINDOW_AUTOSIZE);
imshow("Matches", matchImage);
waitKey(0);
}
int _tmain(int argc, _TCHAR* argv[])
{
    cv::initModule_nonfree();
    Mat left, right;
    right = imread("D:\\right.jpg", IMREAD_COLOR);
    left = imread("D:\\left.jpg", IMREAD_COLOR);
    foo(left, right);
    return 0;
}
I get the error at the line
detector.detect(image1, keypoints1);
I have the following lib files specified in the linker settings:
opencv_core249d.lib
opencv_imgproc249d.lib
opencv_highgui249d.lib
opencv_ml249d.lib
opencv_video249d.lib
opencv_features2d249d.lib
opencv_calib3d249d.lib
opencv_objdetect249d.lib
opencv_contrib249d.lib
opencv_legacy249d.lib
opencv_flann249d.lib
opencv_features2d249.lib
opencv_nonfree249.lib
I have tried everything I found on the Internet but nothing worked. What is wrong with this code?
I'm running VS 2013 on Windows 8.1 and I'm using OpenCV version 2.4.9.
Solved
It was a silly mistake: I was linking opencv_nonfree249.lib when I should have used opencv_nonfree249d.lib, since I was building in Debug mode.
You are linking against opencv_nonfree249.lib, which is the release build of that library, in a Debug configuration. Add a d to the name, opencv_nonfree249d.lib, so the debug build is linked instead.
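As a side note, one way to avoid mixing the debug and release sets of libraries in the first place is to select them at compile time. A minimal sketch, assuming the OpenCV 2.4.9 library names used above (only two libs shown; the same pattern applies to the rest):
// Select debug or release OpenCV libs automatically (MSVC-specific).
#ifdef _DEBUG
#pragma comment(lib, "opencv_features2d249d.lib")
#pragma comment(lib, "opencv_nonfree249d.lib")
#else
#pragma comment(lib, "opencv_features2d249.lib")
#pragma comment(lib, "opencv_nonfree249.lib")
#endif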
Related
I'm trying to get the matching feature points from two images for further processing. I wrote the following code by adapting an example of SURF feature matching with FLANN to ORB.
Here is the code:
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/features2D.hpp"
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
    Mat im_left, im_right;
    Mat descriptor_1, descriptor_2;
    vector<KeyPoint> keypoints_1, keypoints_2;
    im_left = imread("im_left.png", IMREAD_GRAYSCALE);
    im_left = imread("im_right.png", IMREAD_GRAYSCALE);
    Ptr<ORB> detector = ORB::create();
    vector<DMatch> matches;
    FlannBasedMatcher matcher;
    Ptr<DescriptorExtractor> extractor;
    detector->detect(im_right, keypoints_1, descriptor_1);
    detector->detect(im_left, keypoints_2, descriptor_2);
    matcher.match(descriptor_1, descriptor_2, matches);
    Mat img_match;
    drawMatches(im_left, keypoints_1, im_right, keypoints_2, matches, img_match);
    imshow("Matches", img_match);
    waitKey(10000);
    return 0;
}
But this throws an exception:
Unhandled exception at 0x00007FF97D3B9E08 in Project1.exe: Microsoft C++ exception: cv::Exception at memory location 0x0000009E5D4FE3B0. occurred
My code may well be full of nonsense; I'd appreciate it if someone could help me sort this out.
ORB is a binary descriptor, so it needs a different matcher (one based on the Hamming distance):
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
(taken from: https://docs.opencv.org/3.4.1/dc/d16/tutorial_akaze_tracking.html)
im_left = imread("im_left.png", IMREAD_GRAYSCALE);
im_left = imread("im_right.png", IMREAD_GRAYSCALE);
You have read both images into the same variable; im_right is never loaded.
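Putting both fixes together, a minimal corrected sketch (assuming the two image files exist; note that the original code also passes the descriptor Mat as the mask argument of detect(), so the descriptors were never computed; detectAndCompute fills them in one call):
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/features2d.hpp"
using namespace cv;
using namespace std;
int main()
{
    Mat im_left = imread("im_left.png", IMREAD_GRAYSCALE);
    Mat im_right = imread("im_right.png", IMREAD_GRAYSCALE);
    if (im_left.empty() || im_right.empty())
        return -1; // bail out if either image failed to load
    Ptr<ORB> detector = ORB::create();
    vector<KeyPoint> keypoints_1, keypoints_2;
    Mat descriptor_1, descriptor_2;
    // detect keypoints and compute the binary descriptors in one call
    detector->detectAndCompute(im_left, noArray(), keypoints_1, descriptor_1);
    detector->detectAndCompute(im_right, noArray(), keypoints_2, descriptor_2);
    // ORB descriptors are binary, so match with the Hamming distance
    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
    vector<DMatch> matches;
    matcher->match(descriptor_1, descriptor_2, matches);
    Mat img_match;
    drawMatches(im_left, keypoints_1, im_right, keypoints_2, matches, img_match);
    imshow("Matches", img_match);
    waitKey(0);
    return 0;
}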
I tried the feature description tutorial on opencv.com and the program crashes with the error:
Debug assertion failed, Expression: vector iterator outside range
at the line:
detector.detect(img_1, keypoints_1);
I'm using OpenCV 2.4.11 with Visual Studio 2010, and I'm linking against the opencv_<module>2411d.lib libraries in the library directory C:\opencv\build\x86\vc10\lib.
Here is the code:
#include <iostream>
#include <stdio.h>
#include <opencv2\core\core.hpp>
#include <opencv2\features2d\features2d.hpp>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\nonfree\features2d.hpp>
#include <opencv2\legacy\legacy.hpp>
using namespace cv;
using namespace std;
int main(int argc, char ** argv)
{
    Mat img_1 = imread("3.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    Mat img_2 = imread("2.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    if (!img_1.data || !img_2.data)
    {
        cout << " The images could not be loaded" << endl;
        return -1;
    }
    int minHessian = 400;
    imshow("1.png", img_1);
    imshow("2.png", img_2);
    SurfFeatureDetector detector(minHessian);
    vector<KeyPoint> keypoints_1, keypoints_2;
    detector.detect(img_1, keypoints_1);
    detector.detect(img_2, keypoints_2);
    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat descriptors_1, descriptors_2;
    extractor.compute(img_1, keypoints_1, descriptors_1);
    extractor.compute(img_2, keypoints_2, descriptors_2);
    //-- Step 3: Matching descriptor vectors with a brute force matcher
    BFMatcher matcher(NORM_L2);
    vector< DMatch > matches;
    matcher.match(descriptors_1, descriptors_2, matches);
    //-- Draw matches
    Mat img_matches;
    drawMatches(img_1, keypoints_1, img_2, keypoints_2, matches, img_matches);
    //-- Show detected matches
    imshow("Matches", img_matches);
    waitKey(0);
    return 0;
}
I am trying to set up a bag of visual words using OpenCV 3.0. I have looked around quite a bit, but all I seem to find is code that is only compatible with 2.x versions. This is what I have so far:
#include <opencv2/core/core.hpp>
#include "opencv2/highgui/highgui.hpp"
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <iostream>
#include <stdio.h>
using namespace std;
using namespace cv;
int main(int argc, const char** argv) {
    Ptr<FeatureDetector> features;
    Ptr<DescriptorExtractor> descriptors;
    Ptr<DescriptorMatcher> matcher;
    int MAX_ITER = 100;
    int EPS = 2;
    TermCriteria tc(MAX_ITER + EPS, 1, 0.001);
    int dictSize = 1000;
    int retries = 1;
    int flags = KMEANS_PP_CENTERS;
    BOWKMeansTrainer bowTrainer(dictSize, tc, retries, flags);
    BOWImgDescriptorExtractor bowDE(descriptors, matcher);
    Mat img1 = imread("/Users/Lucas/Desktop/pic2.jpg");
    Mat img2 = imread("/Users/Lucas/Desktop/2.jpg");
    vector<KeyPoint> keypoints, keypoints2;
    features->detect(img1, keypoints);
    features->detect(img2, keypoints2);
    Mat myFeatures;
    Mat myFeatures2;
    descriptors->compute(img1, keypoints, myFeatures);
    descriptors->compute(img2, keypoints2, myFeatures2);
    bowTrainer.add(myFeatures);
    bowTrainer.add(myFeatures2);
    Mat dictionary = bowTrainer.cluster();
    bowDE.setVocabulary(dictionary);
    cout << dictionary << endl;
    return 0;
}
I put this together from a few tutorials and snippets, but I am running into an issue. When the program gets to
features->detect(img1, keypoints);
it exits with a segmentation fault 11, whatever that means. Could someone point out what I am doing wrong?
You have to create your FeatureDetector and DescriptorExtractor first. At the moment you have null-pointer instances, and that is what causes your segfault.
#include <opencv2/xfeatures2d.hpp>
...
Ptr<FeatureDetector> features = xfeatures2d::SIFT::create();
Ptr<DescriptorExtractor> descriptors = xfeatures2d::SIFT::create();
Ptr<DescriptorMatcher> matcher = makePtr<BFMatcher>(NORM_L2);
Note that since you have to use SIFT or SURF, you will need the opencv_contrib repo installed for this.
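For context, here is a minimal sketch of how those non-null pointers then plug into the bag-of-words pipeline from the question (assuming one training image from the same path; the TermCriteria flags are switched to the TermCriteria constants, and the dictionary size is kept small so k-means has enough descriptors to cluster):
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/features2d.hpp>
#include <opencv2/xfeatures2d.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main() {
    // valid (non-null) detector, extractor and matcher instances
    Ptr<FeatureDetector> features = xfeatures2d::SIFT::create();
    Ptr<DescriptorExtractor> descriptors = xfeatures2d::SIFT::create();
    Ptr<DescriptorMatcher> matcher = makePtr<BFMatcher>(NORM_L2);
    TermCriteria tc(TermCriteria::MAX_ITER + TermCriteria::EPS, 100, 0.001);
    BOWKMeansTrainer bowTrainer(100, tc, 1, KMEANS_PP_CENTERS); // 100 visual words (must not exceed the number of descriptors added)
    BOWImgDescriptorExtractor bowDE(descriptors, matcher);
    Mat img1 = imread("/Users/Lucas/Desktop/pic2.jpg", IMREAD_GRAYSCALE);
    if (img1.empty())
        return -1; // image failed to load
    vector<KeyPoint> keypoints;
    Mat siftDescriptors;
    features->detect(img1, keypoints);
    descriptors->compute(img1, keypoints, siftDescriptors);
    bowTrainer.add(siftDescriptors);
    Mat dictionary = bowTrainer.cluster(); // k-means over all added descriptors
    bowDE.setVocabulary(dictionary);
    cout << dictionary.rows << " visual words" << endl;
    return 0;
}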
I'm using OpenCV 2.44 and Visual Studio C++ 2010.
When I compile this:
#include <opencv2/imgproc/imgproc_c.h>
#include <stdio.h>
#include <math.h>
#include <opencv/highgui.h>
#include <opencv/cv.h>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/core/core.hpp>
#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/legacy/legacy.hpp>
using namespace cv;
void main()
{
    Mat img1 = imread( "hh.jpg", CV_LOAD_IMAGE_GRAYSCALE );
    Mat img2 = imread( "hh.jpg", CV_LOAD_IMAGE_GRAYSCALE );
    // detecting keypoints
    FastFeatureDetector detector(15);
    vector<KeyPoint> keypoints1;
    detector.detect(img1, keypoints1);
    // computing descriptors
    SurfDescriptorExtractor extractor;
    Mat descriptors1;
    extractor.compute(img1, keypoints1, descriptors1);
When I run the code I get: Unhandled exception at 0x580f375b in prj.exe: 0xC0000005: Access violation reading location 0x001f7014.
The error occurs at the extractor.
I'm following this tutorial: link
It looks like you forgot to initialize the non-free module. Try calling the appropriate function before using SurfDescriptorExtractor:
#include <opencv2/nonfree/nonfree.hpp>
...
cv::initModule_nonfree();
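Following that suggestion, a minimal sketch of where the call goes in a program like the one in the question (assuming OpenCV 2.4.x with the nonfree library linked):
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>
using namespace cv;
int main()
{
    initModule_nonfree(); // register SURF/SIFT before any SURF object is used
    Mat img = imread("hh.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    if (img.empty())
        return -1; // image failed to load
    FastFeatureDetector detector(15);
    std::vector<KeyPoint> keypoints;
    detector.detect(img, keypoints);
    SurfDescriptorExtractor extractor;
    Mat descriptors;
    extractor.compute(img, keypoints, descriptors);
    return 0;
}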
I want to try the new FREAK class in OpenCV 2.4.2.
I tried to use the common feature detector interface to construct FREAK but, of course, it doesn't work. How should I revise my code to get a result?
#include <stdio.h>
#include <iostream>
#include <opencv\cxcore.h>
#include <opencv2\nonfree\features2d.hpp>
#include <opencv\highgui.h>
#include <opencv2\features2d\features2d.hpp>
#include <vector>
using namespace std;
using namespace cv;
int main(){
    Mat mat1;
    mat1 = imread("Testimg06.jpg", 0);
    vector<KeyPoint> P1;
    Ptr<FeatureDetector> freakdes;
    Ptr<DescriptorExtractor> descriptorExtractor;
    freakdes = FeatureDetector::create("FREAK");
    freakdes->detect(mat1, P1);
    Mat keypoint_img;
    drawKeypoints( mat1, P1, keypoint_img, Scalar::all(-1), DrawMatchesFlags::DEFAULT );
    imshow("Keypoints 1", keypoint_img );
    cvWaitKey(0);
}
FREAK is a descriptor only; there is no corresponding feature detector.
So you need to combine it with one of the available detectors: FAST, ORB, SIFT, SURF, MSER, or use the goodFeaturesToTrack function.
There is an OpenCV example that shows how to use FREAK combined with FAST.
The basic instructions are:
FREAK extractor;
BruteForceMatcher<Hamming> matcher;
std::vector<KeyPoint> keypointsA, keypointsB;
Mat descriptorsA, descriptorsB;
std::vector<DMatch> matches;
FAST(imgA,keypointsA,10);
FAST(imgB,keypointsB,10);
extractor.compute( imgA, keypointsA, descriptorsA );
extractor.compute( imgB, keypointsB, descriptorsB );
matcher.match(descriptorsA, descriptorsB, matches);
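If BruteForceMatcher<Hamming> is not available in your build (it lives in the legacy module and was removed in later versions), the same matching step can be done with BFMatcher and the Hamming norm. A sketch of the tail end of the pipeline, replacing the matcher lines above and assuming imgA and imgB are already loaded:
// brute-force matching of the binary FREAK descriptors with the Hamming distance
BFMatcher matcher(NORM_HAMMING);
std::vector<DMatch> matches;
matcher.match(descriptorsA, descriptorsB, matches);
// visualize the result
Mat imgMatches;
drawMatches(imgA, keypointsA, imgB, keypointsB, matches, imgMatches);
imshow("FREAK matches", imgMatches);
waitKey(0);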