Labeling images to recognize in a SURF object detector - C++

I am trying to implement SURF-based object recognition software. I found this code on the internet that implements SURF, and it works great (slow, but it works): with it I can compare a template image to objects in a scene from my webcam and find them. What I am looking for now is how to label the template, so that when the code finds it in the camera feed, the software returns the name of the object that appears. Here is the code I have; sorry for the missing source, I could not remember it.
Edit: I have been reading about Bag of Words, but it seems rather complicated to implement. I would like to know if there is a simpler way to do this.
Thank you all.
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
using namespace cv;
using namespace std; // needed for the unqualified vector<> and min() below
int main()
{
    Mat object = imread("C:\\teste.png", CV_LOAD_IMAGE_GRAYSCALE);
    if (!object.data)
    {
        std::cout << "Error reading object " << std::endl;
        return -1;
    }
    //Detect the keypoints using SURF Detector
    int minHessian = 400;
    SurfFeatureDetector detector(minHessian);
    std::vector<KeyPoint> kp_object;
    detector.detect(object, kp_object);
    //Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat des_object;
    extractor.compute(object, kp_object, des_object);
    FlannBasedMatcher matcher;
    CvCapture* cap = cvCreateCameraCapture(0);
    cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, 320);
    cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, 240);
    namedWindow("Good Matches");
    std::vector<Point2f> obj_corners(4);
    //Get the corners from the object
    obj_corners[0] = cvPoint(0, 0);
    obj_corners[1] = cvPoint(object.cols, 0);
    obj_corners[2] = cvPoint(object.cols, object.rows);
    obj_corners[3] = cvPoint(0, object.rows);
    char key = 'a';
    int framecount = 0;
    while (key != 27)
    {
        Mat frame;
        frame = cvQueryFrame(cap);
        if (framecount < 5)
        {
            framecount++;
            continue;
        }
        Mat des_image, img_matches;
        std::vector<KeyPoint> kp_image;
        std::vector<vector<DMatch > > matches;
        std::vector<DMatch > good_matches;
        std::vector<Point2f> obj;
        std::vector<Point2f> scene;
        std::vector<Point2f> scene_corners(4);
        Mat H;
        Mat image;
        cvtColor(frame, image, CV_RGB2GRAY);
        detector.detect(image, kp_image);
        extractor.compute(image, kp_image, des_image);
        matcher.knnMatch(des_object, des_image, matches, 2);
        for (int i = 0; i < min(des_image.rows - 1, (int)matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
        {
            if ((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int)matches[i].size() <= 2 && (int)matches[i].size() > 0))
            {
                good_matches.push_back(matches[i][0]);
            }
        }
        //Draw only "good" matches
        drawMatches(object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
        if (good_matches.size() >= 4)
        {
            for (int i = 0; i < good_matches.size(); i++)
            {
                //Get the keypoints from the good matches
                obj.push_back(kp_object[good_matches[i].queryIdx].pt);
                scene.push_back(kp_image[good_matches[i].trainIdx].pt);
            }
            H = findHomography(obj, scene, CV_RANSAC);
            perspectiveTransform(obj_corners, scene_corners, H);
            //Draw lines between the corners (the mapped object in the scene image )
            line(img_matches, scene_corners[0] + Point2f(object.cols, 0), scene_corners[1] + Point2f(object.cols, 0), Scalar(0, 255, 0), 4);
            line(img_matches, scene_corners[1] + Point2f(object.cols, 0), scene_corners[2] + Point2f(object.cols, 0), Scalar(0, 255, 0), 4);
            line(img_matches, scene_corners[2] + Point2f(object.cols, 0), scene_corners[3] + Point2f(object.cols, 0), Scalar(0, 255, 0), 4);
            line(img_matches, scene_corners[3] + Point2f(object.cols, 0), scene_corners[0] + Point2f(object.cols, 0), Scalar(0, 255, 0), 4);
        }
        //Show detected matches
        imshow("Good Matches", img_matches);
        key = waitKey(1);
    }
    return 0;
}
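One simpler alternative to Bag of Words, sketched here under the assumption that only a handful of known objects need to be recognized: keep one (template, name) pair per object and run the same ratio test (and, if you like, the homography check) once per template, reporting the name of whichever template passes. The struct name, file paths, and object names below are made up for illustration; the detector, extractor, matcher, and des_image are the ones from the code above.

// Hypothetical helper: pair each template with a human-readable name.
struct LabeledTemplate
{
    std::string name;
    std::vector<KeyPoint> keypoints;
    Mat descriptors;
};

// Before the capture loop: load and describe every template once.
std::vector<LabeledTemplate> templates;
const char* files[] = { "C:\\cup.png", "C:\\book.png" }; // made-up paths
const char* names[] = { "cup", "book" };
for (int t = 0; t < 2; t++)
{
    LabeledTemplate lt;
    lt.name = names[t];
    Mat tmpl = imread(files[t], CV_LOAD_IMAGE_GRAYSCALE);
    detector.detect(tmpl, lt.keypoints);
    extractor.compute(tmpl, lt.keypoints, lt.descriptors);
    templates.push_back(lt);
}

// Inside the loop, after des_image has been computed for the current frame:
// run the same knnMatch + ratio test per template and report the name.
for (size_t t = 0; t < templates.size(); t++)
{
    std::vector<std::vector<DMatch> > m;
    matcher.knnMatch(templates[t].descriptors, des_image, m, 2);
    std::vector<DMatch> good;
    for (size_t i = 0; i < m.size(); i++)
        if (m[i].size() == 2 && m[i][0].distance < 0.6f * m[i][1].distance)
            good.push_back(m[i][0]);
    if (good.size() >= 4) // same threshold the homography step uses above
        std::cout << "Found object: " << templates[t].name << std::endl;
}

The findHomography/perspectiveTransform step can then be run per passing template in exactly the same way as above, to draw a labeled box around each recognized object.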

Related

Non-syntax-related errors - SURF algorithm

I'm trying to recognize objects by using the SURF algorithm. Since I had some problems while installing the nonfree module, I decided to use an older version of OpenCV (2.4.11; I'm running it in Visual Studio 2013).
Now I have some errors that aren't related to the syntax, and I don't know what to do. Here is the code:
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
using namespace cv;
using namespace std;
char key = 'a';
int framecount = 0;
SurfFeatureDetector detector( 500 );
SurfDescriptorExtractor extractor;
FlannBasedMatcher matcher;
Mat frame, des_object, image;
Mat des_image, img_matches, H;
std::vector<KeyPoint> kp_object;
std::vector<Point2f> obj_corners(4);
std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch > > matches;
std::vector<DMatch > good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
int main()
{
    //reference image
    Mat object = imread( "C:\\Users\\patri\\Desktop\\test.jpg", CV_LOAD_IMAGE_GRAYSCALE );
    if( !object.data )
    {
        std::cout<< "Error reading object " << std::endl;
        return -1;
    }
    //compute detectors and descriptors of reference image
    detector.detect( object, kp_object );
    extractor.compute( object, kp_object, des_object );
    //create video capture object
    VideoCapture cap(0);
    //Get the corners from the object
    obj_corners[0] = cvPoint(0,0);
    obj_corners[1] = cvPoint( object.cols, 0 );
    obj_corners[2] = cvPoint( object.cols, object.rows );
    obj_corners[3] = cvPoint( 0, object.rows );
    //while loop for real time detection
    while (key != 27)
    {
        //capture one frame from video and store it into image object name 'frame'
        cap >> frame;
        if (framecount < 5)
        {
            framecount++;
            continue;
        }
        //converting captured frame into gray scale
        cvtColor(frame, image, CV_RGB2GRAY);
        //extract detectors and descriptors of captured frame
        detector.detect( image, kp_image );
        extractor.compute( image, kp_image, des_image );
        //find matching descriptors of reference and captured image
        matcher.knnMatch(des_object, des_image, matches, 2);
        //finding matching keypoints with Euclidean distance 0.6 times the distance of next keypoint
        //used to find right matches
        for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++)
        {
            if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
            {
                good_matches.push_back(matches[i][0]);
            }
        }
        //Draw only "good" matches
        drawMatches( object, kp_object, frame, kp_image, good_matches, img_matches,
                     Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
        //3 good matches are enough to describe an object as a right match.
        if (good_matches.size() >= 3)
        {
            for( int i = 0; i < good_matches.size(); i++ )
            {
                //Get the keypoints from the good matches
                obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
                scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
            }
            try
            {
                H = findHomography( obj, scene, CV_RANSAC );
            }
            catch(Exception e){}
            perspectiveTransform( obj_corners, scene_corners, H);
            //Draw lines between the corners (the mapped object in the scene image )
            line( img_matches, scene_corners[0] + Point2f( object.cols, 0), scene_corners[1] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
            line( img_matches, scene_corners[1] + Point2f( object.cols, 0), scene_corners[2] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[2] + Point2f( object.cols, 0), scene_corners[3] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[3] + Point2f( object.cols, 0), scene_corners[0] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
        }
        //Show detected matches
        imshow( "Good Matches", img_matches );
        //clear arrays (obj and scene are globals, so they must be emptied each frame too)
        good_matches.clear();
        obj.clear();
        scene.clear();
        key = waitKey(1);
    }
    return 0;
}
I'm also attaching a picture of the errors I get. Please help me find the problem.
[screenshot of the errors]
About the warnings ("conversion from int to float"), I suppose the problem is initializing cv::Point2f with integers; so, in lines 112-115, you should use Point2f( object.cols, 0.0) instead of Point2f( object.cols, 0).
But those are only warnings.
The real problem is the linker error: you need the library that contains cv::SURF::SURF() (used by SurfFeatureDetector detector( 500 ); and SurfDescriptorExtractor extractor;).
You should add a library that I suppose is, in your case, opencv_nonfree241.lib, or a similar name.
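For reference, a minimal sketch of both fixes. The exact library name is an assumption and depends on how your OpenCV 2.4.11 was built; check the lib folder of your install.

// Link the nonfree module (Visual Studio; the 'd' suffix is for Debug builds).
// Assumed name -- adjust it to whatever file is actually in your lib folder.
#pragma comment(lib, "opencv_nonfree2411d.lib")

// Silence the int-to-float warnings by constructing the corners explicitly:
obj_corners[0] = Point2f(0.0f, 0.0f);
obj_corners[1] = Point2f((float)object.cols, 0.0f);
obj_corners[2] = Point2f((float)object.cols, (float)object.rows);
obj_corners[3] = Point2f(0.0f, (float)object.rows);

Alternatively, add the library under Project Properties -> Linker -> Input -> Additional Dependencies.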

OpenCV 3.1, SURF, getting error

I am trying to run the SURF code given at this link, with small changes.
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/line_descriptor.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/xfeatures2d.hpp"
#include "opencv2/xfeatures2d/nonfree.hpp"
#include "opencv2/imgproc.hpp"
using namespace cv;
void readme();
/** @function main */
int main(int argc, char** argv)
{
    Mat img_object = imread("C:\\VC_examples\\IMG_0030.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    Mat img_scene = imread("C:\\VC_examples\\IMG_0031.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    if (!img_object.data || !img_scene.data)
    {
        std::cout << " --(!) Error reading images " << std::endl; return -1;
    }
    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 400;
    cv::Ptr<Feature2D> detector = xfeatures2d::SURF::create(minHessian);
    std::vector<KeyPoint> keypoints_object, keypoints_scene;
    detector->detect(img_object, keypoints_object);
    detector->detect(img_scene, keypoints_scene);
    printf("-- dummy : %f \n", 1.0);
    //-- Step 2: Calculate descriptors (feature vectors)
    Mat descriptors_object, descriptors_scene;
    // ERROR LINE:
    detector->compute(img_object, keypoints_object, descriptors_object);
    detector->compute(img_scene, keypoints_scene, descriptors_scene);
    //-- Step 3: Matching descriptor vectors using BFMatcher :
    BFMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match(descriptors_object, descriptors_scene, matches);
    double max_dist = 0; double min_dist = 100;
    //-- Quick calculation of max and min distances between keypoints
    for (int i = 0; i < descriptors_object.rows; i++)
    {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }
    printf("-- Max dist : %f \n", max_dist);
    printf("-- Min dist : %f \n", min_dist);
    //-- Draw only "good" matches (i.e. whose distance is less than 3*min_dist )
    std::vector< DMatch > good_matches;
    for (int i = 0; i < descriptors_object.rows; i++)
    {
        if (matches[i].distance < 3 * min_dist)
        {
            good_matches.push_back(matches[i]);
        }
    }
    Mat img_matches;
    drawMatches(img_object, keypoints_object, img_scene, keypoints_scene,
        good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
        std::vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
    //-- Localize the object
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;
    for (int i = 0; i < good_matches.size(); i++)
    {
        //-- Get the keypoints from the good matches
        obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
        scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
    }
    Mat H = findHomography(obj, scene, CV_RANSAC);
    //-- Get the corners from the image_1 ( the object to be "detected" )
    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = cvPoint(0, 0); obj_corners[1] = cvPoint(img_object.cols, 0);
    obj_corners[2] = cvPoint(img_object.cols, img_object.rows); obj_corners[3] = cvPoint(0, img_object.rows);
    std::vector<Point2f> scene_corners(4);
    perspectiveTransform(obj_corners, scene_corners, H);
    //-- Draw lines between the corners (the mapped object in the scene - image_2 )
    line(img_matches, scene_corners[0] + Point2f(img_object.cols, 0), scene_corners[1] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
    line(img_matches, scene_corners[1] + Point2f(img_object.cols, 0), scene_corners[2] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
    line(img_matches, scene_corners[2] + Point2f(img_object.cols, 0), scene_corners[3] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
    line(img_matches, scene_corners[3] + Point2f(img_object.cols, 0), scene_corners[0] + Point2f(img_object.cols, 0), Scalar(0, 255, 0), 4);
    //-- Show detected matches
    imshow("Good Matches & Object detection", img_matches);
    waitKey(0);
    return 0;
}
/** @function readme */
void readme()
{
    std::cout << " Usage: ./SURF_descriptor <img1> <img2>" << std::endl;
}
I'm getting this error:
Exception thrown at 0x000007FEC3AA996C (opencv_xfeatures2d310.dll) in Example_SURF.exe: 0xC0000005: Access violation writing location 0x00000000002B0000.
If there is a handler for this exception, the program may be safely continued.
System info:
debug x64
Linker->input:
opencv_videostab310d.lib
opencv_video310d.lib
opencv_ts310d.lib
opencv_superres310d.lib
opencv_stitching310d.lib
opencv_photo310d.lib
opencv_objdetect310d.lib
opencv_ml310d.lib
opencv_imgproc310d.lib
opencv_highgui310d.lib
opencv_flann310d.lib
opencv_features2d310d.lib
opencv_core310d.lib
opencv_calib3d310d.lib
opencv_xobjdetect310d.lib
opencv_xfeatures2d310.lib
opencv_surface_matching310d.lib
opencv_imgcodecs310d.lib
Any idea?
I think you are using the wrong version of the library. In your system information I can see that all the libs end with a "d", which stands for "Debug"; however, opencv_xfeatures2d310.lib (the module the exception is thrown from, opencv_xfeatures2d310.dll) is a "Release" library. Try changing your build mode or the libs you are using.
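For illustration, the entry would then follow the naming pattern of the other Debug libs in your list (assuming your OpenCV 3.1 build actually produced a debug version of the contrib module):

opencv_xfeatures2d310d.lib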

Align images based on detected features in OpenCV

Hi, I have a base image and other images that I'd like to rotate to the same angle as the base image.
This is my base image.
This is an example image that I'd like to rotate.
Here is my full code.
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#define PI 3.14159265
using namespace cv;
using namespace std;
void rotate(cv::Mat& src, double angle, cv::Mat& dst)
{
    int len = std::max(src.cols, src.rows);
    cv::Point2f pt(len/2., len/2.);
    cv::Mat r = cv::getRotationMatrix2D(pt, angle, 1.0);
    cv::warpAffine(src, dst, r, cv::Size(len, len));
}
float angleBetween(const Point &v1, const Point &v2)
{
    float len1 = sqrt(v1.x * v1.x + v1.y * v1.y);
    float len2 = sqrt(v2.x * v2.x + v2.y * v2.y);
    float dot = v1.x * v2.x + v1.y * v2.y;
    float a = dot / (len1 * len2);
    if (a >= 1.0)
        return 0.0;
    else if (a <= -1.0)
        return 180.0; // antiparallel vectors; the function otherwise returns degrees
    else
    {
        int degree;
        degree = acos(a)*180/PI;
        return degree;
    }
}
int main()
{
    Mat char1 = imread( "/Users/Rodrane/Documents/XCODE/OpenCV/mkedenemeleri/anarev/rotated.jpg", CV_LOAD_IMAGE_GRAYSCALE );
    Mat image = imread( "/Users/Rodrane/Documents/XCODE/OpenCV/mkedenemeleri/anarev/gain2000_crop.jpg", CV_LOAD_IMAGE_GRAYSCALE );
    if( !char1.data )
    {
        std::cout<< "Error reading object " << std::endl;
        return -1;
    }
    GaussianBlur( char1, char1, Size(3, 3), 2, 2 );
    GaussianBlur( image, image, Size(3, 3), 2, 2 );
    adaptiveThreshold(char1,char1,255,CV_ADAPTIVE_THRESH_MEAN_C,CV_THRESH_BINARY,9,14);
    adaptiveThreshold(image,image,255,CV_ADAPTIVE_THRESH_MEAN_C,CV_THRESH_BINARY,9,14);
    //Detect the keypoints using SURF Detector
    int minHessian = 200;
    SurfFeatureDetector detector( minHessian );
    std::vector<KeyPoint> kp_object;
    detector.detect( char1, kp_object );
    //Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat des_object;
    extractor.compute( char1, kp_object, des_object );
    FlannBasedMatcher matcher;
    namedWindow("Good Matches");
    std::vector<Point2f> obj_corners(4);
    //Get the corners from the object
    obj_corners[0] = cvPoint(0,0);
    obj_corners[1] = cvPoint( char1.cols, 0 );
    obj_corners[2] = cvPoint( char1.cols, char1.rows );
    obj_corners[3] = cvPoint( 0, char1.rows );
    Mat frame;
    Mat des_image, img_matches;
    std::vector<KeyPoint> kp_image;
    std::vector<vector<DMatch > > matches;
    std::vector<DMatch > good_matches;
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;
    std::vector<Point2f> scene_corners(4);
    Mat H;
    detector.detect( image, kp_image );
    extractor.compute( image, kp_image, des_image );
    matcher.knnMatch(des_object, des_image, matches, 2);
    for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
    {
        if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
        {
            good_matches.push_back(matches[i][0]);
        }
    }
    //Draw only "good" matches
    drawMatches( char1, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
    if (good_matches.size() >= 4)
    {
        for( int i = 0; i < good_matches.size(); i++ )
        {
            //Get the keypoints from the good matches
            obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
            scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
            cout<<angleBetween(obj[i],scene[i])<<endl; //angles between images
        }
        H = findHomography( obj, scene, CV_RANSAC );
        perspectiveTransform( obj_corners, scene_corners, H);
        // cout<<angleBetween(obj[0], scene[0])<<endl;
        //Draw lines between the corners (the mapped object in the scene image )
    }
    //Show detected matches
    // resize(img_matches, img_matches, Size(img_matches.cols/2, img_matches.rows/2));
    imshow( "Good Matches", img_matches );
    waitKey();
    return 0;
}
What my code actually does is:
I detect the features of both images.
I calculate the angle between a point of my base image and the corresponding point of the example image.
Since all the angles between points are different, how can I rotate my image based on the features?
Also, for example, let's say the features of the character M are detected and the angle is 30: in some conditions, rotating the image by 30 degrees will give me a result that is horizontally aligned but vertically wrong.
The problem is that even if the first features are on the same line, this doesn't mean the example image is rotated correctly (it might need to be rotated 180 degrees more, for example).
I remade your function without using the angles:
void rotate(cv::Mat& originalImage, cv::Mat& rotatedImage, cv::InputArray rotated,
            cv::Mat& dst)
{
    std::vector<cv::Point2f> original(4);
    original[0] = cv::Point( 0, 0 );
    original[1] = cv::Point( originalImage.cols, 0 );
    original[2] = cv::Point( originalImage.cols, originalImage.rows );
    original[3] = cv::Point( 0, originalImage.rows );
    dst = cv::Mat::zeros(originalImage.rows, originalImage.cols, CV_8UC3);
    cv::Mat transform = cv::getPerspectiveTransform(rotated, original);
    cv::warpPerspective(rotatedImage, dst, transform, dst.size() );
}
Note that the input 'rotated' is, in your case, 'scene_corners', and 'dst' is the resulting image.
Hope that helps!
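A usage sketch, under the assumption that it is called from the question's main() after H and scene_corners have been computed:

// char1 is the base image, image is the rotated example,
// scene_corners are the template corners mapped into 'image' by H.
Mat aligned;
rotate(char1, image, scene_corners, aligned);
imshow("Aligned", aligned);
waitKey();

getPerspectiveTransform needs exactly those four corner points, so this only applies when the homography step actually found the object.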

error in miniflann.cpp using SURF descriptors

I have been working with OpenCV for some time. This time, I faced a problem that irritated me very much.
I have a template image and I want to use matching to recognize it in my camera stream, but I get this console error:
OpenCV Error: Unsupported format or combination of formats (type=0) in unknown function, file ..\..\..\opencv\modules\flann\src\miniflann.cpp, line 299
The code compiles well, but the error appears at execution.
#include "stdafx.h"
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>
#include "opencv2/core/core.hpp"
#include "opencv2/nonfree/features2d.hpp"
#include "opencv2/features2d/features2d.hpp"
//#include "opencv2/legacy/legacy.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
using namespace cv;
using namespace std;
int main()
{
//reference image
Mat object = imread( "tel_tmpl.jpg", CV_LOAD_IMAGE_GRAYSCALE );
if( !object.data )
{
std::cout<< "Error reading object " << std::endl;
return -1;
}
char key = 'a';
int framecount = 0;
SurfFeatureDetector detector( 500 );
SurfDescriptorExtractor extractor;
FlannBasedMatcher matcher;
Mat frame, des_object, image;
Mat des_image, img_matches, H;
std::vector<KeyPoint> kp_object;
std::vector<Point2f> obj_corners(4);
std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch > > matches;
std::vector<DMatch > good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
//compute detectors and descriptors of reference image
detector.detect( object, kp_object );
extractor.compute( object, kp_object, des_object );
//create video capture object
VideoCapture cap(0);
//Get the corners from the object
obj_corners[0] = cvPoint(0,0);
obj_corners[1] = cvPoint( object.cols, 0 );
obj_corners[2] = cvPoint( object.cols, object.rows );
obj_corners[3] = cvPoint( 0, object.rows );
//wile loop for real time detection
while (key != 27)
{
//capture one frame from video and store it into image object name 'frame'
cap >> frame;
if (framecount < 5)
{
framecount++;
continue;
}
//converting captured frame into gray scale
cvtColor(frame, image, CV_RGB2GRAY);
//extract detectors and descriptors of captured frame
detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );
//find matching descriptors of reference and captured image
matcher.knnMatch(des_object, des_image, matches, 2);
//finding matching keypoints with Euclidean distance 0.6 times the distance of next keypoint
//used to find right matches
for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++)
{
if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
{
good_matches.push_back(matches[i][0]);
}
}
//Draw only "good" matches
drawMatches( object, kp_object, frame, kp_image, good_matches, img_matches,
Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//3 good matches are enough to describe an object as a right match.
if (good_matches.size() >= 3)
{
for( int i = 0; i < good_matches.size(); i++ )
{
//Get the keypoints from the good matches
obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
}
try
{
H = findHomography( obj, scene, CV_RANSAC );
}
catch(Exception e){}
perspectiveTransform( obj_corners, scene_corners, H);
//Draw lines between the corners (the mapped object in the scene image )
line( img_matches, scene_corners[0] + Point2f( object.cols, 0), scene_corners[1] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
line( img_matches, scene_corners[1] + Point2f( object.cols, 0), scene_corners[2] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[2] + Point2f( object.cols, 0), scene_corners[3] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
line( img_matches, scene_corners[3] + Point2f( object.cols, 0), scene_corners[0] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
}
//Show detected matches
imshow( "Good Matches", img_matches );
//clear array
good_matches.clear();
key = waitKey(1);
}
return 0;
}
Thanks in advance
When I changed the camera, it worked fine. I don't know why I should have to change the camera?!
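A possible explanation (an assumption, not something confirmed in this thread): type=0 in miniflann.cpp usually means the descriptor matrix handed to the FLANN matcher is empty, which happens when a frame yields no SURF keypoints at all, e.g. a camera that delivers black or blank startup frames. A minimal guard before the match call:

// Skip frames that produced no descriptors; FLANN needs a non-empty CV_32F matrix.
if (des_image.empty())
{
    key = waitKey(1);
    continue;
}
matcher.knnMatch(des_object, des_image, matches, 2);

That would also explain why swapping the camera made the error disappear.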

OpenCV: How to find the center of mass/centroid for motion information

The thing is, I am unable to implement the center of mass with the existing code (which image object to use, etc., after the detected object is bounded by the rectangle) so that I may get the trajectory of the path.
I am using OpenCV 2.3. I found out there are two methods: Link1 and Link2 talk about the usage of moments, and the other method is to use the information of the bounding box (Link3). The method of moments requires image thresholding; however, when using SURF the image is in grayscale, so passing a gray image for thresholding displays a white image! Now, I am having a tough time understanding how I should calculate the centroid using the code below (especially what I should use instead of points[i].x, since I am using
obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
where in my case numPoints = good_matches.size(), the number of feature points) as mentioned in the documentation. If anyone can put up an implementation of how to use SURF with a centroid, that would be helpful.
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
using namespace cv;
using namespace std; // needed for the unqualified vector<> and min() below
int main()
{
    Mat object = imread( "object.png", CV_LOAD_IMAGE_GRAYSCALE );
    if( !object.data )
    {
        std::cout<< "Error reading object " << std::endl;
        return -1;
    }
    //Detect the keypoints using SURF Detector
    int minHessian = 500;
    SurfFeatureDetector detector( minHessian );
    std::vector<KeyPoint> kp_object;
    detector.detect( object, kp_object );
    //Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat des_object;
    extractor.compute( object, kp_object, des_object );
    FlannBasedMatcher matcher;
    VideoCapture cap(0);
    namedWindow("Good Matches");
    std::vector<Point2f> obj_corners(4);
    //Get the corners from the object
    obj_corners[0] = cvPoint(0,0);
    obj_corners[1] = cvPoint( object.cols, 0 );
    obj_corners[2] = cvPoint( object.cols, object.rows );
    obj_corners[3] = cvPoint( 0, object.rows );
    char key = 'a';
    int framecount = 0;
    while (key != 27)
    {
        Mat frame;
        cap >> frame;
        if (framecount < 5)
        {
            framecount++;
            continue;
        }
        Mat des_image, img_matches;
        std::vector<KeyPoint> kp_image;
        std::vector<vector<DMatch > > matches;
        std::vector<DMatch > good_matches;
        std::vector<Point2f> obj;
        std::vector<Point2f> scene;
        std::vector<Point2f> scene_corners(4);
        Mat H;
        Mat image;
        cvtColor(frame, image, CV_RGB2GRAY);
        detector.detect( image, kp_image );
        extractor.compute( image, kp_image, des_image );
        matcher.knnMatch(des_object, des_image, matches, 2);
        for(int i = 0; i < min(des_image.rows-1,(int) matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
        {
            // ratio test against the second-nearest neighbour: knnMatch was
            // called with k=2, so index 1 is the last valid entry per row
            if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int) matches[i].size()<=2 && (int) matches[i].size()>0))
            {
                good_matches.push_back(matches[i][0]);
            }
        }
        //Draw only "good" matches
        drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
        if (good_matches.size() >= 4)
        {
            for( int i = 0; i < good_matches.size(); i++ )
            {
                //Get the keypoints from the good matches
                obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
                scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
            }
            H = findHomography( obj, scene, CV_RANSAC );
            perspectiveTransform( obj_corners, scene_corners, H);
            //Draw lines between the corners (the mapped object in the scene image )
            line( img_matches, scene_corners[0] + Point2f( object.cols, 0), scene_corners[1] + Point2f( object.cols, 0), Scalar(0, 255, 0), 4 );
            line( img_matches, scene_corners[1] + Point2f( object.cols, 0), scene_corners[2] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[2] + Point2f( object.cols, 0), scene_corners[3] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
            line( img_matches, scene_corners[3] + Point2f( object.cols, 0), scene_corners[0] + Point2f( object.cols, 0), Scalar( 0, 255, 0), 4 );
        }
        //Show detected matches
        imshow( "Good Matches", img_matches );
        key = waitKey(1);
    }
    return 0;
}
So, you already got your point lists:
obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
I think it's perfectly valid to calculate the centroid based on those; no further image processing is necessary.
There are two methods. The 'center of mass' way is just the mean position of all points, like this:
Point2f cen(0,0);
for ( size_t i=0; i<scene.size(); i++ )
{
    cen.x += scene[i].x;
    cen.y += scene[i].y;
}
cen.x /= scene.size();
cen.y /= scene.size();
and the 'center of bbox' way:
Point2f pmin(1000000,1000000);
Point2f pmax(0,0);
for ( size_t i=0; i<scene.size(); i++ )
{
    if ( scene[i].x < pmin.x ) pmin.x = scene[i].x;
    if ( scene[i].y < pmin.y ) pmin.y = scene[i].y;
    if ( scene[i].x > pmax.x ) pmax.x = scene[i].x;
    if ( scene[i].y > pmax.y ) pmax.y = scene[i].y;
}
// midpoint of the bounding box, i.e. halfway between pmin and pmax:
Point2f cen( (pmin.x + pmax.x)/2, (pmin.y + pmax.y)/2 );
Note that the results will be different! They're only the same for point-symmetric objects such as circles and squares.
// now draw a circle around the centroid
// (img is whatever image you are drawing on, e.g. img_matches):
cv::circle( img, cen, 10, Scalar(0,0,255), 2 );
// and a line connecting each matched query/train pair:
for ( size_t i=0; i<scene.size(); i++ )
    cv::line( img, scene[i], obj[i], Scalar(255,0,0), 2 );