I've been trying to get a SURF implementation working and I've had some trouble; now I finally think I've got it 'right', but I have one small problem.
The problem is pretty straightforward and I'm sure it's something simple, but I can't solve it. The image displays fine and everything, but the matches are not drawn. Here is my code.
#include <stdio.h>
#include "opencv2/xfeatures2d.hpp"
#include <opencv2/opencv.hpp>
#include "opencv2/features2d/features2d.hpp"
using namespace cv;
using namespace std;
int main(int argc, char** argv) {
    if (argc != 3) {
        printf("usage: DisplayImage.out <Image1_Path> <Image2_Path>\n");
        return -1;
    }
    Mat image, image2;
    image = imread(argv[1], 0);
    image2 = imread(argv[2], 0);
    if (!image.data) {
        printf("No image data \n");
        return -1;
    }
    if (!image2.data) {
        printf("No image data \n");
        return -1;
    }
    /*
    Ptr<FeatureDetector> detector = FastFeatureDetector::create(15);
    vector<KeyPoint> keypoints1, keypoints2;
    detector->detect(image, keypoints1);
    detector->detect(image2, keypoints2);
    Ptr<xfeatures2d::SURF> extractor = xfeatures2d::SURF::create();
    Mat descriptors1, descriptors2;
    extractor->compute(image, keypoints1, descriptors1);
    extractor->compute(image2, keypoints2, descriptors2);
    BFMatcher::BFMatcher matcher(L2<float>);
    //BruteForceMatcher<L2<float> > matcher;
    vector<DMatch> matches;
    matcher.match(descriptors1, descriptors2, matches);
    */
    Ptr<xfeatures2d::SURF> surf = cv::xfeatures2d::SURF::create();
    vector<KeyPoint> keypoints1, keypoints2;
    Mat descriptors1, descriptors2;
    surf->detectAndCompute(image, noArray(), keypoints1, descriptors1, true);
    //BFMatcher::BFMatcher matcher(L2<float>);
    BFMatcher::BFMatcher matcher(NORM_HAMMING);
    vector<DMatch> matches;
    Mat res;
    matcher.match(descriptors1, descriptors2, matches);
    drawMatches(image, keypoints1, image2, keypoints2, matches, res);
    namedWindow("Display Image", WINDOW_AUTOSIZE);
    imshow("Display Image", res);
    waitKey(0);
    return 0;
}
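Two things stand out here, for what it's worth. detectAndCompute is only ever called for the first image, so keypoints2 and descriptors2 stay empty, and the final true argument asks SURF to reuse the provided keypoints, which are also empty at that point. In addition, NORM_HAMMING is meant for binary descriptors such as ORB or BRIEF; SURF produces floating-point descriptors, which should be matched with NORM_L2. A minimal sketch of the matching portion with those points addressed (same variable names as above):

Ptr<xfeatures2d::SURF> surf = xfeatures2d::SURF::create();
vector<KeyPoint> keypoints1, keypoints2;
Mat descriptors1, descriptors2;
// detect and compute on BOTH images, letting SURF find its own keypoints
surf->detectAndCompute(image,  noArray(), keypoints1, descriptors1);
surf->detectAndCompute(image2, noArray(), keypoints2, descriptors2);
BFMatcher matcher(NORM_L2);   // L2 norm for float descriptors (SURF/SIFT)
vector<DMatch> matches;
matcher.match(descriptors1, descriptors2, matches);
Mat res;
drawMatches(image, keypoints1, image2, keypoints2, matches, res);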
I am using OpenCV to add a face recognition feature to my C++ program. I have never used it before, and I can't seem to get the cascade feature for facial recognition to work. I am wondering if they have made some changes to the FLAGS in the new version. I can display an image, but when it comes to the cascade it always throws an error. Can anyone tell me what I am missing?
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <stdio.h>
#include <iostream>
#include <opencv2/objdetect.hpp>
using namespace std;
using namespace cv;
String face_cascade_name = "sources/data/haarcascades/haarcascade_frontalface_default.xml";
String eyes_cascade_name = "sources/data/haarcascades/haarcascade_eye_tree_eyeglasses.xml";
String smile_cascade_name = "sources/data/haarcascades/haarcascade_smile.xml";
CascadeClassifier face_cascade;
CascadeClassifier eyes_cascade;
CascadeClassifier smile_cascade;
string window_name = "Capture - Face detection";
RNG rng(12345);
int main()
{
    //-- 1. Load the cascades
    if (!face_cascade.load(face_cascade_name)) { printf("--(!)Error loading file 1\n"); return -1; };
    if (!eyes_cascade.load(eyes_cascade_name)) { printf("--(!)Error loading file 2\n"); return -1; };
    if (!smile_cascade.load(smile_cascade_name)) { printf("--(!)Error loading file 3\n"); return -1; };
    std::string image_path = samples::findFile("test.jpg");
    Mat img = imread(image_path, IMREAD_COLOR);
    Mat img_gry;
    if (img.empty())
    {
        std::cout << "Could not read the image: " << image_path << std::endl;
        return 1;
    }
    imshow("Display window", img);
    // Detect faces
    std::vector<Rect> faces;
    cvtColor(img, img_gry, COLOR_BGR2GRAY);
    equalizeHist(img, img_gry);
    //I GET ERROR HERE
    face_cascade.detectMultiScale(img_gry, faces, 1.1, 2, CASCADE_SCALE_IMAGE, Size(30, 30), Size(130, 130)); //I GET ERROR HERE
    /*...REST WILL BE PARSING faces...*/
    int g_key = waitKey(0); // Wait for a keystroke in the window
    if (g_key == 's')
    {
        imwrite("starry_night.png", img); //save image in same path
    }
    return 0;
}
I modified the code as follows:
/*...
OTHER CODE
...*/
std::vector<Rect> faces;
cvtColor(img, img_gry, COLOR_BGR2GRAY);
equalizeHist(img_gry, img_gry);
face_cascade.detectMultiScale(img_gry, faces);
for (size_t i = 0; i < faces.size(); i++)
{
    /*...PARSE...*/
}
imshow("Display window", img);
As @SourceCode mentioned:
equalizeHist( smallImg, smallImg); //my variable is img_gry instead.
But I also modified:
face_cascade.detectMultiScale(img_gry, faces, 1.1, 2, CASCADE_SCALE_IMAGE, Size(30, 30), Size(130, 130));
To
face_cascade.detectMultiScale(img_gry, faces);
Otherwise it does not work in my case.
Source if that helps: https://docs.opencv.org/3.4/db/d28/tutorial_cascade_classifier.html
Have you tried using something like:
face_cascade.detectMultiScale(img_gry, faces, 1.1, 2, 0|CASCADE_SCALE_IMAGE, Size(30, 30));
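For reference, a hedged annotation of what each parameter in the long overload means; the flags argument is a legacy parameter that new-style cascades ignore, so an exception on this call usually points at the input image (it must be 8-bit grayscale) rather than at the flag value:

face_cascade.detectMultiScale(
    img_gry,                  // input: 8-bit grayscale image
    faces,                    // output: detected face rectangles
    1.1,                      // scaleFactor: image pyramid step
    2,                        // minNeighbors: how many neighbours each candidate needs
    0 | CASCADE_SCALE_IMAGE,  // flags: legacy, ignored by new-style cascades
    Size(30, 30),             // minSize: smallest object to search for
    Size(130, 130));          // maxSize: largest object to search for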
I'm trying to use SimpleBlobDetector in OpenCV 3 to detect blobs of heat in thermal images, for example people. Any simple code or example will be appreciated.
I tried:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
#include "opencv2\features2d.hpp"
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
    if (argc != 2)
    {
        cout << " Usage: display_image ImageToLoadAndDisplay" << endl;
        return 0;
    }
    Mat image;
    image = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE); // Read the file
    if (!image.data) // Check for invalid input
    {
        cout << "Could not open or find the image" << std::endl;
        return 0;
    }
    // Set up the detector with default parameters.
    //SimpleBlobDetector detector;
    // Setup SimpleBlobDetector parameters.
    SimpleBlobDetector::Params params;
    // Change thresholds
    params.minThreshold = 50;
    params.maxThreshold = 200;
    // Filter by Area.
    params.filterByArea = true;
    params.minArea = 1500;
    // Filter by Circularity
    params.filterByCircularity = true;
    params.minCircularity = 0.1;
    // Filter by Convexity
    params.filterByConvexity = true;
    params.minConvexity = 0.87;
    // Filter by Inertia
    params.filterByInertia = true;
    params.minInertiaRatio = 0.01;
    // Detect blobs.
    std::vector<KeyPoint> keypoints;
    cv::Ptr<cv::SimpleBlobDetector> detector = cv::SimpleBlobDetector::create(params);
    //detector->detect(img, keypoints);
    detector->detect(image, keypoints);
    //params.detect(image, keypoints);
    // Draw detected blobs as red circles.
    //DrawMatchesFlags::DRAW_RICH_KEYPOINTS flag ensures the size of the circle corresponds to the size of blob
    Mat im_with_keypoints;
    drawKeypoints(image, keypoints, im_with_keypoints, Scalar(0, 0, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
    // Show blobs
    imshow("keypoints", im_with_keypoints);
    waitKey(0);
    //namedWindow("Display window", WINDOW_AUTOSIZE);// Create a window for display.
    //imshow("Display window", image); // Show our image inside it.
    //waitKey(0); // Wait for a keystroke in the window
    //return 0;
}
but it just returns the gray image unchanged.
To detect blobs with OpenCV you need to:
Instantiate a SimpleBlobDetector type
Declare a vector of type KeyPoints
Call SimpleBlobDetector::detect()
There is a brilliant tutorial online here (where I nicked the code from): https://www.learnopencv.com/blob-detection-using-opencv-python-c/
using namespace cv;
Mat im = imread( "blob.jpg", IMREAD_GRAYSCALE );
// In OpenCV 3 the detector must be created through the factory method;
// a plain 'SimpleBlobDetector detector;' no longer compiles.
Ptr<SimpleBlobDetector> detector = SimpleBlobDetector::create();
std::vector<KeyPoint> keypoints;
detector->detect( im, keypoints );
Mat im_with_keypoints;
drawKeypoints( im, keypoints, im_with_keypoints, Scalar(0,0,255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS );
imshow("keypoints", im_with_keypoints ); // Show blobs
waitKey(0);
You can also adjust the parameters to select blobs with specific attributes; it's all listed in the tutorial. I'd suggest having a play, to get a feel for how it works.
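For the thermal use case in the question specifically, one parameter is worth singling out: SimpleBlobDetector looks for dark blobs by default (blobColor = 0), while hot regions in a thermal image are usually bright. A hedged sketch of the adjustment, reusing the grayscale image im from above:

SimpleBlobDetector::Params params;
params.filterByColor = true;
params.blobColor = 255;   // 255 = bright blobs (hot areas); the default 0 = dark blobs
params.filterByArea = true;
params.minArea = 1500;    // tune to the expected pixel size of a person
Ptr<SimpleBlobDetector> detector = SimpleBlobDetector::create(params);
std::vector<KeyPoint> keypoints;
detector->detect(im, keypoints);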
Hi, I get an error when I stitch frames from a video.
Here's my code:
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
using namespace std;
using namespace cv;
Mat Stitching(Mat image1, Mat image2) {
    Mat gray_image1;
    Mat gray_image2;
    // Convert to Grayscale
    cvtColor( image1, gray_image1, CV_RGB2GRAY );
    cvtColor( image2, gray_image2, CV_RGB2GRAY );
    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 10;
    SurfFeatureDetector detector( minHessian );
    std::vector< KeyPoint > keypoints_object, keypoints_scene;
    detector.detect( gray_image1, keypoints_object );
    detector.detect( gray_image2, keypoints_scene );
    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat descriptors_object, descriptors_scene;
    extractor.compute( gray_image1, keypoints_object, descriptors_object );
    extractor.compute( gray_image2, keypoints_scene, descriptors_scene );
    //-- Step 3: Matching descriptor vectors using FLANN matcher
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match( descriptors_object, descriptors_scene, matches );
    double max_dist = 0; double min_dist = 100;
    //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < descriptors_object.rows; i++ )
    {
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }
    printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );
    //-- Use only "good" matches (i.e. whose distance is less than 3*min_dist )
    std::vector< DMatch > good_matches;
    for( int i = 0; i < descriptors_object.rows; i++ )
    {
        if( matches[i].distance < 3*min_dist )
        {
            good_matches.push_back( matches[i] );
        }
    }
    std::vector< Point2f > obj;
    std::vector< Point2f > scene;
    for( int i = 0; i < good_matches.size(); i++ )
    {
        //-- Get the keypoints from the good matches
        obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
        scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
    }
    // Find the Homography Matrix
    Mat H = findHomography( obj, scene, CV_RANSAC );
    // Use the Homography Matrix to warp the images
    cv::Mat result;
    warpPerspective(image1, result, H, cv::Size(800, 600));
    cv::Mat half(result, cv::Rect(0, 0, image2.cols, image2.rows));
    image2.copyTo(half);
    //imshow( "Result", result );
    return result;
}
/** @function main */
int main( int argc, char** argv )
{
    // Load the images
    //Mat image1= imread( "E:\\Tugas Akhir\\image\\city2.jpg" );
    //Mat image2= imread( "E:\\Tugas Akhir\\image\\city1.jpg" );
    char *fileName = "E:\\Tugas Akhir\\Video Master\\indv_img_3a.avi";
    /* Create a window */
    cvNamedWindow("Stitching", CV_WINDOW_AUTOSIZE);
    /* capture frame from video file */
    CvCapture* capture = cvCreateFileCapture(fileName);
    /* Create IplImage to point to each frame */
    IplImage* frame;
    IplImage before_frame;
    Mat image1;
    Mat image2;
    cv::Mat result;
    /* Loop until frame ended or ESC is pressed */
    int loop = 0;
    //imshow( "Result", Stitching(image1,image2));
    while(1) {
        frame = cvQueryFrame(capture);
        if(loop > 0){
            if(!frame) break;
            image2 = Mat(frame, false);
            result = Stitching(image1, image2);
            before_frame = result;
            frame = &before_frame;
            image1 = result;
            image2.release();
            //imshow("Stitching",frame);
            cvShowImage("Stitching", frame);
            //break;
        } else if(loop == 0){
            //Mat aimage1(frame);
            image1 = Mat(frame, false);
        }
        loop++;
        char c = cvWaitKey(33);
        if(c == 27) break;
    }
    cvReleaseCapture(&capture);
    /* delete window */
    // cvDestroyWindow("Stitching");
    // return EXIT_SUCCESS;
    waitKey(0);
    return 0;
}
If I load from image files it works and the images are stitched, but when I try to stitch an image from every video frame, it shows this error:
First-chance exception at 0x000007f886dd64a8 in matchingHomography.exe: Microsoft C++ exception: cv::Exception at memory location 0x0080e3b0..
Unhandled exception at 0x000007f886dd64a8 in matchingHomography.exe: Microsoft C++ exception: cv::Exception at memory location 0x0080e3b0..
The error is at this line:
Mat H = findHomography( obj, scene, CV_RANSAC );
What does the error mean, and how do I solve it?
Thanks.
First off, you seem to be mixing the C and C++ interfaces of OpenCV (see the OpenCV VideoCapture doc). For better readability, stick to one of them; since you are using C++, just stick to the C++ functions.
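As an illustration (a sketch only, assuming the Stitching function from the question is kept as-is), here is the same frame loop written purely against the C++ interface, with VideoCapture and Mat replacing CvCapture, IplImage and the cv* calls:

cv::VideoCapture capture("E:\\Tugas Akhir\\Video Master\\indv_img_3a.avi");
cv::Mat frame, image1, result;
int loop = 0;
while (capture.read(frame)) {         // read() returns false at end of video
    if (loop == 0) {
        image1 = frame.clone();       // clone: the capture buffer is reused
    } else {
        result = Stitching(image1, frame.clone());
        image1 = result;              // feed the stitched result back in
        cv::imshow("Stitching", result);
    }
    loop++;
    if (cv::waitKey(33) == 27) break; // ESC to exit
}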
Since loading from an image works but video doesn't, your video loading is probably the problem.
Try using cv::imshow("testWindow", frame) to show the frame loaded from the video. Most likely no frame was loaded.
One possible cause is that the video file is encoded in a format not supported by OpenCV. To check, you can also run grab() and then retrieve(). The grab function returns whether it was successful. Try grabbing a couple of frames; if all of them fail, you probably don't have the necessary codec to decode this video.
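A hedged sketch of that check (the file path is the one from the question):

cv::VideoCapture cap("E:\\Tugas Akhir\\Video Master\\indv_img_3a.avi");
if (!cap.isOpened()) {
    std::cerr << "Could not open video file" << std::endl;
    return -1;
}
cv::Mat frame;
for (int i = 0; i < 5; ++i) {
    if (!cap.grab()) {                 // grab() returns false when no frame could be decoded
        std::cerr << "grab() failed at frame " << i << std::endl;
        break;
    }
    cap.retrieve(frame);               // fetch the frame that was just grabbed
    cv::imshow("testWindow", frame);   // should show a valid image
    cv::waitKey(30);
}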
The following is my code, which extracts features using SURF and matches the points using FlannBasedMatcher.
Mat object = imread("S6E0.bmp", CV_LOAD_IMAGE_GRAYSCALE);
if( !object.data )
{
    // std::cout<< "Error reading object " << std::endl;
    return -2;
}
//Detect the keypoints using SURF Detector
int minHessian = 500;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> kp_object;
detector.detect( object, kp_object );
//Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat des_object;
extractor.compute( object, kp_object, des_object );
FlannBasedMatcher matcher;
char key = 'a';
//VideoCapture cap(0);
namedWindow("Good Matches");
std::vector<Point2f> obj_corners(4);
//Get the corners from the object
obj_corners[0] = cvPoint(0, 0);
obj_corners[1] = cvPoint( object.cols, 0 );
obj_corners[2] = cvPoint( object.cols, object.rows );
obj_corners[3] = cvPoint( 0, object.rows );
Mat image = imread("S6E0.bmp", CV_LOAD_IMAGE_GRAYSCALE);
Mat des_image, img_matches;
std::vector<KeyPoint> kp_image;
std::vector<vector<DMatch> > matches;
std::vector<std::vector<cv::DMatch> > matches1;
std::vector<std::vector<cv::DMatch> > matches2;
std::vector<cv::DMatch> matches3;
std::vector<DMatch> good_matches;
std::vector<Point2f> obj;
std::vector<Point2f> scene;
std::vector<Point2f> scene_corners(4);
Mat H;
//cvtColor(frame, image, CV_RGB2GRAY);
detector.detect( image, kp_image );
extractor.compute( image, kp_image, des_image );
matcher.knnMatch(des_object, des_image, matches, 2);
for(int i = 0; i < min(des_image.rows-1, (int)matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
    if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int)matches[i].size() <= 2 && (int)matches[i].size() > 0))
    {
        good_matches.push_back(matches[i][0]);
    }
}
//Draw only "good" matches
drawMatches( object, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
if (good_matches.size() >= 4)
{
    printf("Images matching %d , %d", good_matches.size(), kp_object.size());
    //return 1;
    for( int i = 0; i < good_matches.size(); i++ )
    {
        //Get the keypoints from the good matches
        obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
        scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
    }
    //H = findHomography( obj, scene, CV_RANSAC );
    //printf("Size : %d", H.size());
    //perspectiveTransform( obj_corners, scene_corners, H);
    //printf("Size : %d --- %d --- %d", H.size(), scene_corners.size());
}
else
{
    printf("Images matching %d , %d", good_matches.size(), kp_object.size());
}
//Show detected matches
imshow( "Good Matches", img_matches );
waitKey(0);
return 0;
In this code, I want to know what exactly happens inside this method:
matcher.knnMatch(des_object, des_image, matches, 2);
As I understand it, I pass the two descriptor sets of the images being matched, and the matches vector is filled with the 2 nearest neighbours. I want to know what exactly happens in the method, how the matches vector is filled, and which points are put into it.
In this code segment
for(int i = 0; i < min(des_image.rows-1, (int)matches.size()); i++) //THIS LOOP IS SENSITIVE TO SEGFAULTS
{
    if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int)matches[i].size() <= 2 && (int)matches[i].size() > 0))
    {
        good_matches.push_back(matches[i][0]);
    }
}
I am using a nearest neighbour distance ratio (NNDR) of 0.6, and I want to know how the good_matches are found and what effect changing the NNDR value will have.
It would be a great help if I could understand this code.
Thanks.
The FlannBasedMatcher is based on the paper by Muja et al.; you can find the exact algorithm and how they go about it there.
Regarding good_matches, as you saw in the code snippet itself, it is the collection of the best matches in your result, selected by the criterion you chose, i.e., the NNDR. It is basically a threshold that decides how far a match may be before it is dropped altogether. The higher the threshold, the more points are considered and the greater the number of positive matches (whether they are true positives is determined by your dataset and the way you have set the NNDR level).
Hope this helps.
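To make the threshold's effect concrete, here is a hedged restatement of the loop from the question with the ratio pulled out as a named variable; raising nndr toward 1.0 keeps more (but less reliable) matches, while lowering it keeps fewer, safer ones:

const float nndr = 0.6f;  // nearest neighbour distance ratio
std::vector<cv::DMatch> good_matches;
for (size_t i = 0; i < matches.size(); i++)
{
    // keep the best neighbour only when it is clearly closer than the second best
    if (matches[i].size() == 2 && matches[i][0].distance < nndr * matches[i][1].distance)
        good_matches.push_back(matches[i][0]);
}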
I want to capture a video and display it in one window, and have a second window in which contours are displayed simultaneously. I am struggling with how to get the processed video displayed in the second window. Please analyze my code and suggest a solution, or indicate where I am going wrong; maybe give me some directions to an online tutorial or other sources. Thanks.
#include "iostream"
#include<opencv\cv.h>
#include<opencv\highgui.h>
#include<opencv\ml.h>
#include<opencv\cxcore.h>
#include <iostream>
#include <vector>
#include <string>
#include <opencv2/core/core.hpp> // Basic OpenCV structures (cv::Mat)
#include <opencv2/highgui/highgui.hpp> // Video write
using namespace cv;
using namespace std;
Mat image; Mat image_gray; Mat image_gray2; Mat threshold_output;
Mat frame;
int thresh=100, max_thresh=255;
int main(int argc, char** argv) {
    //Capture Video
    VideoCapture capCam(1);
    if (!capCam.isOpened()){
        cout << "ERROR: Failed to Initialize Camera" << endl;
        return 1;
    }
    else{
        cout << "Camera Initialized" << endl;
    }
    //Create Window
    char* ImputFootage = "Source";
    namedWindow(ImputFootage, CV_WINDOW_AUTOSIZE);
    imshow(ImputFootage, frame);
    char* OutputFootage = "Processed";
    namedWindow(OutputFootage, CV_WINDOW_AUTOSIZE);
    imshow(OutputFootage, frame);
    while(1){
        capCam >> frame;
        imshow("Source", frame);
        return(1);
        if(capCam.read(ImputFootage)){
            //Convert Image to gray & blur it
            cvtColor( image,
                      image_gray,
                      CV_BGR2GRAY );
            blur( image_gray,
                  image_gray2,
                  Size(3,3) );
            //Threshold Gray&Blur Image
            threshold(image_gray2,
                      threshold_output,
                      thresh,
                      max_thresh,
                      THRESH_BINARY);
            //2D Container
            vector<vector<Point> > contours;
            //Fnd Countours Points, (Imput Image, Storage, Mode1, Mode2, Offset??)
            findContours(threshold_output,
                         contours, // a vector of contours
                         CV_RETR_EXTERNAL, // retrieve the external contours
                         CV_CHAIN_APPROX_NONE,
                         Point(0, 0)); // all pixels of each contours
            // Draw black contours on a white image
            Mat result(threshold_output.size(), CV_8U, Scalar(255));
            drawContours(result, contours,
                         -1, // draw all contours
                         Scalar(0), // in black
                         2); // with a thickness of 2
        }
    }
    char CheckForEscKey = waitKey(10);
    return 1;
}
You should call imshow("Processed", result); after calling drawContours.
You were trying to show frames even before they were captured from the camera. The compiler did not give you an error because the Mats were declared, but they had no value (null). Moreover, you were trying to display the Mat image, but what you capture from the camera is the Mat frame. Also, you lacked an exit (Esc) sequence, and your waitKey was OUTSIDE the camera loop.
Anyway, here is your code (rewritten); I hope this is what you wanted.
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <cstdio>
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
using namespace std;
using namespace cv;
Mat image;
Mat image_gray;
Mat image_gray2;
Mat threshold_output;
Mat frame;
int thresh = 100, max_thresh = 255;
int main(int argc, char** argv)
{
    //Capture Video
    VideoCapture capCam(0);
    if (!capCam.isOpened())
    {
        cout << "ERROR: Failed to Initialize Camera" << endl;
        return 1;
    }
    else
    {
        cout << "Camera Initialized" << endl;
    }
    //Create Window
    char* ImputFootage = "Source";
    namedWindow(ImputFootage, CV_WINDOW_AUTOSIZE);
    char* OutputFootage = "Processed";
    namedWindow(OutputFootage, CV_WINDOW_AUTOSIZE);
    while (1)
    {
        capCam >> frame;
        imshow(ImputFootage, frame);
        if (capCam.read(frame))
        {
            //Convert Image to gray & blur it
            cvtColor(frame, image_gray, CV_BGR2GRAY);
            blur(image_gray, image_gray2, Size(3, 3));
            //Threshold Gray&Blur Image
            threshold(image_gray2, threshold_output, thresh, max_thresh, THRESH_BINARY);
            //2D Container
            vector<vector<Point> > contours;
            //Find Contour Points: (Input Image, Storage, Mode1, Mode2, Offset)
            findContours(threshold_output, contours, // a vector of contours
                         CV_RETR_EXTERNAL, // retrieve the external contours
                         CV_CHAIN_APPROX_NONE, Point(0, 0)); // all pixels of each contours
            // Draw black contours on a white image
            Mat result(threshold_output.size(), CV_8U, Scalar(255));
            drawContours(result, contours, -1, // draw all contours
                         Scalar(0), // in black
                         2); // with a thickness of 2
            imshow(OutputFootage, result);
            char CheckForEscKey = waitKey(10);
            //If the key pressed by user is Esc (ASCII 27), then break out of the loop
            if (CheckForEscKey == 27)
            {
                break;
            }
        }
    }
    return 0;
}