I'm working on an image matching project with OpenCV.
The lines
std::vector<cv::Keypoint> keypoints1;
std::vector<cv::Keypoint> keypoints2;
have the error: namespace "cv" has no member "Keypoint"
How do I solve this?
Another error is in the code
//Define feature detector
cv::FastFeatureDetector fastDet(80);
//Keypoint detection
fastDet.detect(image1, keypoints1);
fastDet.detect(image2, keypoints2);
where the error says:
object of abstract class type "cv::FastFeatureDetector" is not allowed:
function "cv::FastFeatureDetector::setThreshold" is a pure virtual function
function "cv::FastFeatureDetector::getThreshold" is a pure virtual function
function "cv::FastFeatureDetector::setNonmaxSuppression" is a pure virtual function
function "cv::FastFeatureDetector::getNonmaxSuppression" is a pure virtual function
function "cv::FastFeatureDetector::setType" is a pure virtual function
function "cv::FastFeatureDetector::getType" is a pure virtual function
Can someone please help?
Here is the whole code:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2\features2d\features2d.hpp"
#include"opencv2\core.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
using namespace cv;
using namespace std;
void main(int argc, const char** argv)
{
Mat image1 = imread("image1.jpg", CV_LOAD_IMAGE_UNCHANGED);
Mat image2 = imread("image2.jpg", CV_LOAD_IMAGE_UNCHANGED);
//Define keypoints vector
std::vector<cv::Keypoint> keypoints1;
std::vector<cv::Keypoint> keypoints2;
//Define feature detector
cv::FastFeatureDetector fastDet(80);
//Keypoint detection
fastDet.detect(image1, keypoints1);
fastDet.detect(image2, keypoints2);
//Define a square neighbourhood
const int nsize(11); //size of the neighbourhood
cv::Rect neighbourhood(0, 0, nsize, nsize); //11x11
cv::Mat patch1;
cv::Mat patch2;
//For all points in first image
//find the best match in second image
cv::Mat result;
std::vector<cv::DMatch> matches;
//for all keypoints in image 1
for (int i = 0; i < keypoints1.size(); i++)
{
//define image patch
neighbourhood.x = keypoints1[i].pt.x - nsize / 2;
neighbourhood.y = keypoints1[i].pt.y - nsize / 2;
//if neighbourhood of points outside image,
//then continue with next point
if (neighbourhood.x < 0 || neighbourhood.y < 0 || neighbourhood.x + nsize >= image1.cols || neighbourhood.y + nsize >= image1.rows)
continue;
//patch in image 1
patch1 = image1(neighbourhood);
//reset best correlation value;
cv::DMatch bestMatch;
//for all keypoints in image 2
for (int j = 0; j < keypoints2.size(); j++)
{
//define image patch
neighbourhood.x = keypoints2[j].pt.x - nsize / 2;
neighbourhood.y = keypoints2[j].pt.y - nsize / 2;
//if neighbourhood of points outside image,
//then continue with next point
if (neighbourhood.x < 0 || neighbourhood.y < 0 || neighbourhood.x + nsize >= image2.cols || neighbourhood.y + nsize >= image2.rows)
continue;
//patch in image 2
patch2 = image2(neighbourhood);
//match the 2 patches
cv::matchTemplate(patch1, patch2, result, CV_TM_SQDIFF_NORMED);
//check if it is best match
if (result.at<float>(0, 0) < bestMatch.distance)
{
bestMatch.distance = result.at<float>(0, 0);
bestMatch.queryIdx = i;
bestMatch.trainIdx = j;
}
}
//add the best match
matches.push_back(bestMatch);
}
//extract the 25 best matches
std::nth_element(matches.begin(), matches.begin() + 25, matches.end());
matches.erase(matches.begin() + 25, matches.end());
//Draw matching results
cv::Mat matchImage;
cv::DrawMatchesFlags();
}
There are some mistakes in your code.
Replace the lines below
std::vector<cv::Keypoint> keypoints1;
std::vector<cv::Keypoint> keypoints2;
with these:
std::vector<cv::KeyPoint> keypoints1;
std::vector<cv::KeyPoint> keypoints2;
The abstract-class errors on cv::FastFeatureDetector fastDet(80); indicate you are building against OpenCV 3.x, where FastFeatureDetector is an abstract interface: you have to create it through its factory method and hold it in a cv::Ptr (and make sure you link against the opencv_features2d library).
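Something like the following should work (a minimal sketch based on the OpenCV 3.x API, keeping your threshold of 80 and your image/keypoint variables):
//Define feature detector via the OpenCV 3.x factory method
cv::Ptr<cv::FastFeatureDetector> fastDet = cv::FastFeatureDetector::create(80);
//Keypoint detection (note the ->, fastDet is now a smart pointer)
fastDet->detect(image1, keypoints1);
fastDet->detect(image2, keypoints2);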
After these changes your code should compile and run.
Related
I am learning how to stitch two images together using the link below, but whatever I do when calculating the homography and calling warpPerspective, the two images won't stitch together.
https://learnopencv.com/feature-based-image-alignment-using-opencv-c-python/
Below is the source code for image stitching
Include Section
#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/core/types.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <vector>
#include <iostream>
Global Variables
using namespace std;
using namespace cv;
const float inlier_threshold = 2.5f; // Distance threshold to identify inliers
const float nn_match_ratio = 0.8f; // Nearest neighbor matching ratio
Main Function
int main(void)
{
puts("opening");
Mat img1 = imread("uttower_right.jpg", IMREAD_GRAYSCALE); // To be Aligned
Mat img2 = imread("large2_uttower_left.jpg", IMREAD_GRAYSCALE); // Reference
Mat img3 = Mat(img2.rows, img2.cols, CV_8UC1);
//img2.copyTo(img3);
Mat homography;
vector<KeyPoint> kpts1, kpts2;
Mat desc1, desc2;
puts("Have opened");
Ptr<AKAZE> akaze = AKAZE::create();
akaze->detectAndCompute(img1, noArray(), kpts1, desc1);
akaze->detectAndCompute(img2, noArray(), kpts2, desc2);
puts("have commputed akaze");
BFMatcher matcher(NORM_HAMMING);
vector< vector<DMatch> > nn_matches;
matcher.knnMatch(desc1, desc2, nn_matches, 2);
puts("Have done match");
vector<KeyPoint> matched1, matched2;
vector<Point2f> inliers1, inliers2;
for (size_t i = 0; i < nn_matches.size(); i++) {
DMatch first = nn_matches[i][0];
float dist1 = nn_matches[i][0].distance;
float dist2 = nn_matches[i][1].distance;
if (dist1 < nn_match_ratio * dist2) {
matched1.push_back(kpts1[first.queryIdx]);
matched2.push_back(kpts2[first.trainIdx]);
inliers1.push_back(kpts1[first.queryIdx].pt);
inliers2.push_back(kpts1[first.trainIdx].pt);
}
}
printf("Matches %d %d\n", matched1.size(), matched2.size());
homography = findHomography(inliers1, inliers2, RANSAC);
warpPerspective(img1, img3, homography, img2.size());
//Display input and output
imshow("Input1", img1);
imshow("Input2", img2);
imshow("Input3", img3);
waitKey(0);
return 0;
}
Images used
Cross post here
I noticed that the function FlannBasedMatcher::match has a mask parameter, so I gave it a try with the following code:
#include<opencv.hpp>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
using namespace cv;
using namespace std;
int main() {
Mat rightImg = imread("right.jpg", 0);
Mat leanImg = imread("lean.jpg", 0);
if (!rightImg.data || !leanImg.data) {
cout << "Fail to read your image. Please check your path.\n";
return -1;
}
resize(leanImg, leanImg, rightImg.size());
int minHessian = 400;
SurfFeatureDetector detector(minHessian);
vector<KeyPoint> keypoints_right, keypoints_lean;
detector.detect(rightImg, keypoints_right);
detector.detect(leanImg, keypoints_lean);
Mat med_right, med_lean;
drawKeypoints(rightImg, keypoints_right, med_right);
drawKeypoints(leanImg, keypoints_lean, med_lean);
SurfDescriptorExtractor extractor;
Mat descriptors_right, descriptors_lean;
extractor.compute(rightImg, keypoints_right, descriptors_right);
extractor.compute(leanImg, keypoints_lean, descriptors_lean);
FlannBasedMatcher matcher;
vector< DMatch > matches;
Mat mask(descriptors_right.rows, descriptors_lean.rows, CV_8UC1, Scalar(0));
Mat target(rightImg.size(), CV_8UC1, Scalar(255));
ellipse(target, Point(rightImg.cols / 2, rightImg.rows / 2), Size(rightImg.cols / 2, rightImg.rows / 2), 0, 0, 360, Scalar(0), CV_FILLED);
for (int i = 0; i < mask.rows; i++) {
uchar* pixrow = mask.ptr<uchar>(i);
for (int j = 0; j < mask.cols; j++) {
if (target.at<uchar>(keypoints_right[i].pt) == 255)
pixrow[j] = 255;
}
}
matcher.match(descriptors_right, descriptors_lean, matches/*, mask*/);//use it or not to test
Mat img_matches;
drawMatches(rightImg, keypoints_right, leanImg, keypoints_lean,
matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
return 0;
}
These are my right.jpg and lean.jpg. I don't care about the points in the center of right.jpg, so I built a mask for them. But I get exactly the same result whether or not I pass the mask to FlannBasedMatcher::match; you can toggle the mask argument to reproduce it. Have I missed something, or does OpenCV 2.4.13 have a bug? Can anyone tell me how to use the mask in FlannBasedMatcher::match? I think it is a useful parameter.
From the docs: "FlannBasedMatcher does not support masking permissible matches of descriptor sets because flann::Index does not support this." See the DescriptorMatcher::isMaskSupported method for a way to test if the matcher supports masking.
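If you still need the restricted search, one option (a sketch only, reusing the descriptors_right, descriptors_lean and mask variables from your code) is to switch to a brute-force matcher, which does honor the mask; you can verify support at runtime with isMaskSupported():
cv::BFMatcher bf(cv::NORM_L2); //SURF descriptors are float, so NORM_L2 is appropriate
//bf.isMaskSupported() returns true, while FlannBasedMatcher::isMaskSupported() returns false
std::vector<cv::DMatch> maskedMatches;
bf.match(descriptors_right, descriptors_lean, maskedMatches, mask); //the mask is applied here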
So I combined the squares.cpp and cvBoundingRect.cpp code to detect squares in video. I therefore had to convert from IplImage to Mat (using the cvarrToMat function) so that the findSquares and drawSquares methods could run. But unfortunately, after a successful compilation I get this error when running:
OpenCV Error: Assertion failed (j < nsrcs && src[j].depth() == depth) in mixChannels, file /Users/Desktop/opencv-3.0.0-rc1/modules/core/src/convert.cpp, line 1205
libc++abi.dylib: terminating with uncaught exception of type cv::Exception: /Users/Desktop/opencv-3.0.0-rc1/modules/core/src/convert.cpp:1205: error: (-215) j < nsrcs && src[j].depth() == depth in function mixChannels
Abort trap: 6
Here's the code:
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <math.h>
#include <string.h>
using namespace cv;
using namespace std;
int thresh = 50, N = 11;
const char* wndname = "Square Detection Demo";
// finds a cosine of angle between vectors
// from pt0->pt1 and from pt0->pt2
static double angle( Point pt1, Point pt2, Point pt0 )
{
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}
// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
static void findSquares( const Mat& image, vector<vector<Point> >& squares )
{
squares.clear();
Mat pyr, timg, gray0(image.size(), CV_8U), gray;
// down-scale and upscale the image to filter out the noise
pyrDown(image, pyr, Size(image.cols/2, image.rows/2));
pyrUp(pyr, timg, image.size());
vector<vector<Point> > contours;
// find squares in every color plane of the image
for( int c = 0; c < 3; c++ )
{
int ch[] = {c, 0};
mixChannels(&timg, 1, &gray0, 1, ch, 1);
// try several threshold levels
for( int l = 0; l < N; l++ )
{
// hack: use Canny instead of zero threshold level.
// Canny helps to catch squares with gradient shading
if( l == 0 )
{
// apply Canny. Take the upper threshold from slider
// and set the lower to 0 (which forces edges merging)
Canny(gray0, gray, 0, thresh, 5);
// dilate canny output to remove potential
// holes between edge segments
dilate(gray, gray, Mat(), Point(-1,-1));
}
else
{
// apply threshold if l!=0:
// tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
gray = gray0 >= (l+1)*255/N;
}
// find contours and store them all as a list
findContours(gray, contours, RETR_LIST, CHAIN_APPROX_SIMPLE);
vector<Point> approx;
// test each contour
for( size_t i = 0; i < contours.size(); i++ )
{
// approximate contour with accuracy proportional
// to the contour perimeter
approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);
// square contours should have 4 vertices after approximation
// relatively large area (to filter out noisy contours)
// and be convex.
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
if( approx.size() == 4 &&
fabs(contourArea(Mat(approx))) > 1000 &&
isContourConvex(Mat(approx)) )
{
double maxCosine = 0;
for( int j = 2; j < 5; j++ )
{
// find the maximum cosine of the angle between joint edges
double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
maxCosine = MAX(maxCosine, cosine);
}
// if cosines of all angles are small
// (all angles are ~90 degree) then write quandrange
// vertices to resultant sequence
if( maxCosine < 0.3 )
squares.push_back(approx);
}
}
}
}
}
// the function draws all the squares in the image
static void drawSquares( Mat& image, const vector<vector<Point> >& squares )
{
for( size_t i = 0; i < squares.size(); i++ )
{
const Point* p = &squares[i][0];
int n = (int)squares[i].size();
polylines(image, &p, &n, 1, true, Scalar(255,0,0), 3, LINE_AA);
}
imshow(wndname, image);
}
CvRect rect;
CvSeq* contours = 0;
CvMemStorage* storage = NULL;
CvCapture *cam;
IplImage *currentFrame, *currentFrame_grey, *differenceImg, *oldFrame_grey;
bool first = true;
int main(int argc, char* argv[])
{
//Create a new movie capture object.
cam = cvCaptureFromCAM(0);
//create storage for contours
storage = cvCreateMemStorage(0);
//capture current frame from webcam
currentFrame = cvQueryFrame(cam);
//Size of the image.
CvSize imgSize;
imgSize.width = currentFrame->width;
imgSize.height = currentFrame->height;
//Images to use in the program.
currentFrame_grey = cvCreateImage( imgSize, IPL_DEPTH_8U, 1);
namedWindow( wndname, 1 );
vector<vector<Point> > squares;
while(1)
{
currentFrame = cvQueryFrame( cam );
if( !currentFrame ) break;
//Convert the image to grayscale.
cvCvtColor(currentFrame,currentFrame_grey,CV_RGB2GRAY);
if(first) //Capturing Background for the first time
{
differenceImg = cvCloneImage(currentFrame_grey);
oldFrame_grey = cvCloneImage(currentFrame_grey);
cvConvertScale(currentFrame_grey, oldFrame_grey, 1.0, 0.0);
first = false;
continue;
}
//Minus the current frame from the moving average.
cvAbsDiff(oldFrame_grey,currentFrame_grey,differenceImg);
//bluring the differnece image
cvSmooth(differenceImg, differenceImg, CV_BLUR);
//apply threshold to discard small unwanted movements
cvThreshold(differenceImg, differenceImg, 25, 255, CV_THRESH_BINARY);
//find contours
cv::Mat diffImg = cv::cvarrToMat(differenceImg);
cv::Mat currFrame = cv::cvarrToMat(currentFrame);
findSquares(diffImg, squares);
//draw bounding box around each contour
drawSquares(currFrame, squares);
//display colour image with bounding box
cvShowImage("Output Image", currentFrame);
//display threshold image
cvShowImage("Difference image", differenceImg);
//New Background
cvConvertScale(currentFrame_grey, oldFrame_grey, 1.0, 0.0);
//clear memory and contours
cvClearMemStorage( storage );
contours = 0;
//press Esc to exit
char c = cvWaitKey(33);
if( c == 27 ) break;
}
// Destroy the image & movies objects
cvReleaseImage(&oldFrame_grey);
cvReleaseImage(&differenceImg);
cvReleaseImage(&currentFrame);
cvReleaseImage(&currentFrame_grey);
return 0;
}
As the error message says, your problem is in cv::mixChannels() (see the documentation). findSquares loops over three color planes, but the difference image you pass to it is single-channel (CV_8UC1), so mixChannels is asked to extract a channel that doesn't exist and the assertion fails.
Or you could simply do something like
cv::Mat channels[3];
cv::split(multiChannelImage, channels);
and then access each channel using
cv::Mat currChannel = channels[channelNumber];
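Applied to the code in the question, a minimal fix (a sketch, assuming you want to keep findSquares otherwise unchanged and run it on the single-channel difference image) is to loop only over the channels the input actually has:
//inside findSquares(): iterate over the channels that actually exist instead of assuming 3
const int nChannels = image.channels();
for( int c = 0; c < nChannels; c++ )
{
int ch[] = {c, 0};
mixChannels(&timg, 1, &gray0, 1, ch, 1);
//... threshold levels and contour detection unchanged ...
}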
I'm trying to use Harris Corner detection algorithm of OpenCV to find corners in an image. I want to track it across consecutive frames using Lucas-Kanade Pyramidal Optical flow.
I have this C++ code, which doesn't seem to work for some reason:
#include <stdio.h>
#include "opencv2/core/core.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/contrib/contrib.hpp"
#include "opencv2/opencv.hpp"
using namespace cv;
using namespace std;
void main()
{
Mat img1, img2;
Mat disp1, disp2;
int thresh = 200;
vector<Point2f> left_corners;
vector<Point2f> right_corners;
vector<unsigned char> status;
vector<float> error;
Size s;
s.height = 400;
s.width = 400;
img1 = imread("D:\\img_l.jpg",0);
img2 = imread("D:\\img_r.jpg",0);
resize(img2, img2, s, 0, 0, INTER_CUBIC);
resize(img1, img1, s, 0, 0, INTER_CUBIC);
disp1 = Mat::zeros( img1.size(), CV_32FC1 );
disp2 = Mat::zeros( img2.size(), CV_32FC1 );
int blockSize = 2;
int apertureSize = 3;
double k = 0.04;
cornerHarris( img1, disp1, blockSize, apertureSize, k, BORDER_DEFAULT );
normalize( disp1, disp1, 0, 255, NORM_MINMAX, CV_32FC1, Mat() );
for( int j = 0; j < disp1.size().height ; j++ )
{
for( int i = 0; i < disp1.size().width; i++ )
{
if( (int) disp1.at<float>(j,i) > thresh )
{
left_corners.push_back(Point2f( j, i ));
}
}
}
right_corners.resize(left_corners.size());
calcOpticalFlowPyrLK(img1,img2,left_corners,right_corners,status,error, Size(11,11),5);
printf("Vector size : %d",left_corners.size());
waitKey(0);
}
When I run it, I get the following error message:
Microsoft Visual Studio C Runtime Library has detected a fatal error in OpenCVTest.exe.
(OpenCVTest being the name of my project)
OpenCV Error: Assertion failed ((npoints = prevPtsMat.checkVector(2, CV_32F, true)) >= 0) in unknown function, file ..\..\OpenCV-2.3.0-win-src\OpenCV-2.3.0\modules\video\src\lkpyramid.cpp, line 71
I have been trying to debug this from yesterday, but in vain. Please help.
As we can see in the source code, this error is thrown if the previous points array is in some way faulty. Exactly what makes it bad is hard to say, since the documentation for checkVector is a bit sketchy. You can still look at the code to find out.
But my guess is that your left_corners variable has either the wrong type (not CV_32F) or the wrong shape.
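One thing worth trying (a sketch, not a drop-in fix for your exact pipeline) is to let goodFeaturesToTrack with the Harris option build the point list for you; it returns Point2f corners in (x, y) order, which is what calcOpticalFlowPyrLK expects, and you can check that the list is non-empty before tracking:
std::vector<cv::Point2f> left_corners, right_corners;
std::vector<unsigned char> status;
std::vector<float> error;
//Harris corners straight into the format calcOpticalFlowPyrLK expects
//arguments: max corners, quality level, min distance, mask, block size, use Harris, k
cv::goodFeaturesToTrack(img1, left_corners, 500, 0.01, 10, cv::Mat(), 3, true, 0.04);
if (!left_corners.empty())
cv::calcOpticalFlowPyrLK(img1, img2, left_corners, right_corners, status, error, cv::Size(11, 11), 5);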
I have an image like this:
I want to remove the black rows and columns around the number.
So I want the result to be:
I tried this:
void findX(IplImage* imgSrc,int* min, int* max){
int i;
int minFound=0;
CvMat data;
CvScalar maxVal=cvRealScalar(imgSrc->width * 255);
CvScalar val=cvRealScalar(0);
//For each col sum, if sum < width*255 then we find the min
//then continue to end to search the max, if sum< width*255 then is new max
for (i=0; i< imgSrc->width; i++){
cvGetCol(imgSrc, &data, i);
val= cvSum(&data);
if(val.val[0] < maxVal.val[0]){
*max= i;
if(!minFound){
*min= i;
minFound= 1;
}
}
}
}
void findY(IplImage* imgSrc,int* min, int* max){
int i;
int minFound=0;
CvMat data;
CvScalar maxVal=cvRealScalar(imgSrc->width * 255);
CvScalar val=cvRealScalar(0);
//For each col sum, if sum < width*255 then we find the min
//then continue to end to search the max, if sum< width*255 then is new max
for (i=0; i< imgSrc->height; i++){
cvGetRow(imgSrc, &data, i);
val= cvSum(&data);
if(val.val[0] < maxVal.val[0]){
*max=i;
if(!minFound){
*min= i;
minFound= 1;
}
}
}
}
CvRect findBB(IplImage* imgSrc){
CvRect aux;
int xmin, xmax, ymin, ymax;
xmin=xmax=ymin=ymax=0;
findX(imgSrc, &xmin, &xmax);
findY(imgSrc, &ymin, &ymax);
aux=cvRect(xmin, ymin, xmax-xmin, ymax-ymin);
//printf("BB: %d,%d - %d,%d\n", aux.x, aux.y, aux.width, aux.height);
return aux;
}
So I use:
IplImage *my_image = cvLoad....
CvRect bb = findBB(my_image);
IplImage *new_image = cvCreateImage(cvSize(bb.width,bb.height), my_image->depth, 1);
cvShowImage("test",new_image);
It doesn't work well: when I check the new image, the black rows and columns are still there. What can I do? Can someone help me? (Sorry for my English!)
One way to do it is to simply execute the bounding box technique to detect the digit, as illustrated by the image below:
Since your image is already processed, the bounding box technique I use is a lot simpler.
After that procedure, all you really need to do is set the ROI (Region of Interest) of the original image to the area defined by the box to achieve the crop effect and isolate the object:
Notice that in the resulting image there is one extra row/column of pixels at the border that are not white. Well, they are not black either. That's because I didn't perform any thresholding to binarize the image to black and white. The code below demonstrates the bounding box technique being executed on a grayscale version of the image.
This is pretty much the roadmap to achieve what you want. For educational purposes I'm sharing the code I wrote using the C++ interface of OpenCV. I'm sure you are capable of converting it to the C interface.
#include <cv.h>
#include <highgui.h>
#include <vector>
int main(int argc, char* argv[])
{
cv::Mat img = cv::imread(argv[1]);
// Convert RGB Mat to GRAY
cv::Mat gray;
cv::cvtColor(img, gray, CV_BGR2GRAY);
// Store the set of points in the image before assembling the bounding box
std::vector<cv::Point> points;
cv::Mat_<uchar>::iterator it = gray.begin<uchar>();
cv::Mat_<uchar>::iterator end = gray.end<uchar>();
for (; it != end; ++it)
{
if (*it) points.push_back(it.pos());
}
// Compute minimal bounding box
cv::RotatedRect box = cv::minAreaRect(cv::Mat(points));
// Draw bounding box in the original image (debug purposes)
//cv::Point2f vertices[4];
//box.points(vertices);
//for (int i = 0; i < 4; ++i)
//{
//cv::line(img, vertices[i], vertices[(i + 1) % 4], cv::Scalar(0, 255, 0), 1, CV_AA);
//}
//cv::imshow("box", img);
//cv::imwrite("box.png", img);
// Set Region of Interest to the area defined by the box
cv::Rect roi;
roi.x = box.center.x - (box.size.width / 2);
roi.y = box.center.y - (box.size.height / 2);
roi.width = box.size.width;
roi.height = box.size.height;
// Crop the original image to the defined ROI
cv::Mat crop = img(roi);
cv::imshow("crop", crop);
cv::imwrite("cropped.png", crop);
cvWaitKey(0);
return 0;
}