I am trying to do image registration with ORB features.
I ran into a problem using warpAffine. The compiler says it is not possible to convert parameter '1' from cv::Mat * to cv::InputArray.
Here is my code:
#pragma once
// Standard C++ I/O library.
#include <iostream>
#include <string>
#include <iomanip>
#include <vector>
// OpenCV library.
#include <cv.h>
#include <highgui.h>
// OpenCV feature library.
#include <opencv2/opencv.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/features2d.hpp>
// main().
int main(int argc, char ** argv)
{
cv::Mat im_ref, im_cmp;
std::string str_ref, str_cmp;
// Read reference image.
//std::cout<<"Input reference image filename: ";
//std::cin>>str_ref;
std::cout<<"-> Reading images."<<std::endl;
str_ref = "F:\\CPPs\\ImageRegistration\\OpenCVTest\\206.png";
im_ref = cv::imread(str_ref);
cv::imshow("Reference image", im_ref);
// Read testing image.
//std::cout<<"Input testing image filename: ";
//std::cin>>str_cmp;
str_cmp = "F:\\CPPs\\ImageRegistration\\OpenCVTest\\227.png";
im_cmp = cv::imread(str_cmp);
cv::imshow("Testing image", im_cmp);
std::cout<<"Press any key to continue."<<std::endl;
cvWaitKey(0);
// Feature detection.
std::cout<<"-> Feature detection."<<std::endl;
std::vector <cv::KeyPoint> key_ref, key_cmp; // Vectors for features extracted from reference and testing images.
cv::Mat des_ref, des_cmp; // Descriptors for features of 2 images.
cv::ORB orb1; // An ORB object.
orb1(im_ref, cv::Mat(), key_ref, des_ref); // Feature extraction.
orb1(im_cmp, cv::Mat(), key_cmp, des_cmp);
// Show keypoints.
std::cout<<"-> Show keypoints."<<std::endl;
cv::Mat drawkey_ref, drawkey_cmp; // Output image for keypoint drawing.
cv::drawKeypoints(im_ref, key_ref, drawkey_ref); // Generate image for keypoint drawing.
cv::imshow("Keypoints of reference", drawkey_ref);
cv::drawKeypoints(im_cmp, key_cmp, drawkey_cmp);
cv::imshow("Keypoints of test", drawkey_cmp);
cvWaitKey(0);
// Matching.
std::cout<<"-> Matching."<<std::endl;
cv::FlannBasedMatcher matcher1(new cv::flann::LshIndexParams(20,10,2));
std::vector<cv::DMatch> matches1;
matcher1.match(des_ref, des_cmp, matches1); // Match two sets of features.
double max_dist = 0;
double min_dist = 100;
// Find out the minimum and maximum of all distance.
for( int i = 0; i < des_ref.rows; i++ )
{
double dist = matches1[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
cvWaitKey(0);
// Eliminate relatively bad points.
std::cout<<"-> Bad points elimination"<<std::endl;
std::vector<cv::KeyPoint> kgood_ref, kgood_cmp;
std::vector<cv::DMatch> goodMatch;
for (int i=0; i<matches1.size(); i++)
{
if(matches1[i].distance < 2*min_dist) // Keep points whose distance is less than 2 times the minimum distance.
{
goodMatch.push_back(matches1[i]);
kgood_ref.push_back(key_ref[matches1[i].queryIdx]); // Index keypoints through the match so the pairs correspond.
kgood_cmp.push_back(key_cmp[matches1[i].trainIdx]);
} // end if
} // end for
cvWaitKey(0);
// Calculate affine transform matrix.
std::cout<<"-> Calculating affine transformation."<<std::endl;
std::vector<cv::Point2f> frm1_feature, frm2_feature;
const int p_size = goodMatch.size();
// * tmpP = new tmpPoint[p_size];
cv::Point2f tmpP;
for(int i=0; i<goodMatch.size(); i++)
{
tmpP.x = kgood_ref[i].pt.x;
tmpP.y = kgood_ref[i].pt.y;
frm1_feature.push_back(tmpP);
tmpP.x = kgood_cmp[i].pt.x;
tmpP.y = kgood_cmp[i].pt.y;
frm2_feature.push_back(tmpP);
}
cv::Mat affine_mat = cv::estimateRigidTransform(frm1_feature, frm2_feature, true);
cv::Mat im_transformed;
// Output results.
cv::warpAffine(&im_cmp, &im_transformed, affine_mat, CV_INTER_LINEAR|CV_WARP_FILL_OUTLIERS); // error comes from here.
cv::imshow("Transformed image", im_transformed);
cvWaitKey(0);
return 0;
}
I got a result before using the answer given by Evgeniy.
The transform I had used is
//cv::warpAffine( im_cmp, im_transformed, affine_mat, cv::Size(im_cmp.cols, im_cmp.rows) );
The transformed result is quite strange.
What I want to do is finally get a merged image of both the reference image and this transformed image. This is actually my first step. Is the problem in the transformation parameters I pass to warpAffine()?
Finally, I want to get a result like the example here (two images taken at different positions and finally aligned).
You are passing a pointer, but warpAffine accepts a reference to a cv::Mat.
You can change your code like this:
cv::warpAffine(im_cmp, im_transformed, affine_mat, cv::Size(), CV_INTER_LINEAR|CV_WARP_FILL_OUTLIERS);
Just remove the '&'.
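To get the merged image you describe, one option is to warp the test image into the reference frame and then blend the two. Below is a minimal sketch, assuming affine_mat maps im_cmp into the reference frame (if it maps the other way, invert it first with cv::invertAffineTransform); the variable names reuse the ones from your code.
cv::Mat im_aligned;
cv::warpAffine(im_cmp, im_aligned, affine_mat,
               cv::Size(im_ref.cols, im_ref.rows)); // warp into the reference image size
cv::Mat im_merged;
cv::addWeighted(im_ref, 0.5, im_aligned, 0.5, 0.0, im_merged); // simple 50/50 blend
cv::imshow("Merged image", im_merged);
cv::waitKey(0);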
Related
I've been working with OpenCV to stitch two images together on a Raspberry Pi and on a Windows OS based PC.
#include <stdio.h>
#include <iostream>
#include "opencv2/opencv.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
using namespace cv;
int main (int argc, char** argv) {
Mat image_1 = imread (argv[1]);
Mat image_2 = imread (argv[2]);
Mat gray_image_1;
Mat gray_image_2;
// Check if image files can be read (before converting them)
if (!image_1.data) {
std::cout << "Error Reading Image 1" << std::endl;
return 0;
}
if (!image_2.data) {
std::cout << "Error Reading Image 2" << std::endl;
return 0;
}
cvtColor (image_1, gray_image_1, CV_RGB2GRAY);
cvtColor (image_2, gray_image_2, CV_RGB2GRAY);
// Detect the keypoints using SURF Detector
// Based from Anna Huaman's 'Features2D + Homography to find a known object' Tutorial
int minHessian = 50;
SurfFeatureDetector detector (minHessian);
std::vector <KeyPoint> keypoints_object, keypoints_scene;
detector.detect (gray_image_2, keypoints_object);
detector.detect (gray_image_1, keypoints_scene);
// Calculate Feature Vectors (descriptors)
// Based from Anna Huaman's 'Features2D + Homography to find a known object' Tutorial
SurfDescriptorExtractor extractor;
Mat descriptors_object, descriptors_scene;
extractor.compute (gray_image_2, keypoints_object, descriptors_object);
extractor.compute (gray_image_1, keypoints_scene, descriptors_scene);
// Matching descriptor vectors using FLANN matcher
// Based from Anna Huaman's 'Features2D + Homography to find a known object' Tutorial
FlannBasedMatcher matcher;
std::vector <DMatch> matches;
matcher.match (descriptors_object, descriptors_scene, matches);
double max_dist = 0;
double min_dist = 100;
// Quick calculation of max and min distances between keypoints
// Based from Anna Huaman's 'Features2D + Homography to find a known object' Tutorial
for (int i = 0; i < descriptors_object.rows; i++) {
double dist = matches[i].distance;
if (dist < min_dist) {
min_dist = dist;
}
if (dist > max_dist) {
max_dist = dist;
}
}
// Use matches that have a distance that is less than 3 * min_dist
std::vector <DMatch> good_matches;
for (int i = 0; i < descriptors_object.rows; i++){
if (matches[i].distance < 3 * min_dist) {
good_matches.push_back (matches[i]);
}
}
std::vector <Point2f> obj;
std::vector <Point2f> scene;
for (int i = 0; i < good_matches.size(); i++) {
// Get the keypoints from the good matches
obj.push_back (keypoints_object[good_matches[i].queryIdx].pt);
scene.push_back (keypoints_scene[good_matches[i].trainIdx].pt);
}
// Find the Homography Matrix
Mat H = findHomography (obj, scene, CV_RANSAC);
// Use the Homography Matrix to warp the images
cv::Mat result;
warpPerspective (image_2, result, H, cv::Size (image_2.cols + image_1.cols, image_2.rows));
cv::Mat half (result, cv::Rect (0, 0, image_1.cols, image_1.rows));
image_1.copyTo (half);
// Write image
imwrite("Update.jpg", result);
waitKey (0);
return 0;
}
The two images I use as inputs are stitched successfully, but only when those two images have resolutions of <= 1080 * 1080 pixels.
For 1440 * 1440 and 1944 * 1944 resolutions I found that findHomography couldn't function because I was no longer getting more than 3 good matches. findHomography needs at least 4 good matches.
I have tried...
cv::resize() on the input images - at no resolution did the images produce enough good matches for findHomography.
min Hessian increased or decreased - no change
minimum distance increased or decreased - no change
Note: Both images overlap and have the same dimensions.
Does anyone have a solution to this problem? I have spent a few hours researching this issue, only to be led to the conclusion that OpenCV Image Stitching cannot process high resolution images.
Below I'll include two high resolution images for anyone wishing to help.
colour_1_1440
colour_2_1440
I was using OpenCV 2.4.13 and not the new OpenCV 3.1.0.
Based on Martin Matilla's comment:
"are you sure you are not discarding good matches in the distance filter section? if (matches[i].distance < 3 * min_dist)" – Martin Matilla
The solution did lie in the 3 * min_dist filter. I changed the value '3' to '4' to allow high resolution images to be processed.
Note: Originally I changed '3' to '30' and found that the 2nd input image was distorted as expected. <- Just to let anyone know :)
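For reference, this is what the adjusted filter from the code above looks like; only the multiplier differs from the original:
// Use matches that have a distance that is less than 4 * min_dist
// (the multiplier was raised from 3 to 4 so high resolution images produce enough good matches)
std::vector <DMatch> good_matches;
for (int i = 0; i < descriptors_object.rows; i++) {
if (matches[i].distance < 4 * min_dist) {
good_matches.push_back (matches[i]);
}
}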
I want to increase the contrast of the picture below, with OpenCV C++.
I have used histogram processing techniques, e.g., histogram equalization (HE), histogram specification, etc., but I don't reach a good result such as the images below:
What ideas on how to solve this task would you suggest? Or on what resource on the internet can I find help?
I found a useful OpenCV tutorial on changing image contrast:
#include <cv.h>
#include <highgui.h>
#include <iostream>
using namespace cv;
double alpha; /**< Simple contrast control */
int beta; /**< Simple brightness control */
int main( int argc, char** argv )
{
/// Read image given by user
Mat image = imread( argv[1] );
Mat new_image = Mat::zeros( image.size(), image.type() );
/// Initialize values
std::cout<<" Basic Linear Transforms "<<std::endl;
std::cout<<"-------------------------"<<std::endl;
std::cout<<"* Enter the alpha value [1.0-3.0]: ";std::cin>>alpha;
std::cout<<"* Enter the beta value [0-100]: "; std::cin>>beta;
/// Do the operation new_image(i,j) = alpha*image(i,j) + beta
for( int y = 0; y < image.rows; y++ )
{ for( int x = 0; x < image.cols; x++ )
{ for( int c = 0; c < 3; c++ )
{
new_image.at<Vec3b>(y,x)[c] =
saturate_cast<uchar>( alpha*( image.at<Vec3b>(y,x)[c] ) + beta );
}
}
}
/// Create Windows
namedWindow("Original Image", 1);
namedWindow("New Image", 1);
/// Show stuff
imshow("Original Image", image);
imshow("New Image", new_image);
/// Wait until user press some key
waitKey();
return 0;
}
See: Changing the contrast and brightness of an image!
I'm no expert, but you could try to reduce the number of colours by merging grays into darker grays, and light grays into whites.
E.g.:
Find the least common colour in the [0.0, 0.5) range and merge it towards black.
Find the least common colour in the [0.5, 1.0] range and merge it towards white.
This would reduce the number of colours and might help create a gap between the brighter and darker colours.
This might be late, but you can try the createCLAHE() function in OpenCV. It works fine for me.
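A minimal sketch of how that might look, assuming a grayscale input file named input.jpg; the clip limit and tile grid size are just starting values to tune:
#include <opencv2/opencv.hpp>
int main()
{
    // Load the image as grayscale; CLAHE works on single-channel images.
    cv::Mat src = cv::imread("input.jpg", 0);
    if (src.empty())
        return -1;
    // Contrast Limited Adaptive Histogram Equalization.
    cv::Ptr<cv::CLAHE> clahe = cv::createCLAHE(2.0, cv::Size(8, 8));
    cv::Mat dst;
    clahe->apply(src, dst);
    cv::imshow("CLAHE result", dst);
    cv::waitKey(0);
    return 0;
}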
I am trying to recognize a source image (c1.jpg, a face) in a bigger destination image (allimg.jpg, containing 3 faces) using the ORB detector/descriptor and a FLANN or brute-force matcher. c1.jpg was created from allimg.jpg by cropping/copying from it.
The ORB detector/descriptor works as expected, returning keypoints/descriptors correctly, BUT the FLANN or brute-force matcher gives incorrect matching results for the destination. As a result, when I then try to use findHomography(), it shows an incorrect result, mapping the source to somewhere else on the destination instead of the correct face in the destination (allimg).
Although it is not shown in the code below, after knnMatch I drew a bounding rect on c1.jpg and allimg.jpg from the matches and displayed the images. I found that the source bounding rect was correct, but the bounding rect on allimg was very big and included the source face. It should have just found the source face in the destination.
I am using OpenCV 3.0.
Has anyone faced such problems? Are there any other matchers which accurately find the source image (a face or anything else) in the destination?
I have given the code below and the images (given by links):
#include <opencv2/core/core.hpp>
#include <opencv2/opencv.hpp>
#include <opencv2/features2d/features2d.hpp>
using namespace std;
using namespace cv;
const double nn_match_ratio = 0.80f; // Nearest-neighbour matching ratio
const double ransac_thresh = 2.5f; // RANSAC inlier threshold
const int bb_min_inliers = 100; // Minimal number of inliers to draw BBox
Mat img1;
Mat img2;
bool refineMatchesWithHomography(const vector<cv::KeyPoint>& queryKeypoints,
const vector<cv::KeyPoint>& trainKeypoints,
float reprojectionThreshold,
vector<cv::DMatch>& matches,
Mat& homography )
{
const int minNumberMatchesAllowed = 4;
if (matches.size() <minNumberMatchesAllowed)
return false;
// Prepare data for cv::findHomography
vector<cv::Point2f> queryPoints(matches.size());
std::vector<cv::Point2f> trainPoints(matches.size());
for (size_t i = 0; i <matches.size(); i++)
{
queryPoints[i] = queryKeypoints[matches[i].queryIdx].pt;
trainPoints[i] = trainKeypoints[matches[i].trainIdx].pt;
}
// Find homography matrix and get inliers mask
std::vector<unsigned char> inliersMask(matches.size());
homography = findHomography(queryPoints,
trainPoints,
CV_FM_RANSAC,
reprojectionThreshold,
inliersMask);
vector<cv::DMatch> inliers;
for (size_t i=0; i<inliersMask.size(); i++)
{
if (inliersMask[i])
inliers.push_back(matches[i]);
}
matches.swap(inliers);
Mat homoShow;
drawMatches (img1,queryKeypoints,img2,trainKeypoints,matches,homoShow,
Scalar::all(-1),CV_RGB(255,255,255), Mat(), 2);
imshow("homoShow",homoShow);
waitKey(100000);
return matches.size() > minNumberMatchesAllowed;
}
int main()
{
//Stats stats;
vector<String> fileName;
fileName.push_back("D:\\pmn\\c1.jpg");
fileName.push_back("D:\\pmn\\allimg.jpg");
img1 = imread(fileName[0], CV_LOAD_IMAGE_COLOR);
img2 = imread(fileName[1], CV_LOAD_IMAGE_COLOR);
if (img1.rows*img1.cols <= 0)
{
cout << "Image " << fileName[0] << " is empty or cannot be found\n";
return(0);
}
if (img2.rows*img2.cols <= 0)
{
cout << "Image " << fileName[1] << " is empty or cannot be found\n";
return(0);
}
// keypoint for img1 and img2
vector<KeyPoint> keyImg1, keyImg2;
// Descriptor for img1 and img2
Mat descImg1, descImg2;
Ptr<Feature2D> porb = ORB::create(500,1.2f,8,0,0,2,0,14);
porb->detect(img2, keyImg2, Mat());
// and compute their descriptors with method compute
porb->compute(img2, keyImg2, descImg2);
// We can detect keypoint with detect method
porb->detect(img1, keyImg1,Mat());
// and compute their descriptors with method compute
porb->compute(img1, keyImg1, descImg1);
//FLANN parameters
//Ptr<flann::IndexParams> indexParams = makePtr<flann::LshIndexParams>(6, 12, 1);
//Ptr<flann::SearchParams> searchParams = makePtr<flann::SearchParams>(50);
String itMatcher = "BruteForce-L1";
Ptr<DescriptorMatcher> matdescriptorMatchercher(new cv::BFMatcher(cv::NORM_HAMMING, false));
vector<vector<DMatch> > matches,bestMatches;
vector<DMatch> m;
matdescriptorMatchercher->knnMatch(descImg1, descImg2, matches,2);
const float minRatio = 0.95f;//1.f / 1.5f;
for (int i = 0; i<matches.size(); i++)
{
if(matches[i].size()>1)
{
DMatch& bestMatch = matches[i][0];
DMatch& betterMatch = matches[i][1];
float distanceRatio = bestMatch.distance / betterMatch.distance;
if (distanceRatio <minRatio)
{
bestMatches.push_back(matches[i]);
m.push_back(bestMatch);
}
}
}
Mat homo;
float homographyReprojectionThreshold = 1.0;
bool homographyFound = refineMatchesWithHomography(
keyImg1,keyImg2,homographyReprojectionThreshold,m,homo);
return 0;
}
[c1.jpg][1]
[allimg.jpg][2]
[1]: http://i.stack.imgur.com/Uuy3o.jpg
[2]: http://i.stack.imgur.com/Kwne7.jpg
Thanks EdChum. I used the code given at the link (ratio test/symmetry test) and it provided somewhat OK image matching, but only if the source image was part of the destination, and it is not accurate enough. Note that I commented out the last ransacTest as it was removing a lot of positives unnecessarily.
I have attached the 2 images (source.jpg/destination.jpg), which show what I mean by highlighting the matched part in the destination.
Is there any algorithm which will identify the source in the destination still more accurately/correctly (>90%)?
Also, if the source is a similar image (and not exactly the one in the destination), I found that the destination image matching is way off and useless. Am I right?
Kindly share your view.
1=source,2=destination
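For reference, a sketch of what the symmetry test amounts to when expressed with OpenCV's built-in cross-check (this is not the exact code from the link, just one common way to write it): a BFMatcher constructed with crossCheck=true keeps only matches that agree in both directions. Note that cross-checking does not combine with knnMatch(k=2), so it would replace the ratio test rather than complement it.
// Sketch: brute-force Hamming matching with mutual cross-check.
// descImg1/descImg2 are the ORB descriptors computed earlier.
cv::BFMatcher crossChecker(cv::NORM_HAMMING, true); // true = keep only symmetric matches
std::vector<cv::DMatch> symmetricMatches;
crossChecker.match(descImg1, descImg2, symmetricMatches);
// symmetricMatches can then be passed to refineMatchesWithHomography() as before.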
I am processing an image as shown in Fig. 1, which is composed of an array of points, and I need to convert it to Fig. 2.
Fig.1 original image
Fig.2 wanted image
In order to do the conversion, I first detect the edge of every point and then apply dilation. The result is satisfactory after choosing the proper parameters, as seen in Fig. 3.
Fig.3 image after dilation
I processed the same image before in MATLAB. When it comes to shrinking the objects (in Fig. 3) to single pixels, the function bwmorph(Img,'shrink',Inf) works, and its result is exactly where Fig. 2 comes from. So how do I get the same wanted image in OpenCV? It seems that there is no similar shrink function.
Here is my code for the edge detection and dilation operations:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>
#include <cv.h>
#include <highgui.h>
using namespace cv;
// Global variables
Mat src, dilation_dst;
int dilation_size = 2;
int main(int argc, char *argv[])
{
IplImage* img = cvLoadImage("c:\\001a.bmp", 0); // 001a.bmp is Fig.1
// Perform canny edge detection
cvCanny(img, img, 33, 100, 3);
// IplImage to Mat
Mat imgMat(img);
src = img;
// Create windows
namedWindow("Dilation Demo", CV_WINDOW_AUTOSIZE);
Mat element = getStructuringElement(2, // dilation_type = MORPH_ELLIPSE
Size(2*dilation_size + 1, 2*dilation_size + 1),
Point(dilation_size, dilation_size));
// Apply the dilation operation
dilate(src, dilation_dst, element);
imwrite("c:\\001a_dilate.bmp", dilation_dst);
imshow("Dilation Demo", dilation_dst);
waitKey(0);
return 0;
}
1- Find all the contours in your image.
2- Using moments, find their centers of mass. Example:
/// Get moments
vector<Moments> mu(contours.size() );
for( int i = 0; i < contours.size(); i++ )
{ mu[i] = moments( contours[i], false ); }
/// Get the mass centers:
vector<Point2f> mc( contours.size() );
for( int i = 0; i < contours.size(); i++ )
{ mc[i] = Point2f( mu[i].m10/mu[i].m00 , mu[i].m01/mu[i].m00 ); }
3- Create a zero (black) image and write all the center points on it.
4- Note that you will have one or two extra points coming from the border contours. Maybe you can apply some pre-filtering according to the contour areas, since the border is a big connected contour with a large area. A minimal sketch putting these steps together is given below.
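The sketch assumes the input is the already dilated binary image from the question (001a_dilate.bmp) and that an area threshold of 500 roughly separates the border contour from the dots; both are assumptions to adjust:
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>
using namespace cv;
int main()
{
    // Assumed input: the dilated binary image produced by the question's code.
    Mat dilation_dst = imread("c:\\001a_dilate.bmp", 0);
    // 1- Find all the contours (findContours modifies its input, hence the clone).
    std::vector<std::vector<Point> > contours;
    findContours(dilation_dst.clone(), contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
    // 3- Create a zero (black) image to draw the centers on.
    Mat centers = Mat::zeros(dilation_dst.size(), CV_8UC1);
    for (size_t i = 0; i < contours.size(); i++)
    {
        // 4- Skip very large contours such as the border (threshold is an assumption).
        if (contourArea(contours[i]) > 500)
            continue;
        // 2- Center of mass from the moments.
        Moments mu = moments(contours[i], false);
        if (mu.m00 == 0)
            continue;
        Point center(cvRound(mu.m10 / mu.m00), cvRound(mu.m01 / mu.m00));
        centers.at<uchar>(center) = 255;
    }
    imwrite("c:\\001a_centers.bmp", centers);
    return 0;
}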
It's not very fast, but I implemented the morphological filtering algorithm from Digital Image Processing, 4th Edition by William K. Pratt. This should be exactly what you're looking for.
The code is MIT licensed and available on GitHub at cgmb/shrink.
Specifically, I've defined cv::Mat cgmb::shrink_max(cv::Mat in) to shrink a given cv::Mat of CV_8UC1 type until no further shrinking can be done.
So, if we compile Shrink.cxx with your program and change your code like so:
#include "Shrink.h" // add this line
...
dilate(src, dilation_dst, element);
dilation_dst = cgmb::shrink_max(dilation_dst); // and this line
imwrite("c:\\001a_dilate.bmp", dilation_dst);
We get this:
By the way, your image revealed a bug in Octave Image's implementation of bwmorph shrink. Figure 2 should not be the result of a shrink operation on Figure 3, as the ring shouldn't be broken by a shrink operation. If that ring disappeared in MATLAB, it presumably also suffers from some sort of similar bug.
At present, Octave and I have slightly different results from MATLAB, but they're pretty close.
I'm approaching a bioinformatics task and need to extract some features from cell images.
I used the SIFT algorithm to extract keypoints inside the image, as you can see in the picture.
As you can also see in the picture (circled in red), some keypoints are outliers and I don't want to calculate any features on them.
I obtained the cv::KeyPoint vector with the following code:
const cv::Mat input = cv::imread("/tmp/image.jpg", 0); //Load as grayscale
cv::SiftFeatureDetector detector;
std::vector<cv::KeyPoint> keypoints;
detector.detect(input, keypoints);
but I would like to discard from the vector all those keypoints that have, say, fewer than 3 keypoints inside a certain region of interest (ROI) centred on them in the image.
Therefore I need to implement a function returning the number of keypoints inside a certain ROI given as input:
int function_returning_number_of_key_points_in_ROI( cv::KeyPoint, ROI );
//I have not specified ROI on purpose...check question 3
I have three questions:
Is there any existing function doing something similar?
If not can you give me some help in understanding how to implement it by myself?
Would you use a circular or rectangular ROI for this task? And how would you specify it as input?
Note:
I forgot to specify that I would like an efficient implementation of the function, i.e. checking, for each keypoint, the relative positions of all the others with respect to it would not be a good solution (if there is another way of doing it).
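For what it's worth, here is a hedged sketch of the counting function asked for above, using a square ROI centred on a keypoint. The function name and the radius parameter are hypothetical, and the per-query linear scan is exactly the inefficiency the note above wants to avoid, so it only illustrates the interface:
#include <opencv2/features2d/features2d.hpp>
#include <cmath>
#include <vector>

// Hypothetical helper: counts how many other keypoints fall inside a square ROI
// of half-width `radius` centred on the given keypoint.
int countKeyPointsInROI(const cv::KeyPoint& kp,
                        const std::vector<cv::KeyPoint>& keypoints,
                        float radius)
{
    int count = 0;
    for (size_t i = 0; i < keypoints.size(); ++i)
    {
        float dx = keypoints[i].pt.x - kp.pt.x;
        float dy = keypoints[i].pt.y - kp.pt.y;
        if (std::abs(dx) <= radius && std::abs(dy) <= radius)
            ++count;
    }
    // The keypoint itself always falls inside its own ROI, so exclude it.
    return count - 1;
}
A keypoint kp would then be kept only if countKeyPointsInROI(kp, keypoints, radius) >= 3, in line with the criterion above.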
I decided to go with the statistical route, but this may not work if you have multiple cells in view.
My solution is fairly straightforward:
Compute the keypoint locations
Find the centroid of the keypoint spatial locations
Compute the Euclidean distance of all points to the centroid
Filter original keypoints by distance < mu + 2*sigma
Here is the image that I get using this algorithm (keypoints == green, centroid == red):
Finally, here is the code example of how I did it:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <iostream>
#include <vector>
using namespace cv;
using namespace std;
void distanceFromCentroid(const vector<Point2f>& points, Point2f centroid, vector<double>& distances)
{
vector<Point2f>::const_iterator point;
for(point = points.begin(); point != points.end(); ++point)
{
double distance = std::sqrt((point->x - centroid.x)*(point->x - centroid.x) + (point->y - centroid.y)*(point->y - centroid.y));
distances.push_back(distance);
}
}
int main(int argc, char* argv[])
{
Mat input = imread("cell.jpg", 0); //Load as grayscale
SiftFeatureDetector detector;
vector<cv::KeyPoint> keypoints;
detector.detect(input, keypoints);
vector<Point2f> points;
vector<KeyPoint>::iterator keypoint;
for(keypoint = keypoints.begin(); keypoint != keypoints.end(); ++keypoint)
{
points.push_back(keypoint->pt);
}
Moments m = moments(points, true);
Point2f centroid(m.m10 / m.m00, m.m01 / m.m00);
vector<double> distances;
distanceFromCentroid(points, centroid, distances);
Scalar mu, sigma;
meanStdDev(distances, mu, sigma);
cout << mu.val[0] << ", " << sigma.val[0] << endl;
vector<KeyPoint> filtered;
vector<double>::iterator distance;
for(size_t i = 0; i < distances.size(); ++i)
{
if(distances[i] < (mu.val[0] + 2.0*sigma.val[0]))
{
filtered.push_back(keypoints[i]);
}
}
Mat out = input.clone();
drawKeypoints(input, filtered, out, Scalar(0, 255, 0));
circle(out, centroid, 7, Scalar(0, 0, 255), 1);
imshow("kpts", out);
waitKey();
imwrite("statFilter.png", out);
return 0;
}
Hope that helps!