Discard outlier SIFT Key Points in Cell Image with OpenCV - c++

I'm approaching a bioinformatics task and need to extract some features from cell images.
I used the SIFT algorithm to extract key points from the image, as you can see in the picture.
As you can also see in the picture (circled in red), some key points are outliers and I don't want to compute any features on them.
I obtained the cv::KeyPoint vector with the following code:
const cv::Mat input = cv::imread("/tmp/image.jpg", 0); //Load as grayscale
cv::SiftFeatureDetector detector;
std::vector<cv::KeyPoint> keypoints;
detector.detect(input, keypoints);
but I would like to discard from the vector all those key points that have, say, fewer than 3 key points inside a certain region of interest (ROI) centred on them in the image.
Therefore I need to implement a function that returns the number of key points inside a certain ROI given as input:
int function_returning_number_of_key_points_in_ROI( cv::KeyPoint, ROI );
//I have not specified ROI on purpose...check question 3
I have three questions:
Is there any existing function doing something similar?
If not, can you give me some help in understanding how to implement it myself?
Would you use a circular or a rectangular ROI for this task? And how would you specify it as input?
Note:
I forgot to specify that I would like an efficient implementation of the function, i.e. checking, for each key point, the relative position of all the others with respect to it would not be a good solution (if there is another way of doing it).
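As far as I know there is no single built-in OpenCV call that returns the number of key points inside a given ROI, but the all-pairs check can be avoided by putting the keypoint coordinates into a kd-tree (cv::flann::Index) and running one radius search per keypoint. The sketch below is only an illustration of that idea, not a finished feature: the function name, the radius value, and the neighbour cap are my own assumptions, and whether radiusSearch expects the squared radius for the default L2 index should be verified against your OpenCV version.

#include <opencv2/core/core.hpp>
#include <opencv2/flann/flann.hpp>

// Count how many keypoints fall inside a circular ROI of radius `radius`
// centred on keypoint `kp`, using a kd-tree built over all keypoint locations.
int countKeypointsInROI(cv::flann::Index& kdtree, const cv::KeyPoint& kp,
                        float radius, int maxNeighbours = 64)
{
    cv::Mat query = (cv::Mat_<float>(1, 2) << kp.pt.x, kp.pt.y);
    cv::Mat indices(1, maxNeighbours, CV_32S);
    cv::Mat dists(1, maxNeighbours, CV_32F);

    // With the default L2 index FLANN works on squared distances,
    // hence radius * radius here (assumption, please double-check).
    int found = kdtree.radiusSearch(query, indices, dists,
                                    radius * radius, maxNeighbours,
                                    cv::flann::SearchParams());
    return found - 1; // do not count the keypoint itself
}

// Usage sketch: build the index once over all keypoint locations.
// cv::Mat pts((int)keypoints.size(), 2, CV_32F);
// for (size_t i = 0; i < keypoints.size(); ++i) {
//     pts.at<float>((int)i, 0) = keypoints[i].pt.x;
//     pts.at<float>((int)i, 1) = keypoints[i].pt.y;
// }
// cv::flann::Index kdtree(pts, cv::flann::KDTreeIndexParams(4));
// int n = countKeypointsInROI(kdtree, keypoints[0], 30.0f);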

I decided to go with the statistical route, but this may not work if you have multiple cells in view.
My solution is fairly straightforward:
Compute the keypoint locations
Find the centroid of the keypoint spatial locations
Compute the Euclidean distance of all points to the centroid
Filter original keypoints by distance < mu + 2*sigma
Here is the image that I get using this algorithm (keypoints == green, centroid == red):
Finally, here is the code example of how I did it:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <iostream>
#include <vector>

using namespace cv;
using namespace std;

void distanceFromCentroid(const vector<Point2f>& points, Point2f centroid, vector<double>& distances)
{
    vector<Point2f>::const_iterator point;
    for(point = points.begin(); point != points.end(); ++point)
    {
        double distance = std::sqrt((point->x - centroid.x)*(point->x - centroid.x) + (point->y - centroid.y)*(point->y - centroid.y));
        distances.push_back(distance);
    }
}

int main(int argc, char* argv[])
{
    Mat input = imread("cell.jpg", 0); //Load as grayscale

    SiftFeatureDetector detector;
    vector<cv::KeyPoint> keypoints;
    detector.detect(input, keypoints);

    vector<Point2f> points;
    vector<KeyPoint>::iterator keypoint;
    for(keypoint = keypoints.begin(); keypoint != keypoints.end(); ++keypoint)
    {
        points.push_back(keypoint->pt);
    }

    Moments m = moments(points, true);
    Point2f centroid(m.m10 / m.m00, m.m01 / m.m00);

    vector<double> distances;
    distanceFromCentroid(points, centroid, distances);

    Scalar mu, sigma;
    meanStdDev(distances, mu, sigma);
    cout << mu.val[0] << ", " << sigma.val[0] << endl;

    vector<KeyPoint> filtered;
    for(size_t i = 0; i < distances.size(); ++i)
    {
        if(distances[i] < (mu.val[0] + 2.0*sigma.val[0]))
        {
            filtered.push_back(keypoints[i]);
        }
    }

    Mat out = input.clone();
    drawKeypoints(input, filtered, out, Scalar(0, 255, 0));
    circle(out, centroid, 7, Scalar(0, 0, 255), 1);

    imshow("kpts", out);
    waitKey();

    imwrite("statFilter.png", out);

    return 0;
}
Hope that helps!

Related

OpenCV - warpPerspective

I'm trying to use the function "warpPerspective" with OpenCV 3.0. I'm using this example:
http://answers.opencv.org/question/98110/how-do-i-stitch-images-with-two-different-angles/
I have to create an ROI on the right side of the first image and another one on the left side of the second image, use ORB to extract and compute descriptors, and match them. I didn't change much of the original code, just the ROI.
The problem is that every image whose perspective I try to warp comes out like this:
I already tried with multiple pairs of images and the problem persists.
#include "opencv2/opencv.hpp"
#include <iostream>
#include <fstream>
#include <ctype.h>
using namespace cv;
using namespace std;
int main(int argc, char* argv[])
{
Mat img1 = imread("image2.jpg");
Mat img2 = imread("image1.jpg");
namedWindow("I2", WINDOW_NORMAL); namedWindow("I1", WINDOW_NORMAL);
Ptr<ORB> o1 = ORB::create();
Ptr<ORB> o2 = ORB::create();
vector<KeyPoint> pts1, pts2;
Mat desc1, desc2;
vector<DMatch> matches;
Size s = img1.size();
Size s2 = img2.size();
Rect r1(s.width - 200, 0, 200, s.height);
//rectangle(img1, r1, Scalar(255, 0, 0), 5);
Rect r2(0, 0, 200, s2.height);
//rectangle(img2, r2, Scalar(255, 0, 0), 5);
Mat mask1 = Mat::zeros(img1.size(), CV_8UC1);
Mat mask2 = Mat::zeros(img1.size(), CV_8UC1);
mask1(r1) = 1;
mask2(r2) = 1;
o1->detectAndCompute(img1, mask1, pts1, desc1);
o2->detectAndCompute(img2, mask2, pts2, desc2);
BFMatcher descriptorMatcher(NORM_HAMMING, true);
descriptorMatcher.match(desc1, desc2, matches, Mat());
// Keep best matches only to have a nice drawing.
// We sort distance between descriptor matches
Mat index;
int nbMatch = int(matches.size());
Mat tab(nbMatch, 1, CV_32F);
for (int i = 0; i<nbMatch / 2; i++)
{
tab.at<float>(i, 0) = matches[i].distance;
}
sortIdx(tab, index, SORT_EVERY_COLUMN + SORT_ASCENDING);
vector<DMatch> bestMatches;
vector<Point2f> src, dst;
for (int i = 0; i < nbMatch / 2; i++)
{
int j = index.at<int>(i, 0);
cout << pts1[matches[j].queryIdx].pt << "\t" << pts2[matches[j].trainIdx].pt << "\n";
src.push_back(pts1[matches[j].queryIdx].pt + Point2f(0, img1.rows)); // necessary offset
dst.push_back(pts2[matches[j].trainIdx].pt);
}
cout << "\n";
Mat h = findHomography(src, dst, RANSAC);
Mat result;
cout << h << endl;
warpPerspective(img2, result, h.inv(), Size(3 * img2.cols + img1.cols, 2 * img2.rows + img1.rows));
imshow("I1", img1);
imshow("I2", img2);
Mat roi1(result, Rect(0, img1.rows, img1.cols, img1.rows));
img1.copyTo(roi1);
namedWindow("I3", WINDOW_NORMAL);
imshow("I3", result);
imwrite("result.jpg", result);
waitKey();
return 0;
Does that come from bad matches? Am I missing something? Since I'm kind of new to this topic, any help or ideas would be really appreciated.
Here are the quick things you need to check when your warp perspective is not working:
Did you select the right points in both images?
Reason: you need to choose exactly the same points that correspond to each other when finding a perspective transform. Unrelated points ruin it.
Are your points in the right order in the array?
Reason: you need to put them in the same corresponding order in both the source and destination before passing them to findHomography.
Are you passing them in the right order to findHomography? Try switching them in case you are not sure, so that it doesn't warp in reverse.
Those are the mistakes I made when I first used it. Now, if you look at your images, there's a small part overlapping in both of them. You need to be more careful there. Your rect mask might be at fault (see the sketch below).
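One concrete thing I would double-check (this is a guess on my part, not something I have run against your data) is the size used for the second mask: in the code shown, both masks are created with img1.size(), while the second mask is applied to img2. If the two images differ in size, ORB may be looking in the wrong strip or the mask may not match the image at all. A small sketch of what I mean, using setTo to make the ROI assignment explicit:

Mat mask1 = Mat::zeros(img1.size(), CV_8UC1);   // mask for the first image
Mat mask2 = Mat::zeros(img2.size(), CV_8UC1);   // mask for the second image, sized to img2
mask1(Rect(img1.cols - 200, 0, 200, img1.rows)).setTo(255);   // right strip of image 1
mask2(Rect(0, 0, 200, img2.rows)).setTo(255);                 // left strip of image 2

Whether this is actually the cause of your warped output I can't say for sure, but it is cheap to rule out.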

use warpAffine of OpenCV to do image registration

I am trying to do image registration with ORB features.
I got a problem when using warpAffine. The compiler told me that it is not possible to convert parameter 1 from cv::Mat * to cv::InputArray.
Here is my code:
#pragma once

// Standard C++ I/O library.
#include <iostream>
#include <string>
#include <iomanip>
#include <vector>

// OpenCV library.
#include <cv.h>
#include <highgui.h>

// OpenCV feature library.
#include <opencv2/opencv.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <nonfree/features2d.hpp>

// main().
int main(int argv, char ** argc)
{
    cv::Mat im_ref, im_cmp;
    std::string str_ref, str_cmp;

    // Read reference image.
    //std::cout<<"Input reference image filename: ";
    //std::cin>>str_ref;
    std::cout<<"-> Reading images."<<std::endl;
    str_ref = "F:\\CPPs\\ImageRegistration\\OpenCVTest\\206.png";
    im_ref = cv::imread(str_ref);
    cv::imshow("Reference image", im_ref);

    // Read testing image.
    //std::cout<<"Input testing image filename: ";
    //std::cin>>str_cmp;
    str_cmp = "F:\\CPPs\\ImageRegistration\\OpenCVTest\\227.png";
    im_cmp = cv::imread(str_cmp);
    cv::imshow("Testing image", im_cmp);
    std::cout<<"Press any key to continue."<<std::endl;
    cvWaitKey(0);

    // Feature detection.
    std::cout<<"-> Feature detection."<<std::endl;
    std::vector <cv::KeyPoint> key_ref, key_cmp; // Vectors for features extracted from reference and testing images.
    cv::Mat des_ref, des_cmp; // Descriptors for features of 2 images.
    cv::ORB orb1; // An ORB object.
    orb1(im_ref, cv::Mat(), key_ref, des_ref); // Feature extraction.
    orb1(im_cmp, cv::Mat(), key_cmp, des_cmp);

    // Show keypoints.
    std::cout<<"-> Show keypoints."<<std::endl;
    cv::Mat drawkey_ref, drawkey_cmp; // Output image for keypoint drawing.
    cv::drawKeypoints(im_ref, key_ref, drawkey_ref); // Generate image for keypoint drawing.
    cv::imshow("Keypoints of reference", drawkey_ref);
    cv::drawKeypoints(im_cmp, key_cmp, drawkey_cmp);
    cv::imshow("Keypoints of test", drawkey_cmp);
    cvWaitKey(0);

    // Matching.
    std::cout<<"-> Matching."<<std::endl;
    cv::FlannBasedMatcher matcher1(new cv::flann::LshIndexParams(20,10,2));
    std::vector<cv::DMatch> matches1;
    matcher1.match(des_ref, des_cmp, matches1); // Match two sets of features.
    double max_dist = 0;
    double min_dist = 100;

    // Find out the minimum and maximum of all distance.
    for( int i = 0; i < des_ref.rows; i++ )
    {
        double dist = matches1[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }
    cvWaitKey(0);

    // Eliminate relatively bad points.
    std::cout<<"-> Bad points elimination"<<std::endl;
    std::vector<cv::KeyPoint> kgood_ref, kgood_cmp;
    std::vector<cv::DMatch> goodMatch;
    for (int i=0; i<matches1.size(); i++)
    {
        if(matches1[i].distance < 2*min_dist) // Keep points that are less than 2 times of the minimum distance.
        {
            goodMatch.push_back(matches1[i]);
            kgood_ref.push_back(key_ref[i]);
            kgood_cmp.push_back(key_cmp[i]);
        } // end if
    } // end for
    cvWaitKey(0);

    // Calculate affine transform matrix.
    std::cout<<"-> Calculating affine transformation."<<std::endl;
    std::vector<cv::Point2f> frm1_feature, frm2_feature;
    const int p_size = goodMatch.size();
    // * tmpP = new tmpPoint[p_size];
    cv::Point2f tmpP;
    for(int i=0; i<goodMatch.size(); i++)
    {
        tmpP.x = kgood_ref[i].pt.x;
        tmpP.y = kgood_ref[i].pt.y;
        frm1_feature.push_back(tmpP);
        tmpP.x = kgood_cmp[i].pt.x;
        tmpP.y = kgood_cmp[i].pt.y;
        frm2_feature.push_back(tmpP);
    }
    cv::Mat affine_mat = cv::estimateRigidTransform(frm1_feature, frm2_feature, true);
    cv::Mat im_transformed;

    // Output results.
    cv::warpAffine(&im_cmp, &im_transformed, affine_mat, CV_INTER_LINEAR|CV_WARP_FILL_OUTLIERS); // error comes from here.
    cv::imshow("Transformed image", im_transformed);
    cvWaitKey(0);

    return 0;
}
I had already got a result before using the answer given by Evgeniy.
The transform call I had used is:
//cv::warpAffine( im_cmp, im_transformed, affine_mat, cv::Size(im_cmp.cols, im_cmp.rows) );
The transformed result is quite strange.
What I want to do is finally get a merged image of both the reference image and this transformed image. This is actually my first step. Is the problem in the transformation parameters I pass to warpAffine()?
Finally, I want to get a result like the example here (two images taken at different positions and finally aligned).
You are giving a pointer, but warpAffine accepts a reference to a cv::Mat.
You can change your code like this:
cv::warpAffine(im_cmp, im_transformed, affine_mat, cv::Size(), CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS);
Just remove the '&'.
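For the merging step mentioned in the question, once warpAffine runs, a quick way to check the registration visually is to blend the warped image onto the reference. This is just a sketch under the assumption that both images have the same type; im_ref.size() is used as the output size so the two can be blended directly:

cv::Mat im_transformed;
cv::warpAffine(im_cmp, im_transformed, affine_mat, im_ref.size());

// 50/50 blend of the reference and the registered image.
cv::Mat merged;
cv::addWeighted(im_ref, 0.5, im_transformed, 0.5, 0.0, merged);
cv::imshow("Merged", merged);
cv::waitKey(0);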

How to count white objects in a binary image?

I'm trying to count objects in an image. I use a photo of logs, and I use several steps to get a binary image.
This is my code:
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main(int argc, char *argv[])
{
    //load image
    Mat img = imread("kayu.jpg", CV_LOAD_IMAGE_COLOR);
    if(img.empty())
        return -1;
    //namedWindow( "kayu", CV_WINDOW_AUTOSIZE );
    imshow("kayu", img);

    //convert to b/w
    Mat bw;
    cvtColor(img, bw, CV_BGR2GRAY);
    imshow("bw1", bw);
    threshold(bw, bw, 40, 255, CV_THRESH_BINARY);
    imshow("bw", bw);

    //distance transform & normalization
    Mat dist;
    distanceTransform(bw, dist, CV_DIST_L2, 3);
    normalize(dist, dist, 0, 2., NORM_MINMAX);
    imshow("dist", dist);

    //threshold to draw line
    threshold(dist, dist, .5, 1., CV_THRESH_BINARY);
    imshow("dist2", dist);
    //dist = bw;

    //dilation
    Mat dilation, erotion, element;
    int dilation_type = MORPH_ELLIPSE;
    int dilation_size = 17;
    element = getStructuringElement(dilation_type, Size(2*dilation_size + 1, 2*dilation_size + 1), Point(dilation_size, dilation_size));
    erode(dist, erotion, element);
    int erotionCount = 0;
    for(int i=0; i<erotionCount; i++){
        erode(erotion, erotion, element);
    }
    imshow("erotion", erotion);
    dilate(erotion, dilation, element);
    imshow("dilation", dilation);

    waitKey(0);
    return 0;
}
As you can see, I use erosion and dilation to get better circular log objects. My problem is that I'm stuck at counting the objects. I tried SimpleBlobDetector but got nothing, because when I try to convert the result of the "dilation" step to CV_8U, the white objects disappear. I also got an error when I used findContours(); it said something about the number of channels of the image. I can't show the error here, because there were too many steps and I already deleted them from my code.
By the way, at the end I have a single-channel image.
Can I just use it for counting, or do I have to convert it first, and what is the best method to do that?
Two simple steps:
Find contours for the binarized image.
Get the count of the contours.
Code:
int count_trees(const cv::Mat& bin_image){
    cv::Mat img;
    if(bin_image.channels() > 1){
        cv::cvtColor(bin_image, img, cv::COLOR_BGR2GRAY);
    }
    else{
        img = bin_image.clone();
    }
    if(img.type() != CV_8UC1){
        img *= 255.f; //This could be stupid, but I do not have an environment to try it
        img.convertTo(img, CV_8UC1);
    }
    std::vector<std::vector<cv::Point>> contours;
    std::vector<cv::Vec4i> hierarchy;
    cv::findContours(img, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
    return contours.size();
}
I have the same problem; here's an idea I'm about to implement (a concrete sketch follows below).
1) Represent your image as an array of integers; 0 = black, 1 = white.
2) Set N = 2.
3) Scan your image pixel by pixel. Whenever you find a white pixel, run a flood-fill algorithm starting at the pixel just found, painting the region with the value of N++.
4) Repeat step 3 until you reach the last pixel. (N - 2) is the number of regions found.
This method depends on the shape of the objects; mine are more chaotic than yours (wish me luck..). I'll make use of a recursive flood-fill recipe found somewhere (maybe Rosetta Code).
This solution also makes it easy to compute the size of each region.
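To make the idea above concrete, here is a rough sketch of how the same counting could be done with OpenCV's floodFill instead of a hand-written recursive fill. The function name and the fill value 128 are my own choices, and it assumes the input is already a single-channel 8-bit binary image with white objects at 255:

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

int countWhiteRegions(const cv::Mat& bin)
{
    cv::Mat img = bin.clone();   // work on a copy so the input stays untouched
    int regions = 0;

    for (int y = 0; y < img.rows; ++y)
    {
        for (int x = 0; x < img.cols; ++x)
        {
            if (img.at<uchar>(y, x) == 255)      // unvisited white pixel
            {
                // Paint the whole connected region with 128 so it is not counted again.
                cv::floodFill(img, cv::Point(x, y), cv::Scalar(128));
                ++regions;
            }
        }
    }
    return regions;
}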
You could also try applying this to the image from the code you deleted, counting only contours that have no child contour in the hierarchy:
// count
int count = 0;
for (int i = 0; i < contours.size(); i = hierarchy[i][0]) // iterate over each top-level contour
{
    Rect r = boundingRect(contours[i]);
    if (hierarchy[i][2] < 0) { // the contour has no child contour
        rectangle(canny_output, Point(r.x, r.y), Point(r.x + r.width, r.y + r.height), Scalar(20, 50, 255), 3, 8, 0);
        count++;
    }
}
cout << "Number of contours = " << count << endl;
imshow("src", src);
imshow("contour", dst);
waitKey(0);

Face/Image matching incorrectly done using KnnMatch of BruteForceMatcher or FlannBasedMatcher

I am trying to recognize a source image (c1.jpg, a face) in a bigger destination image (allimg.jpg, containing 3 faces) using the ORB detector/descriptor and a FLANN or brute-force matcher. c1.jpg was created from allimg.jpg by cropping/copying from it.
The ORB detector/descriptor work as expected, returning keypoints/descriptors correctly, BUT the FLANN or brute-force matcher gives incorrect matching results for the destination. As a result, when I further try to use findHomography(), it shows an incorrect result, mapping the source to somewhere else in the destination instead of onto the correct face in the destination (allimg).
Although it is not shown in the code below, after knnMatch I drew a bounding rect on c1.jpg and allimg.jpg from the matches and displayed the images. I found that the source bounding rect was correct, but the bounding rect on allimg was very big and merely included the source face. It should have found just the source face in the destination.
I am using OpenCV 3.0.
Did anyone face such problems? Are there any other matchers which accurately find the source image (a face or anything else) in the destination?
I have given the code below and the images (via links):
#include <opencv2/core/core.hpp>
#include <opencv2\opencv.hpp>
#include <opencv2/features2d/features2d.hpp>

using namespace std;
using namespace cv;

const double nn_match_ratio = 0.80f;   // Nearest-neighbour matching ratio
const double ransac_thresh = 2.5f;     // RANSAC inlier threshold
const int bb_min_inliers = 100;        // Minimal number of inliers to draw BBox

Mat img1;
Mat img2;

bool refineMatchesWithHomography(const vector<cv::KeyPoint>& queryKeypoints,
                                 const vector<cv::KeyPoint>& trainKeypoints,
                                 float reprojectionThreshold,
                                 vector<cv::DMatch>& matches,
                                 Mat& homography)
{
    const int minNumberMatchesAllowed = 4;
    if (matches.size() < minNumberMatchesAllowed)
        return false;

    // Prepare data for cv::findHomography
    vector<cv::Point2f> queryPoints(matches.size());
    std::vector<cv::Point2f> trainPoints(matches.size());
    for (size_t i = 0; i < matches.size(); i++)
    {
        queryPoints[i] = queryKeypoints[matches[i].queryIdx].pt;
        trainPoints[i] = trainKeypoints[matches[i].trainIdx].pt;
    }

    // Find homography matrix and get inliers mask
    std::vector<unsigned char> inliersMask(matches.size());
    homography = findHomography(queryPoints,
                                trainPoints,
                                CV_FM_RANSAC,
                                reprojectionThreshold,
                                inliersMask);

    vector<cv::DMatch> inliers;
    for (size_t i = 0; i < inliersMask.size(); i++)
    {
        if (inliersMask[i])
            inliers.push_back(matches[i]);
    }
    matches.swap(inliers);

    Mat homoShow;
    drawMatches(img1, queryKeypoints, img2, trainKeypoints, matches, homoShow,
                Scalar::all(-1), CV_RGB(255,255,255), Mat(), 2);
    imshow("homoShow", homoShow);
    waitKey(100000);

    return matches.size() > minNumberMatchesAllowed;
}

int main()
{
    //Stats stats;
    vector<String> fileName;
    fileName.push_back("D:\\pmn\\c1.jpg");
    fileName.push_back("D:\\pmn\\allimg.jpg");
    img1 = imread(fileName[0], CV_LOAD_IMAGE_COLOR);
    img2 = imread(fileName[1], CV_LOAD_IMAGE_COLOR);
    if (img1.rows*img1.cols <= 0)
    {
        cout << "Image " << fileName[0] << " is empty or cannot be found\n";
        return(0);
    }
    if (img2.rows*img2.cols <= 0)
    {
        cout << "Image " << fileName[1] << " is empty or cannot be found\n";
        return(0);
    }

    // keypoint for img1 and img2
    vector<KeyPoint> keyImg1, keyImg2;
    // Descriptor for img1 and img2
    Mat descImg1, descImg2;

    Ptr<Feature2D> porb = ORB::create(500, 1.2f, 8, 0, 0, 2, 0, 14);
    porb->detect(img2, keyImg2, Mat());
    // and compute their descriptors with method compute
    porb->compute(img2, keyImg2, descImg2);
    // We can detect keypoint with detect method
    porb->detect(img1, keyImg1, Mat());
    // and compute their descriptors with method compute
    porb->compute(img1, keyImg1, descImg1);

    //FLANN parameters
    // Ptr<flann::IndexParams> indexParams = makePtr<flann::LshIndexParams>(6, 12, 1);
    // Ptr<flann::SearchParams> searchParams = makePtr<flann::SearchParams>(50);
    String itMatcher = "BruteForce-L1";

    Ptr<DescriptorMatcher>
        matdescriptorMatchercher(new cv::BFMatcher(cv::NORM_HAMMING, false));
    vector<vector<DMatch> > matches, bestMatches;
    vector<DMatch> m;
    matdescriptorMatchercher->knnMatch(descImg1, descImg2, matches, 2);

    const float minRatio = 0.95f; //1.f / 1.5f;
    for (int i = 0; i < matches.size(); i++)
    {
        if (matches[i].size() > 1)
        {
            DMatch& bestMatch = matches[i][0];
            DMatch& betterMatch = matches[i][1];
            float distanceRatio = bestMatch.distance / betterMatch.distance;
            if (distanceRatio < minRatio)
            {
                bestMatches.push_back(matches[i]);
                m.push_back(bestMatch);
            }
        }
    }

    Mat homo;
    float homographyReprojectionThreshold = 1.0;
    bool homographyFound = refineMatchesWithHomography(
        keyImg1, keyImg2, homographyReprojectionThreshold, m, homo);
    return 0;
}
[c1.jpg][1]
[allimg.jpg][2]
[1]: http://i.stack.imgur.com/Uuy3o.jpg
[2]: http://i.stack.imgur.com/Kwne7.jpg
Thanks EdChum. I used the code given at the link (ratio test / symmetry test) and it provided somewhat OK image matching, but only if the source image was part of the destination, and it is still not accurate enough. Note that I commented out the last ransacTest, as it was removing a lot of positives unnecessarily.
I have attached the 2 images (source.jpg / destination.jpg), which show what I mean by highlighting the matched part in the destination.
Is there any algorithm which will identify the source in the destination still more accurately/correctly (>90%)?
Also, if the source is only a similar image (and not an exact crop of the destination), I found that the destination matching is way off and useless. Am I right?
Kindly share your view.
1 = source, 2 = destination
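One more thing that may be worth trying, in addition to the ratio test already in the code, is a cross-check (symmetry) step. BFMatcher can do this internally when constructed with crossCheck=true: a match (i, j) survives only if descriptor j's best match in the other direction is descriptor i. This is only a sketch reusing the descriptor names from the question, not a drop-in replacement for the full pipeline (note that cross-checking applies to match(), not to knnMatch with k=2):

// Cross-check matching as an alternative/extra filter before the homography step.
BFMatcher crossChecker(NORM_HAMMING, true);   // second argument enables cross-checking
vector<DMatch> symmetricMatches;
crossChecker.match(descImg1, descImg2, symmetricMatches);

// The surviving matches can then be fed to refineMatchesWithHomography()
// in place of (or in addition to) the ratio-test matches `m`.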

Find 4 specific corner pixels and use them with warp perspective

I'm playing around with OpenCV and I want to know how you would build a simple version of a perspective transform program. I have an image of a parallelogram, and each corner of it consists of a pixel with a specific color which appears nowhere else in the image. I want to iterate through all pixels and find these 4 pixels. Then I want to use them as corner points in a new image in order to warp the perspective of the original image. In the end I should have a zoomed-in square.
Point2f src[4]; // Is this the right datatype to use here?
int lineNumber = 0;
// iterating through the pixels
for (int y = 0; y < image.rows; y++)
{
    for (int x = 0; x < image.cols; x++)
    {
        Vec3b colour = image.at<Vec3b>(Point(x, y));
        if (colour.val[1] == 245 && colour.val[2] == 111 && colour.val[0] == 10) {
            src[lineNumber] = Point2f(x, y); // I guess something like this
            lineNumber++;
        }
    }
}
/* I also need to get the dst points for getPerspectiveTransform
   and afterwards warpPerspective. How do I get those? Take the other
   points, check the biggest distance somehow and use it as the max length
   to calculate the rest? */
How should I use OpenCV to solve this problem? (I just guess I'm not doing it the "normal and clever" way.) Also, how do I do the next step, which would be using more than one pixel as a "marker" and calculating the average point in the middle of multiple points? Is there something more efficient than running through each pixel?
Something like this basically:
Starting from an image with colored circles as markers, like:
Note that this is a PNG image, i.e. one with lossless compression which preserves the actual colors. If you use a lossy compression like JPEG the colors will change a little, and you cannot segment them with an exact match, as done here.
You need to find the center of each marker.
Segment the (known) color, using inRange
Find all connected components with the given color, with findContours
Find the largest blob, here done with max_element with a lambda function, and distance. You can use a for loop for this.
Find the center of mass of the largest blob, here done with moments. You can use a loop also here, eventually.
Add the center to your source vertices.
Your destination vertices are just the four corners of the destination image.
You can then use getPerspectiveTransform and warpPerspective to find and apply the warping.
The resulting image is:
Code:
#include <opencv2/opencv.hpp>
#include <vector>
#include <algorithm>
using namespace std;
using namespace cv;
int main()
{
// Load image
Mat3b img = imread("path_to_image");
// Create a black output image
Mat3b out(300,300,Vec3b(0,0,0));
// The color of your markers, in order
vector<Scalar> colors{ Scalar(0, 0, 255), Scalar(0, 255, 0), Scalar(255, 0, 0), Scalar(0, 255, 255) }; // red, green, blue, yellow
vector<Point2f> src_vertices(colors.size());
vector<Point2f> dst_vertices = { Point2f(0, 0), Point2f(0, out.rows - 1), Point2f(out.cols - 1, out.rows - 1), Point2f(out.cols - 1, 0) };
for (int idx_color = 0; idx_color < colors.size(); ++idx_color)
{
// Detect color
Mat1b mask;
inRange(img, colors[idx_color], colors[idx_color], mask);
// Find connected components
vector<vector<Point>> contours;
findContours(mask, contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);
// Find largest
int idx_largest = distance(contours.begin(), max_element(contours.begin(), contours.end(), [](const vector<Point>& lhs, const vector<Point>& rhs) {
return lhs.size() < rhs.size();
}));
// Find centroid of largest component
Moments m = moments(contours[idx_largest]);
Point2f center(m.m10 / m.m00, m.m01 / m.m00);
// Found marker center, add to source vertices
src_vertices[idx_color] = center;
}
// Find transformation
Mat M = getPerspectiveTransform(src_vertices, dst_vertices);
// Apply transformation
warpPerspective(img, out, M, out.size());
imshow("Image", img);
imshow("Warped", out);
waitKey();
return 0;
}