Stitching 2 images with overlapping area using opencv - c++

I want to stitch 2 images using OpenCV (I don't want to use the Stitcher class). So far I've done keypoint detection, description, matching and warping.
Here are the input images and results: left, right, my output, Stitcher class output.
Here is my code after finding good matches with the SURF algorithm:
for (int j = 0; j < good_matches.size(); j++)
{
    //-- Get the keypoints from the good matches
    obj.push_back(keypoints1[good_matches[j].queryIdx].pt);
    scene.push_back(keypoints2[good_matches[j].trainIdx].pt);
}
H = findHomography(Mat(scene), Mat(obj), match_mask, CV_RANSAC);

cv::Mat result;
warpPerspective(image2, result, H, cv::Size(image2.cols + image1.cols, image2.rows * 2), INTER_CUBIC);
Mat final(Size(image2.cols * 2 + image2.cols, image2.rows * 2), CV_8UC3);
Mat roi1(final, Rect(0, 0, image1.cols, image1.rows));
Mat roi2(final, Rect(0, 0, result.cols, result.rows));
result.copyTo(roi2);
image1.copyTo(roi1);
imshow("Result", final);
So my question is: what should I add to my code so that my output looks more like the one from the Stitcher class?
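One thing the Stitcher class does that this code doesn't is blend the overlap instead of overwriting it. Below is a rough, hedged sketch (not the Stitcher pipeline, which also does exposure compensation and multi-band blending) of a simple feather blend over the overlapping columns, assuming result already holds the warped image2 and image1 is aligned with the top-left of the canvas:

#include <opencv2/opencv.hpp>
#include <algorithm>

// Hypothetical helper: linearly blend 'left' (image1) over 'warped' (the
// warpPerspective output) in the columns where both have data, so the seam
// fades instead of being a hard edge. Both Mats are assumed CV_8UC3.
cv::Mat featherBlend(const cv::Mat& warped, const cv::Mat& left, int overlapStart)
{
    cv::Mat out = warped.clone();
    int band = std::max(1, left.cols - overlapStart); // width of the blend band
    for (int y = 0; y < left.rows; y++)
    {
        for (int x = 0; x < left.cols; x++)
        {
            cv::Vec3b wp = warped.at<cv::Vec3b>(y, x);
            cv::Vec3b lp = left.at<cv::Vec3b>(y, x);
            if (x < overlapStart || wp == cv::Vec3b(0, 0, 0))
            {
                out.at<cv::Vec3b>(y, x) = lp; // only image1 contributes here
            }
            else
            {
                // weight falls from 1 (pure image1) to 0 across the overlap band
                float w = 1.0f - float(x - overlapStart) / float(band);
                for (int c = 0; c < 3; c++)
                    out.at<cv::Vec3b>(y, x)[c] =
                        cv::saturate_cast<uchar>(w * lp[c] + (1.0f - w) * wp[c]);
            }
        }
    }
    return out;
}

It could be called as, for example, Mat blended = featherBlend(result, image1, image1.cols - 100); where 100 is only a guess at the overlap width — the actual band should be derived from where the warped image2 starts overlapping image1.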

Related

opencv cornerSubPix Exception while converting python code to c++

I am trying to port this response to C++, but I am not able to get past a cryptic exception (see image below). I'm not sure what the limiting factor is; I imagine it is the image color format or the corners parameter, but nothing seems to be working. If it is related to converting the color format, please provide a small code snippet.
The Python code provided by Anubhav Singh is working great, however I would like to develop in C++. Any help would be greatly appreciated.
I am using OpenCV 4.2.0.
void CornerDetection() {
    std::string image_path = samples::findFile("../wing.png");
    Mat img = imread(image_path);
    Mat greyMat;
    Mat dst;
    cv::cvtColor(img, greyMat, COLOR_BGR2GRAY);
    threshold(greyMat, greyMat, 0, 255, THRESH_BINARY | THRESH_OTSU);
    cornerHarris(greyMat, dst, 9, 5, 0.04);
    dilate(dst, dst, Mat()); // empty Mat() means the default 3x3 kernel (the equivalent of None in the Python code)
    Mat img_thresh;
    threshold(dst, img_thresh, 0.32 * 255, 255, 0);
    img_thresh.convertTo(img_thresh, CV_8UC1);
    Mat labels = Mat();
    Mat stats = Mat();
    Mat centroids = Mat();
    cv::connectedComponentsWithStats(img_thresh, labels, stats, centroids, 8, CV_32S);
    TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::MAX_ITER, 30, 0.001);
    std::vector<Point2f> corners = std::vector<Point2f>();
    Size winSize = Size(5, 5);
    Size zeroZone = Size(-1, -1);
    cornerSubPix(greyMat, corners, winSize, zeroZone, criteria);
    for (int i = 0; i < corners.size(); i++)
    {
        circle(img, Point(corners[i].x, corners[i].y), 5, Scalar(0, 255, 0), 2);
    }
    imshow("img", img);
    waitKey();
    destroyAllWindows();
}
The solution was to iterate over the centroids to build the corners vector before passing the corners variable to the cornerSubPix(...) function.
std::vector<Point2f> corners = std::vector<Point2f>();
for (int i = 0; i < centroids.rows; i++)
{
    double x = centroids.at<double>(i, 0);
    double y = centroids.at<double>(i, 1);
    corners.push_back(Point2f(x, y));
}
The output of this solution is still not exactly the same as the Python output, but it fixes the exception in this question in case anyone else runs across this issue.
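A hedged follow-up, in case it helps close the gap with the Python output: label 0 from connectedComponentsWithStats is the background component, so one might skip it (and optionally filter out tiny components via stats) before refining. This is only an assumption about what differs, not a confirmed fix:

std::vector<Point2f> corners;
for (int i = 1; i < centroids.rows; i++) // start at 1: label 0 is the background
{
    // optionally ignore very small blobs (the 3-pixel area threshold is just an example)
    if (stats.at<int>(i, CC_STAT_AREA) < 3)
        continue;
    corners.push_back(Point2f((float)centroids.at<double>(i, 0),
                              (float)centroids.at<double>(i, 1)));
}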

OpenCV - warpPerspective

I'm trying to use the function "warpPerspective" with OpenCV 3.0. I'm using this example:
http://answers.opencv.org/question/98110/how-do-i-stitch-images-with-two-different-angles/
I create a ROI on the right side of the first image and another one on the left side of the second image, use ORB to detect keypoints, compute descriptors, and match them. I didn't change much of the original code, just the ROI.
The problem is that every image I try to warp the perspective of comes out like this:
I have already tried with multiple pairs of images and the problem persists.
#include "opencv2/opencv.hpp"
#include <iostream>
#include <fstream>
#include <ctype.h>
using namespace cv;
using namespace std;
int main(int argc, char* argv[])
{
    Mat img1 = imread("image2.jpg");
    Mat img2 = imread("image1.jpg");
    namedWindow("I2", WINDOW_NORMAL);
    namedWindow("I1", WINDOW_NORMAL);
    Ptr<ORB> o1 = ORB::create();
    Ptr<ORB> o2 = ORB::create();
    vector<KeyPoint> pts1, pts2;
    Mat desc1, desc2;
    vector<DMatch> matches;
    Size s = img1.size();
    Size s2 = img2.size();
    Rect r1(s.width - 200, 0, 200, s.height);
    //rectangle(img1, r1, Scalar(255, 0, 0), 5);
    Rect r2(0, 0, 200, s2.height);
    //rectangle(img2, r2, Scalar(255, 0, 0), 5);
    Mat mask1 = Mat::zeros(img1.size(), CV_8UC1);
    Mat mask2 = Mat::zeros(img1.size(), CV_8UC1);
    mask1(r1) = 1;
    mask2(r2) = 1;
    o1->detectAndCompute(img1, mask1, pts1, desc1);
    o2->detectAndCompute(img2, mask2, pts2, desc2);
    BFMatcher descriptorMatcher(NORM_HAMMING, true);
    descriptorMatcher.match(desc1, desc2, matches, Mat());
    // Keep best matches only to have a nice drawing.
    // We sort distance between descriptor matches
    Mat index;
    int nbMatch = int(matches.size());
    Mat tab(nbMatch, 1, CV_32F);
    for (int i = 0; i < nbMatch / 2; i++)
    {
        tab.at<float>(i, 0) = matches[i].distance;
    }
    sortIdx(tab, index, SORT_EVERY_COLUMN + SORT_ASCENDING);
    vector<DMatch> bestMatches;
    vector<Point2f> src, dst;
    for (int i = 0; i < nbMatch / 2; i++)
    {
        int j = index.at<int>(i, 0);
        cout << pts1[matches[j].queryIdx].pt << "\t" << pts2[matches[j].trainIdx].pt << "\n";
        src.push_back(pts1[matches[j].queryIdx].pt + Point2f(0, img1.rows)); // necessary offset
        dst.push_back(pts2[matches[j].trainIdx].pt);
    }
    cout << "\n";
    Mat h = findHomography(src, dst, RANSAC);
    Mat result;
    cout << h << endl;
    warpPerspective(img2, result, h.inv(), Size(3 * img2.cols + img1.cols, 2 * img2.rows + img1.rows));
    imshow("I1", img1);
    imshow("I2", img2);
    Mat roi1(result, Rect(0, img1.rows, img1.cols, img1.rows));
    img1.copyTo(roi1);
    namedWindow("I3", WINDOW_NORMAL);
    imshow("I3", result);
    imwrite("result.jpg", result);
    waitKey();
    return 0;
}
Does that come from bad matches? Am I missing something? Since I'm kind of new to this topic, any help or ideas would be really appreciated.
Here are the quick things to check when your warpPerspective result is not working:
Did you select the right points in both images?
Reason: you need to choose exactly the points that correspond to each other when finding a perspective transform; unrelated points ruin it.
Are your points in the right order in the arrays?
Reason: you need to put them in the same corresponding order in both the source and destination arrays before passing them to findHomography.
Are you passing them in the right order to findHomography? Try switching source and destination in case you are not sure, so that it doesn't warp in reverse.
Those are the mistakes I made when I first used it. Now, if you look at your images, there is only a small part overlapping between them, so you need to be more careful there. Your rect masks might be at fault.
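To illustrate the ordering point, here is a minimal, hedged sketch of how the source and destination points are usually built so that element i of each vector refers to the same physical feature (the names pts1/pts2/matches follow the code above):

// src[i] and dst[i] must describe the same scene point:
// queryIdx indexes keypoints of the first image, trainIdx those of the second.
std::vector<cv::Point2f> src, dst;
for (const cv::DMatch& m : matches)
{
    src.push_back(pts1[m.queryIdx].pt);   // point in image 1
    dst.push_back(pts2[m.trainIdx].pt);   // its correspondence in image 2
}
// H maps image-1 coordinates onto image-2 coordinates; swap src/dst (or use H.inv())
// if you need the opposite direction.
cv::Mat H = findHomography(src, dst, cv::RANSAC);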

OpenCV--how to get better hand contour from low quality gray image?

I need to get the contour from a hand image. Usually I process the image in 4 steps:
convert the raw RGB image from 3 channels to 1 gray channel:
cvtColor(sourceGrayImage, sourceGrayImage, COLOR_BGR2GRAY);
use Gaussian blur to filter the gray image:
GaussianBlur(sourceGrayImage, sourceGrayImage, Size(3,3), 0);
binarize the gray image: I split the image into pieces by its height (normally 6 pieces), then threshold each piece:
// we split the source picture into binaryImageSectionCount (here it's 8) pieces by its height,
// then we threshold every piece,
// and at last we combine them again into binaryImage
const int binaryImageSectionCount = 8;
void GetBinaryImage(Mat &grayImage, Mat &binaryImage)
{
    // get every partial gray image's height
    int partImageHeight = grayImage.rows / binaryImageSectionCount;
    for (int i = 0; i < binaryImageSectionCount; i++)
    {
        Mat partialGrayImage;
        Mat partialBinaryImage;
        Rect partialRect;
        if (i != binaryImageSectionCount - 1)
        {
            // if it's not the last piece, the Rect's height should be partImageHeight
            partialRect = Rect(0, i * partImageHeight, grayImage.cols, partImageHeight);
        }
        else
        {
            // if it's the last piece, the Rect's height should be (grayImage.rows - i * partImageHeight)
            partialRect = Rect(0, i * partImageHeight, grayImage.cols, grayImage.rows - i * partImageHeight);
        }
        Mat partialResource = grayImage(partialRect);
        partialResource.copyTo(partialGrayImage);
        threshold(partialGrayImage, partialBinaryImage, 0, 255, THRESH_OTSU);
        // combine the partial binary images into one piece
        partialBinaryImage.copyTo(binaryImage(partialRect));
        //stringstream resultStrm;
        //resultStrm << "partial_" << (i + 1);
        //string string = resultStrm.str();
        //imshow(string, partialBinaryImage);
        //waitKey(0);
    }
    imshow("result binary image.", binaryImage);
    waitKey(0);
    return;
}
use findContours to get the biggest-area contour:
vector<vector<Point> > contours;
findContours(binaryImage, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
Normally it works well, but for some low quality gray images it doesn't, like the one below.
The complete code is here:
#include <opencv2/imgproc/imgproc.hpp>
#include<opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace std;
using namespace cv;
// we split the source picture into binaryImageSectionCount (here it's 8) pieces by its height,
// then we threshold every piece,
// and at last we combine them again into binaryImage
const int binaryImageSectionCount = 8;
void GetBinaryImage(Mat &grayImage, Mat &binaryImage)
{
    // get every partial gray image's height
    int partImageHeight = grayImage.rows / binaryImageSectionCount;
    for (int i = 0; i < binaryImageSectionCount; i++)
    {
        Mat partialGrayImage;
        Mat partialBinaryImage;
        Rect partialRect;
        if (i != binaryImageSectionCount - 1)
        {
            // if it's not the last piece, the Rect's height should be partImageHeight
            partialRect = Rect(0, i * partImageHeight, grayImage.cols, partImageHeight);
        }
        else
        {
            // if it's the last piece, the Rect's height should be (grayImage.rows - i * partImageHeight)
            partialRect = Rect(0, i * partImageHeight, grayImage.cols, grayImage.rows - i * partImageHeight);
        }
        Mat partialResource = grayImage(partialRect);
        partialResource.copyTo(partialGrayImage);
        threshold(partialGrayImage, partialBinaryImage, 0, 255, THRESH_OTSU);
        // combine the partial binary images into one piece
        partialBinaryImage.copyTo(binaryImage(partialRect));
        //stringstream resultStrm;
        //resultStrm << "partial_" << (i + 1);
        //string string = resultStrm.str();
        //imshow(string, partialBinaryImage);
        //waitKey(0);
    }
    imshow("result binary image.", binaryImage);
    waitKey(0);
    return;
}
int main(int argc, _TCHAR* argv[])
{
    // get image path
    string imgPath("C:\\Users\\Alfred\\Desktop\\gray.bmp");
    // read image
    Mat src = imread(imgPath);
    imshow("Source", src);
    //medianBlur(src, src, 7);
    cvtColor(src, src, COLOR_BGR2GRAY);
    imshow("gray", src);
    // do filter
    GaussianBlur(src, src, Size(3,3), 0);
    // binary image
    Mat threshold_output(src.rows, src.cols, CV_8UC1, Scalar(0, 0, 0));
    GetBinaryImage(src, threshold_output);
    imshow("binaryImage", threshold_output);
    // get biggest contour
    vector<vector<Point> > contours;
    findContours(threshold_output, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
    int biggestContourIndex = 0;
    int maxContourArea = -1000;
    for (int i = 0; i < contours.size(); i++)
    {
        if (contourArea(contours[i]) > maxContourArea)
        {
            maxContourArea = contourArea(contours[i]);
            biggestContourIndex = i;
        }
    }
    // show biggest contour
    Mat biggestContour(threshold_output.rows, threshold_output.cols, CV_8UC1, Scalar(0, 0, 0));
    drawContours(biggestContour, contours, biggestContourIndex, cv::Scalar(255, 255, 255), 2, 8, vector<Vec4i>(), 0, Point());
    imshow("maxContour", biggestContour);
    waitKey(0);
}
Could anybody please help me get a better hand contour result?
Thanks!!!
I have the code snippet in Python; you can follow the same approach in C++:
img = cv2.imread(x, 1)
cv2.imshow("img",img)
imgray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
cv2.imshow("gray",imgray)
#Code for histogram equalization
equ = cv2.equalizeHist(imgray)
cv2.imshow('equ', equ)
#Code for contrast limited adaptive histogram equalization
#clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
#cl2 = clahe.apply(imgray)
#cv2.imshow('clahe2', cl2)
This is the result I obtained:
If your image is horribly bad, you could try the commented-out code involving contrast limited adaptive histogram equalization (CLAHE).
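Since the answer suggests following the same approach in C++, here is a rough, hedged C++ equivalent of the Python snippet (equalizeHist plus the optional CLAHE variant); the file name is a placeholder:

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat img = imread("hand.bmp");                // placeholder path
    Mat imgray;
    cvtColor(img, imgray, COLOR_BGR2GRAY);
    imshow("gray", imgray);

    // plain histogram equalization
    Mat equ;
    equalizeHist(imgray, equ);
    imshow("equ", equ);

    // contrast limited adaptive histogram equalization (CLAHE)
    Ptr<CLAHE> clahe = createCLAHE(3.0, Size(8, 8));
    Mat cl2;
    clahe->apply(imgray, cl2);
    imshow("clahe2", cl2);

    waitKey(0);
    return 0;
}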

Segmentation of foreground from background

I'm currently working on a project that uses a Lacatan Banana, and I would like to know how to further separate the foreground from the background:
I already got a segmented image of it using erosion, dilation, and thresholding only. The problem is that it is still not properly segmented.
Here is my code:
cv::Mat imggray, imgthresh, fg, bgt, bg;
cv::cvtColor(src, imggray, CV_BGR2GRAY); //Grayscaling the image from RGB color space
cv::threshold(imggray, imgthresh, 0, 255, CV_THRESH_BINARY_INV | CV_THRESH_OTSU); //Create an inverted binary image from the grayscaled image
cv::erode(imgthresh, fg, cv::Mat(), cv::Point(-1, -1), 1); //erosion of the binary image and setting it as the foreground
cv::dilate(imgthresh, bgt, cv::Mat(), cv::Point(-1, -1), 4); //dilation of the binary image to reduce the background region
cv::threshold(bgt, bg, 1, 128, CV_THRESH_BINARY); //we get the background by setting the threshold to 1
cv::Mat markers = cv::Mat::zeros(src.size(), CV_32SC1); //initializing the markers with a size same as the source image and setting its data type as 32-bit Single channel
cv::add(fg, bg, markers); //setting the foreground and background as markers
cv::Mat mask = cv::Mat::zeros(markers.size(), CV_8UC1);
markers.convertTo(mask, CV_8UC1); //converting the 32-bit single channel marker to a 8-bit single channel
cv::Mat mthresh;
cv::threshold(mask, mthresh, 0, 255, CV_THRESH_BINARY | CV_THRESH_OTSU); //threshold further the mask to reduce the noise
// cv::erode(mthresh,mthresh,cv::Mat(), cv::Point(-1,-1),2);
cv::Mat result;
cv::bitwise_and(src, src, result, mthresh); //use the mask to extract the banana from the background
for (int x = 0; x < result.rows; x++) { //changing the black background to white
    for (int y = 0; y < result.cols; y++) {
        if (result.at<Vec3b>(x, y) == Vec3b(0, 0, 0)) {
            result.at<Vec3b>(x, y)[0] = 255;
            result.at<Vec3b>(x, y)[1] = 255;
            result.at<Vec3b>(x, y)[2] = 255;
        }
    }
}
This is my result:
As the background is nearly gray, try using the hue channel and saturation channel instead of the grayscale image.
You can get them easily:
cv::Mat hsv;
cv::cvtColor(src, hsv, CV_BGR2HSV);
std::vector<cv::Mat> channels;
cv::split(hsv, channels);            // split the HSV image, not the BGR source
cv::Mat hue = channels[0];
cv::Mat saturation = channels[1];
// If you want to combine those channels, use this code.
cv::Mat hs = cv::Mat::zeros(src.size(), CV_8U);
for (int r = 0; r < src.rows; r++) {
    for (int c = 0; c < src.cols; c++) {
        int hp = hue.at<uchar>(r, c);
        int sp = saturation.at<uchar>(r, c);
        hs.at<uchar>(r, c) = static_cast<uchar>((hp + sp) >> 1);
    }
}
adaptiveThreshold() should work better than a plain global threshold(), because it does not consider absolute intensity levels but rather the change in intensity in a small area around the point being checked.
Try replacing your thresholding with the adaptive one.
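A minimal sketch of what that replacement could look like, assuming imggray is the grayscale image from the code above (the block size and constant are just starting values to tune):

cv::Mat imgthresh;
// mean of a 35x35 neighbourhood minus 5, inverted so the fruit becomes white
cv::adaptiveThreshold(imggray, imgthresh, 255,
                      cv::ADAPTIVE_THRESH_MEAN_C, cv::THRESH_BINARY_INV,
                      35, 5);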
Use a top-hat transform instead of just erosion/dilation; it will take care of the background variations at the same time.
Then, in your case, a simple thresholding should be good enough to get an accurate segmentation. Otherwise, you can couple it with a watershed.
(I will share some images asap).
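For reference, a hedged sketch of the top-hat idea (morphologyEx with MORPH_TOPHAT; the structuring-element size is a guess that would need adapting to the object's scale), followed by Otsu thresholding:

// top-hat = image minus its opening: keeps bright structures smaller than the kernel
cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(41, 41));
cv::Mat tophat, binary;
cv::morphologyEx(imggray, tophat, cv::MORPH_TOPHAT, kernel);
cv::threshold(tophat, binary, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);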
Thanks guys, I tried to apply your advice and was able to come up with this.
However, as you can see, there are still bits of the background. Any ideas how to "clean" these further? I tried thresholding further but it would still have the bits. The code I came up with is below, and I apologize in advance if the variables and coding style are somewhat confusing; I didn't have time to properly sort them.
#include <stdio.h>
#include <iostream>
#include <opencv2\core.hpp>
#include <opencv2\opencv.hpp>
#include <opencv2\highgui.hpp>
using namespace cv;
using namespace std;
Mat COLOR_MAX(Scalar(65, 255, 255));
Mat COLOR_MIN(Scalar(15, 45, 45));
int main(int argc, char** argv) {
    Mat src, hsv_img, mask, gray_img, initial_thresh;
    Mat second_thresh, add_res, and_thresh, xor_thresh;
    Mat result_thresh, rr_thresh, final_thresh;
    // Load source Image
    src = imread("sample11.jpg");
    imshow("Original Image", src);
    cvtColor(src, hsv_img, CV_BGR2HSV);
    imshow("HSV Image", hsv_img);
    //imwrite("HSV Image.jpg", hsv_img);
    inRange(hsv_img, COLOR_MIN, COLOR_MAX, mask);
    imshow("Mask Image", mask);
    cvtColor(src, gray_img, CV_BGR2GRAY);
    adaptiveThreshold(gray_img, initial_thresh, 255, ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY_INV, 257, 2);
    imshow("AdaptiveThresh Image", initial_thresh);
    add(mask, initial_thresh, add_res);
    erode(add_res, add_res, Mat(), Point(-1, -1), 1);
    dilate(add_res, add_res, Mat(), Point(-1, -1), 5);
    imshow("Bitwise Res", add_res);
    threshold(gray_img, second_thresh, 170, 255, CV_THRESH_BINARY_INV | CV_THRESH_OTSU);
    imshow("TreshImge", second_thresh);
    bitwise_and(add_res, second_thresh, and_thresh);
    imshow("andthresh", and_thresh);
    bitwise_xor(add_res, second_thresh, xor_thresh);
    imshow("xorthresh", xor_thresh);
    bitwise_or(and_thresh, xor_thresh, result_thresh);
    imshow("Result image", result_thresh);
    bitwise_and(add_res, result_thresh, final_thresh);
    imshow("Final Thresh", final_thresh);
    erode(final_thresh, final_thresh, Mat(), Point(-1, -1), 5);
    bitwise_and(src, src, rr_thresh, final_thresh);
    imshow("Segmented Image", rr_thresh);
    imwrite("Segmented Image.jpg", rr_thresh);
    waitKey(0);
    return 1;
}

OpenCV C++ Template/Pattern Matching Scale and Rotation Invariant

I want to check whether a template is present in an image using OpenCV and C++. However, due to the different distances at which the images are taken and the different positions of the template in the image, the match does not occur correctly.
Here is my code:
IplImage* image = cvLoadImage("C:/images/Photo0734.jpg", 1);
IplImage* templat = cvLoadImage("C:/images/templatecoin.jpg", 1);
int percent = 25;
// declare a destination IplImage object with correct size, depth and channels
IplImage* image3 = cvCreateImage(cvSize((int)((image->width * percent) / 100),
                                        (int)((image->height * percent) / 100)),
                                 image->depth, image->nChannels);
// use cvResize to resize source to a destination image
cvResize(image, image3);
IplImage* image2 = cvCreateImage(cvSize(image3->width, image3->height), IPL_DEPTH_8U, 1);
IplImage* templat2 = cvCreateImage(cvSize(templat->width, templat->height), IPL_DEPTH_8U, 1);
cvCvtColor(image3, image2, CV_BGR2GRAY);
cvCvtColor(templat, templat2, CV_BGR2GRAY);
int w = image3->width - templat->width + 1;
int h = image3->height - templat->height + 1;
IplImage* result = cvCreateImage(cvSize(w, h), IPL_DEPTH_32F, 1);
cvMatchTemplate(image2, templat2, result, CV_TM_CCORR_NORMED);
double min_val, max_val;
CvPoint min_loc, max_loc;
cvMinMaxLoc(result, &min_val, &max_val, &min_loc, &max_loc);
cvRectangle(image3, max_loc, cvPoint(max_loc.x + templat->width, max_loc.y + templat->height),
            cvScalar(0, 1, 1), 1);
cvShowImage("src", image3);
//cvShowImage("result image", result);
cvWaitKey(0);
Please note that I am unable to use "Mat". Is it possible to keep using IplImage* and make the code invariant to scaling and rotation? Please help me.
Have a look at these:
SIFT Wiki
SIFT example
OpenCV SIFT documentation
I think they can be useful for you.
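To make the suggestion concrete, here is a minimal, hedged sketch of feature-based matching with SIFT. Note that it uses the C++ Mat API (in recent OpenCV it is cv::SIFT::create(), while older 3.x builds ship SIFT in the xfeatures2d contrib module), so it may not fit the IplImage constraint; the file paths reuse the ones from the question:

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat image = imread("C:/images/Photo0734.jpg", IMREAD_GRAYSCALE);
    Mat templat = imread("C:/images/templatecoin.jpg", IMREAD_GRAYSCALE);

    // detect keypoints and compute descriptors (scale- and rotation-invariant)
    Ptr<SIFT> sift = SIFT::create();
    std::vector<KeyPoint> kp1, kp2;
    Mat desc1, desc2;
    sift->detectAndCompute(templat, noArray(), kp1, desc1);
    sift->detectAndCompute(image, noArray(), kp2, desc2);

    // match template descriptors to image descriptors and keep mutual best matches
    BFMatcher matcher(NORM_L2, true); // cross-check for more reliable matches
    std::vector<DMatch> matches;
    matcher.match(desc1, desc2, matches);

    Mat vis;
    drawMatches(templat, kp1, image, kp2, matches, vis);
    imshow("matches", vis);
    waitKey(0);
    return 0;
}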