Strange result of GraphCut in OpenCV - C++

I found this code on Stack Overflow and made some changes, but its result is strange: the mask is all white or all black. I want to know how to use GraphCut to find the seam line between two overlapping images. Thanks!
#include <opencv2/opencv.hpp>
#include <opencv2/stitching/detail/seam_finders.hpp>

using namespace cv;

int main()
{
    Mat image0 = imread("F:\\1\\1.jpg");
    Mat image1 = imread("F:\\1\\2.jpg");
    image0.convertTo(image0, CV_32F, 1.0 / 255.0);
    cv::imshow("image0", image0);
    image1.convertTo(image1, CV_32F, 1.0 / 255.0);

    // our corners are just at (0,0)
    cv::Point corner1;
    corner1.x = 0;
    corner1.y = 0;
    cv::Point corner2;
    corner2.x = 0;
    corner2.y = 0;
    std::vector<cv::Point> corners;
    corners.push_back(corner1);
    corners.push_back(corner2);

    // both masks start fully opaque; find() carves the seam into them
    std::vector<cv::Mat> masks;
    Mat imageMask0(image0.size(), CV_8U);
    imageMask0(Rect(0, 0, imageMask0.cols, imageMask0.rows)).setTo(255);
    Mat imageMask1(image1.size(), CV_8U);
    imageMask1(Rect(0, 0, imageMask1.cols, imageMask1.rows)).setTo(255);
    masks.push_back(imageMask0);
    masks.push_back(imageMask1);

    std::vector<cv::Mat> sources;
    sources.push_back(image0);
    sources.push_back(image1);

    cv::detail::GraphCutSeamFinder *seam_finder = new cv::detail::GraphCutSeamFinder();
    seam_finder->find(sources, corners, masks);

    cv::imshow("mask", masks[1]);
    masks[0].convertTo(masks[0], CV_8UC3, 255);
    cv::imwrite("F:\\1\\998.jpg", masks[0]);
    masks[1].convertTo(masks[1], CV_8UC3, 255);
    cv::imwrite("F:\\1\\999.jpg", masks[1]);
    printf("%lu\n", masks.size());
    //for (int i = 0; i < masks.size(); i++)
    //{
    //    std::cout << "MASK = " << std::endl << " " << masks.at(i) << std::endl << std::endl;
    //}
    cv::waitKey();
    return 0;
}
How can I fix this code so it produces the right seam masks?
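For what it's worth, one quick way to check whether the seam finder produced a usable cut is to composite the two inputs with the returned masks. This is a minimal sketch, not part of the original code; it assumes both images are the same size, fully overlap at (0, 0), and that masks[i] is 255 where image i should contribute:

// Hedged sketch: overlay the two CV_32FC3 sources using the CV_8U masks
// written by GraphCutSeamFinder::find(). copyTo with a mask copies only the
// pixels where the mask is non-zero.
cv::Mat compositeWithMasks(const cv::Mat& image0, const cv::Mat& image1,
                           const cv::Mat& mask0, const cv::Mat& mask1)
{
    cv::Mat result = cv::Mat::zeros(image0.size(), image0.type());
    image0.copyTo(result, mask0); // contribution of the first image
    image1.copyTo(result, mask1); // contribution of the second image
    return result;
}

If the composite shows a seam running through the overlap, the masks are fine and only the saving step is suspect; if one image wins everywhere, the cut itself assigned the whole overlap to a single source, which can legitimately happen when the two inputs are nearly identical.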

Related

OpenCV c++ findHomography not returning same results

I am stitching three images. I wanted to implement warp perspective on the left side (images A and B) and the right side (images B and C). I am able to compute both H matrices, but with some weird behavior.
When I compute A + B + C, it first does B + C, then A + B. Here the H matrix for AB is incorrect, but I don't know why!
When I do only A + B, I receive "correct values". I tried everything, so I came here. It seems like some junk is left behind after computing B + C, but even after I tried to .clear() and .release() everything, the behavior stays the same. I also tried using .clone() everywhere, which didn't help. I also checked that the correct images are fed in, and they are.
I will try to simplify my code as much as I can:
// Stitcher variables
std::vector<std::pair<Mat, Mat>> images_toCalc;
std::vector<Mat> images;
std::vector<Mat> Hs;

// MAIN
images.resize(3); // the vector must be sized before indexing into it
images[2] = imread("E:/JOB/StitchingProject/StitchingApp/x64/Debug/i/left.jpg");
images[1] = imread("E:/JOB/StitchingProject/StitchingApp/x64/Debug/i/mid.jpg");
images[0] = imread("E:/JOB/StitchingProject/StitchingApp/x64/Debug/i/right.jpg");
stitcher->setImages(images, 3);
stitcher->runStitching2(false);

// runStitching2
for (int i = 0; i < this->images.size() - 1; i++) {
    Mat image1 = this->images.at(i).clone();
    Mat image2 = this->images.at(i + 1).clone();
    //if (true) {
    if (!isHalfAlready(i)) {
        flip(image1, image1, 1);
        flip(image2, image2, 1);
        this->images_toCalc.push_back(std::make_pair(image2.clone(), image1.clone()));
    }
    else {
        this->images_toCalc.push_back(std::make_pair(image1.clone(), image2.clone()));
    }
}
for (int i = 0; i < images_toCalc.size(); i++) {
    Mat H = this->calcH(images_toCalc.at(i).first, images_toCalc.at(i).second);
    this->Hs.push_back(H);
    if (this->debugLevel[3]) cout << "[" << i << "] H" << endl << Hs.at(i) << endl;
}
Now the buggy part:
// this->calcH
Mat heStitcher::calcH(Mat& image1_toCalc, Mat& image2_toCalc) {
    std::vector<KeyPoint> keypointsObject, keypointsScene;
    Mat descriptorsObject, descriptorsScene;
    Mat image1 = image1_toCalc.clone();
    Mat image2 = image2_toCalc.clone();

    if (greyCalc) {
        cv::cvtColor(image1, image1, cv::COLOR_BGR2GRAY);
        cv::cvtColor(image2, image2, cv::COLOR_BGR2GRAY);
    }

    Ptr<SURF> detector = SURF::create();
    detector->detectAndCompute(image1, Mat(), keypointsObject, descriptorsObject);
    detector->detectAndCompute(image2, Mat(), keypointsScene, descriptorsScene);
    detector->clear();

    vector<std::vector<DMatch>> matches;
    Ptr<FlannBasedMatcher> matcher = cv::FlannBasedMatcher::create();
    matcher->knnMatch(descriptorsObject, descriptorsScene, matches, 2);
    matcher->clear();

    // Lowe's ratio test to keep only distinctive matches
    std::vector<DMatch> goodMatches;
    for (size_t i = 0; i < matches.size(); i++) {
        if (matches[i][0].distance < 0.75f * matches[i][1].distance) {
            goodMatches.push_back(matches[i][0]);
        }
    }

    std::vector<Point2f> obj;
    std::vector<Point2f> scene;
    for (int i = 0; i < goodMatches.size(); i++) {
        obj.push_back(keypointsObject[goodMatches[i].queryIdx].pt);
        scene.push_back(keypointsScene[goodMatches[i].trainIdx].pt);
    }

    cv::Mat H;
    cv::Mat H2;
    if (obj.size() < 4) {
        if (this->debugLevel[2]) cout << endl << "!!!!!! not enough similarities (less than 4) !!!!!!" << endl;
    }
    else {
        H = findHomography(obj, scene, RANSAC);
    }

    image1.release();
    image2.release();
    descriptorsObject.release();
    descriptorsScene.release();
    keypointsObject.clear();
    keypointsScene.clear();
    obj.clear();
    scene.clear();
    goodMatches.clear();
    matches.clear();

    H2 = H.clone();
    H.release();
    return H2;
}
After this, warpPerspective, border cutting, etc. produce the matrices shown in pictures 1 and 2 (from which the matrices are obviously incorrect). I cannot understand why I am getting different results when using the same script and the same procedure.
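As a side note, one way to tell whether an estimated H is actually wrong, rather than just unexpected, is to measure the reprojection error of the matched points. A minimal sketch, assuming obj, scene, and H come from calcH above (the helper name is mine):

#include <opencv2/opencv.hpp>
#include <cmath>
#include <vector>

// Hedged sketch: mean reprojection error of the correspondences under H.
// A homography that "looks wrong" after warping usually also shows a large
// error here, which separates estimation problems from warping problems.
double meanReprojError(const std::vector<cv::Point2f>& obj,
                       const std::vector<cv::Point2f>& scene,
                       const cv::Mat& H)
{
    if (obj.empty() || H.empty()) return -1.0; // nothing to measure
    std::vector<cv::Point2f> projected;
    cv::perspectiveTransform(obj, projected, H);
    double err = 0.0;
    for (size_t i = 0; i < obj.size(); i++) {
        double dx = projected[i].x - scene[i].x;
        double dy = projected[i].y - scene[i].y;
        err += std::sqrt(dx * dx + dy * dy);
    }
    return err / obj.size();
}

Calling this right after findHomography for each pair would show whether the AB estimate is already bad before any warping happens.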

Contour segmentation using opencv

I am currently working on separating overlapping objects using C++ and OpenCV 3.0. So far I have been able to find contours and corner points. Now I need to separate these objects based on the corner points and put them into separate vectors.
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include<algorithm>
using namespace cv;
using namespace std;
/// Global variables
Mat src, src_gray;
int thresh = 100;
int max_thresh = 255;
int maxCorners =14;
int maxTrackbar =14;
RNG rng(12345);
char* source_window = "Image";
/// Function header
void goodFeaturesToTrack_Demo(int, void*);
/// Function header
void thresh_callback(int, void*);
void printvec(vector<int>& contours){
for (int i = 0; i < contours.size(); i++){
cout << contours[i] << " ";
}
cout << endl;
}
/** #function main */
int main(int argc, char** argv)
{
/// Load source image and convert it to gray
src = imread("demo.png", 1);
cvtColor(src, src_gray, COLOR_BGR2GRAY);
/// Convert image to gray and blur it
cvtColor(src, src_gray, CV_BGR2GRAY);
blur(src_gray, src_gray, Size(3,3));
/// Create Window
namedWindow(source_window, WINDOW_AUTOSIZE);
/// Create Trackbar to set the number of corners
createTrackbar("Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo);
imshow(source_window, src);
goodFeaturesToTrack_Demo(0, 0);
waitKey(0);
return(0);
}
void goodFeaturesToTrack_Demo(int, void*)
{
if (maxCorners < 1) { maxCorners = 1; }
vector<Point2f> corners;
double qualityLevel = 0.01;
double minDistance = 10;
int blockSize = 3;
bool useHarrisDetector = false;
double k = 0.04;
/// Copy the source image
Mat copy;
copy = src.clone();
/// Apply corner detection
goodFeaturesToTrack(src_gray,
corners,
maxCorners,
qualityLevel,
minDistance,
Mat(),
blockSize,
useHarrisDetector,
k);
/// Draw corners detected
cout << "** Number of corners detected: " << corners.size() << endl;
int r = 4;
for (int i = 0; i < corners.size(); i++)
{
circle(copy, corners[i], r, Scalar(rng.uniform(0, 255), rng.uniform(0, 255),
rng.uniform(0, 255)), -1, 8, 0);
}
/// Show what you got
namedWindow(source_window, WINDOW_AUTOSIZE);
imshow(source_window, copy);
/// Set the neeed parameters to find the refined corners
Size winSize = Size(5, 5);
Size zeroZone = Size(-1, -1);
TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::MAX_ITER, 40, 0.001);
/// Calculate the refined corner locations
cornerSubPix(src_gray, corners, winSize, zeroZone, criteria);
Mat canny_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
/// Detect edges using canny
Canny(src_gray, canny_output, thresh, thresh * 2, 3);
/// Find contours
findContours(canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
/// Write them down
for (int i = 0; i < corners.size(); i++)
{
cout << " -- Refined Corner [" << i << "] (" << corners[i].x << "," << corners[i].y << ")" << endl;
float valx = corners[i].x;
float valy = corners[i].y;
int xp = round(valx);
int yp = round(valy);
cout << " -- Refined Corner [" << i << "] (" << xp << "," << yp << ")" << endl;
}
for (int i = 0; i < contours.size(); i++)
{
for (int j = 0; j < contours[i].size(); j++)
{
cout << "Point(x,y)=" << contours[i][j].x << "," << contours[i][j].y << endl;
}
}
}
This is the result of the code:
and this is what I want to get:
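One possible direction (a rough sketch, not a tested solution): for each refined corner, find the index of the nearest contour point, then slice the contour between consecutive cut indices. splitAtCorners is a hypothetical helper; contour and corners are assumed to come from findContours and cornerSubPix above:

#include <opencv2/opencv.hpp>
#include <algorithm>
#include <limits>
#include <vector>

// Hedged sketch: split one contour into segments at the points nearest to the
// detected corners. Assumes the contour points are ordered, as findContours
// returns them.
std::vector<std::vector<cv::Point>> splitAtCorners(
    const std::vector<cv::Point>& contour,
    const std::vector<cv::Point2f>& corners)
{
    // Index of the contour point closest to each corner.
    std::vector<int> cutIdx;
    for (size_t c = 0; c < corners.size(); c++) {
        int best = 0;
        double bestDist = std::numeric_limits<double>::max();
        for (size_t i = 0; i < contour.size(); i++) {
            double dx = contour[i].x - corners[c].x;
            double dy = contour[i].y - corners[c].y;
            double d = dx * dx + dy * dy; // squared distance is enough to compare
            if (d < bestDist) { bestDist = d; best = (int)i; }
        }
        cutIdx.push_back(best);
    }
    std::sort(cutIdx.begin(), cutIdx.end());

    // One segment between each pair of consecutive cut indices.
    std::vector<std::vector<cv::Point>> segments;
    for (size_t k = 0; k + 1 < cutIdx.size(); k++) {
        segments.push_back(std::vector<cv::Point>(contour.begin() + cutIdx[k],
                                                  contour.begin() + cutIdx[k + 1] + 1));
    }
    return segments;
}

Each element of segments would then be one piece of the boundary between two corners, which could be stored in the separate vectors mentioned above.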

opencv graphcut doesn't return correct mask

I have this code:
int main(int argc, char* argv[])
{
    Mat image0 = imread("C:\\Working Dir\\Tests\\TestBlending\\shop0.jpg");
    Mat image1 = imread("C:\\Working Dir\\Tests\\TestBlending\\shop1.jpg");
    image0.convertTo(image0, CV_32FC3, 1 / 255.0);
    image1.convertTo(image1, CV_32FC3, 1 / 255.0);

    // our corners are just at (0,0)
    cv::Point corner1;
    corner1.x = 0;
    corner1.y = 0;
    cv::Point corner2;
    corner2.x = 0;
    corner2.y = 0;
    std::vector<cv::Point> corners;
    corners.push_back(corner1);
    corners.push_back(corner2);

    std::vector<cv::Mat> masks;
    Mat mask0(image0.size(), CV_8U);
    mask0(Rect(0, 0, mask0.cols, mask0.rows)).setTo(255);
    Mat mask1(image1.size(), CV_8U);
    mask1(Rect(0, 0, mask1.cols, mask1.rows)).setTo(255);
    masks.push_back(mask0);
    masks.push_back(mask1);

    std::vector<cv::Mat> sources;
    sources.push_back(image0);
    sources.push_back(image1);

    cv::detail::GraphCutSeamFinder seam_finder;
    seam_finder.find(sources, corners, masks);

    printf("%lu\n", masks.size());
    for (int i = 0; i < masks.size(); i++)
    {
        std::cout << "MASK = " << std::endl << " " << masks.at(i) << std::endl << std::endl;
    }
    return 0;
}
and the images that I am using are:
The masks that I am getting are all 255 for image 0 and all zero for image 1.
What is the problem and how can I fix it?
Edit 1
I noticed that the input images should be in TIFF format so that the application can see the transparent pixels in each image, so here are the image files in TIFF format:
I used smartblend (http://wiki.panotools.org/SmartBlend) to blend these two images, and I get this image:
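If the TIFFs carry an alpha channel, the masks passed to the seam finder could be derived from it instead of being set to all 255. A hedged sketch of that idea (the file name is a placeholder):

// Load with IMREAD_UNCHANGED so the alpha channel survives, then mark every
// non-transparent pixel as usable in the seam-finder mask. This assumes the
// TIFF really has four channels; check rgba.channels() before splitting.
cv::Mat rgba = cv::imread("C:\\Working Dir\\Tests\\TestBlending\\shop0.tif", cv::IMREAD_UNCHANGED);
std::vector<cv::Mat> channels;
cv::split(rgba, channels);
cv::Mat mask0 = channels[3] > 0; // 255 where the pixel is not fully transparent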

how to use opencv graphcut

I am trying to get a graphcut to cut out an eye in a given image and replace it with another eye.
void imageStitching::findEye(cv::Mat originalImage, cv::Mat eye)
{
    // our corners are just at (0,0)
    cv::Point corner1;
    corner1.x = 0;
    corner1.y = 0;
    cv::Point corner2;
    corner2.x = 0;
    corner2.y = 0;
    std::vector<cv::Point> corners;
    corners.push_back(corner1);
    corners.push_back(corner2);

    std::vector<cv::Mat> masks;
    std::vector<cv::Mat> sources;
    sources.push_back(originalImage);
    sources.push_back(eye);

    cv::detail::GraphCutSeamFinder *seam_finder = new cv::detail::GraphCutSeamFinder();
    seam_finder->find(sources, corners, masks);

    printf("%lu\n", masks.size());
    for (int i = 0; i < masks.size(); i++)
    {
        std::cout << "MASK = " << std::endl << " " << masks.at(i) << std::endl << std::endl;
    }
}
But the code crashes on
seam_finder->find(sources, corners, masks);
with the message:
Thread 1: EXC_BAD_ACCESS(code=1, address=0x0)
How do I use opencv to get a graphcut of an object at a location in an image?
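One difference from the other GraphCutSeamFinder snippets on this page: here the masks vector is still empty when it reaches find(). A minimal sketch of the initialization those snippets use (they also convert the sources to CV_32FC3 first):

// Sketch: give find() one CV_8U mask per source image, initially all 255,
// as in the other GraphCutSeamFinder examples above. find() writes the seam
// assignment back into these masks, so it needs them to exist beforehand.
std::vector<cv::Mat> masks;
masks.push_back(cv::Mat(originalImage.size(), CV_8U, cv::Scalar(255)));
masks.push_back(cv::Mat(eye.size(), CV_8U, cv::Scalar(255)));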

Getting maximum pixel value in grayscale image in opencv

After I do some image manipulation and apply a mask, I get what I want. I can clearly see in the imshow result of "crop" that there are gray pixels in the middle of the image.
I'm trying to get the maximum pixel value's location. I've checked crop.channels(), which returns 1.
Mat mask = drawing2;
drawContours(mask, contours, -1, Scalar(255), CV_FILLED);
Mat dist;
distanceTransform(cannyInv, dist, CV_DIST_L2, 3);
normalize(dist, dist, 0.0, 1.0, NORM_MINMAX);
Mat crop;
dist.copyTo(crop, mask);
cout << "max.. " << *std::max_element(crop.begin<double>(), crop.end<double>()) << endl;
which returns max.. 4.25593e-08
for (int y = 0; y < crop.rows; y++)
{
    for (int x = 0; x < crop.cols; x++)
    {
        if (crop.at<unsigned char>(x, y) > 0) {
            cout << "X........" << x << " Y......" << y << " = " << crop.at<unsigned char>(x, y) << endl;
        }
    }
}
The output is:
X........604 Y......479 = ¿
X........607 Y......479 =
X........610 Y......479 = ¿
Help me please
PS: I know there's a similar question, but this is a specific problem.
I'm not sure how I solved it; a lot of time has passed. But the code I currently have, and which works, is this:
Mat dist = Mat::zeros(480, 640, CV_8UC1);
distanceTransform(cannyInv, dist, CV_DIST_L2, 3);
Mat distNorm;
dist.convertTo(distNorm, CV_8UC1, 1, 0);
Mat result = Mat::zeros(480, 640, CV_8UC1);
distNorm.copyTo(result, mask);
Mat tmp = Mat::zeros(480, 640, CV_8UC1);
Mat fik = Mat::zeros(480, 640, CV_8UC3);
uchar maxVal = 0; // running maximum and its location
int maxX = 0, maxY = 0;
for (int i = 0; i < result.rows; i++)
{
    for (int j = 0; j < result.cols; j++)
    {
        if (result.at<uchar>(i, j) > 0) {
            uchar val = result.at<uchar>(i, j);
            if (val > maxVal) {
                cv::circle(tmp, Point(j, i), 2, 255, -1);
                maxVal = val;
                maxX = j;
                maxY = i;
            }
        }
    }
}
Are you sure that normalizing the Mat automatically converts it from uchar to double? It's very likely the data is still stored as uchars and you're reading wrong numbers from it. Either try dist.convertTo(dist, CV_64F); and print the numbers as doubles everywhere, or work only with uchars.
Try this code:
cout << "X........" << x
     << " Y......" << y
     << " = "
     << (double) crop.at<unsigned char>(x, y) << endl;
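For the original goal of finding the maximum pixel value and its location, cv::minMaxLoc already does this for any single-channel Mat regardless of depth, so no manual loop or at<>() type guessing is needed. A short sketch, assuming crop is single channel:

// minMaxLoc works on CV_8U, CV_32F, etc.; an optional mask argument can
// restrict the search to a region of interest.
double minVal = 0.0, maxVal = 0.0;
cv::Point minLoc, maxLoc;
cv::minMaxLoc(crop, &minVal, &maxVal, &minLoc, &maxLoc);
std::cout << "max " << maxVal << " at (" << maxLoc.x << ", " << maxLoc.y << ")" << std::endl;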