I am trying to use a graph cut to cut out an eye in a given image and replace it with another eye. Here is my code:
void imageStitching::findEye(cv::Mat originalImage, cv::Mat eye)
{
    // our corners are just at (0,0)
    cv::Point corner1;
    corner1.x = 0;
    corner1.y = 0;
    cv::Point corner2;
    corner2.x = 0;
    corner2.y = 0;
    std::vector<cv::Point> corners;
    corners.push_back(corner1);
    corners.push_back(corner2);
    std::vector<cv::Mat> masks;
    std::vector<cv::Mat> sources;
    sources.push_back(originalImage);
    sources.push_back(eye);
    cv::detail::GraphCutSeamFinder *seam_finder = new cv::detail::GraphCutSeamFinder();
    seam_finder->find(sources, corners, masks);
    printf("%lu\n", masks.size());
    for(int i = 0; i < masks.size(); i++)
    {
        std::cout << "MASK = " << std::endl << " " << masks.at(i) << std::endl << std::endl;
    }
}
But the code crashes on
seam_finder->find(sources, corners, masks);
with the message:
Thread 1: EXC_BAD_ACCESS(code=1, address=0x0)
How do I use OpenCV to get a graph cut of an object at a given location in an image?
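A likely culprit (my assumption, based on the working variants of this code further down): find() writes the seam into pre-existing masks, so the masks vector must already contain one CV_8U mask per source image, and the sources are expected to be CV_32F. Passing an empty masks vector would explain the null-pointer access. A minimal sketch of the setup that seems to be expected (this assumes the OpenCV 2.4-style detail API, where find() takes std::vector<cv::Mat>; newer versions take std::vector<cv::UMat>):

cv::Mat src0, src1;
originalImage.convertTo(src0, CV_32F, 1.0 / 255.0);
eye.convertTo(src1, CV_32F, 1.0 / 255.0);

std::vector<cv::Mat> sources = { src0, src1 };
std::vector<cv::Point> corners = { cv::Point(0, 0), cv::Point(0, 0) };

// Pre-fill one all-255 CV_8U mask per source; find() carves the seam
// into these in place instead of allocating them for you.
std::vector<cv::Mat> masks = {
    cv::Mat(src0.size(), CV_8U, cv::Scalar(255)),
    cv::Mat(src1.size(), CV_8U, cv::Scalar(255))
};

cv::detail::GraphCutSeamFinder seam_finder;
seam_finder.find(sources, corners, masks);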
Related
I am stitching three images. I want to apply warp perspective on the left side (images A and B) and the right side (images B and C). I am able to compute both H matrices, but I get some weird behavior.
When I compute A + B + C, it first does B + C, then A + B. Here the H matrix for AB is incorrect, but I don't know why!
When I do only A + B, I receive correct values. I tried everything, so I came here. It seems like there is some junk left behind after computing B + C; even though I tried to .clear() and .release() everything, the behavior stays the same. I also tried using .clone() everywhere, which didn't help. I also checked that the correct images are fed in, and they are.
I will try to simplify my code as much as I can:
// Stitcher variables
std::vector<std::pair<Mat, Mat>> images_toCalc;
std::vector<Mat> images;
std::vector<Mat> Hs;

// MAIN
images.resize(3); // the vector must be sized before indexing into it
images[2] = imread("E:/JOB/StitchingProject/StitchingApp/x64/Debug/i/left.jpg");
images[1] = imread("E:/JOB/StitchingProject/StitchingApp/x64/Debug/i/mid.jpg");
images[0] = imread("E:/JOB/StitchingProject/StitchingApp/x64/Debug/i/right.jpg");
stitcher->setImages(images, 3);
stitcher->runStitching2(false);

// runStitching2
for (int i = 0; i < this->images.size() - 1; i++) {
    Mat image1 = this->images.at(i).clone();
    Mat image2 = this->images.at(i + 1).clone();
    //if (true) {
    if (!isHalfAlready(i)) {
        flip(image1, image1, 1);
        flip(image2, image2, 1);
        this->images_toCalc.push_back(std::make_pair(image2.clone(), image1.clone()));
    }
    else {
        this->images_toCalc.push_back(std::make_pair(image1.clone(), image2.clone()));
    }
}
for (int i = 0; i < images_toCalc.size(); i++) {
    Mat H;
    H = this->calcH(images_toCalc.at(i).first, images_toCalc.at(i).second);
    this->Hs.push_back(H);
    H.release();
    if (this->debugLevel[3]) cout << "[" << i << "] H" << endl << Hs.at(i) << endl;
}
Now the buggy part:
// this->calcH
Mat heStitcher::calcH(Mat& image1_toCalc, Mat& image2_toCalc) {
std::vector< KeyPoint > keypointsObject, keypointsScene;
Mat descriptorsObject, descriptorsScene;
Mat image1, image2;
image1 = image1_toCalc.clone();
image2 = image2_toCalc.clone();
if (greyCalc) {
cv::cvtColor(image1, image1, cv::COLOR_BGR2GRAY);
cv::cvtColor(image2, image2, cv::COLOR_BGR2GRAY);
}
Ptr<SURF> detector = SURF::create();
detector->detectAndCompute(image1, Mat(), keypointsObject, descriptorsObject);
detector->detectAndCompute(image2, Mat(), keypointsScene, descriptorsScene);
detector->clear();
vector<std::vector<DMatch>> matches;
Ptr<FlannBasedMatcher> matcher = cv::FlannBasedMatcher::create();
matcher->knnMatch(descriptorsObject, descriptorsScene, matches, 2);
matcher->clear();
double min_dist = 100;
double max_dist = 0;
std::vector< DMatch > goodMatches;
for (size_t i = 0; i < matches.size(); i++) {
if (matches[i][0].distance < 0.75f * matches[i][1].distance) {
goodMatches.push_back(matches[i][0]);
}
}
std::vector< Point2f > obj;
std::vector< Point2f > scene;
for (int i = 0; i < goodMatches.size(); i++) {
obj.push_back(keypointsObject[goodMatches[i].queryIdx].pt);
scene.push_back(keypointsScene[goodMatches[i].trainIdx].pt);
}
cv::Mat H;
cv::Mat H2;
if (obj.size() < 4) {
if (this->debugLevel[2]) cout << endl << "!!!!!! not enough similarities (less than 4) !!!!!!" << endl;
}
else {
H = findHomography(obj, scene, RANSAC);
}
image1.release();
image2.release();
descriptorsObject.release();
descriptorsScene.release();
keypointsObject.clear();
keypointsScene.clear();
obj.clear();
scene.clear();
goodMatches.clear();
matches.clear();
H2 = H.clone();
H.release();
return H2;
}
After this, warpPerspective, border cutting, etc. produce the matrices shown in pictures 1 and 2 (from which it is obvious the matrices are incorrect). I cannot understand why, using the same script with the same procedure, I am getting different results.
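One way to narrow this down (my suggestion, not from the original post): sanity-check each pairwise H in isolation by warping one image onto the other and eyeballing the overlap. If H_AB only degrades in the A + B + C run, then the images reaching calcH differ between runs; the flip branch in runStitching2 would be my first suspect, since an H computed on flipped images is only valid in the flipped coordinate frame.

// Hypothetical helper: warp image1 by H into image2's frame and blend
// the two at 50% so any misalignment shows up immediately.
void checkH(const cv::Mat& image1, const cv::Mat& image2, const cv::Mat& H)
{
    cv::Mat warped, overlay;
    cv::warpPerspective(image1, warped, H, image2.size());
    cv::addWeighted(image2, 0.5, warped, 0.5, 0.0, overlay);
    cv::imshow("H sanity check", overlay);
    cv::waitKey();
}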
I found the code on Stack Overflow and made some changes, but the result is strange: the mask is all white or all black. I want to know how to use GraphCut to find the seam line between two overlapping images. Thanks!
int main()
{
    Mat image0 = imread("F:\\1\\1.jpg");
    Mat image1 = imread("F:\\1\\2.jpg");
    image0.convertTo(image0, CV_32F, 1.0/255.0);
    cv::imshow("image0", image0);
    image1.convertTo(image1, CV_32F, 1.0/255.0);
    // our corners are just at (0,0)
    cv::Point corner1;
    corner1.x = 0;
    corner1.y = 0;
    cv::Point corner2;
    corner2.x = 0;
    corner2.y = 0;
    std::vector<cv::Point> corners;
    corners.push_back(corner1);
    corners.push_back(corner2);
    std::vector<cv::Mat> masks;
    //Mat imageMask0;
    //Mat imageMask1;
    Mat imageMask0(image0.size(), CV_8U);
    imageMask0(Rect(0, 0, imageMask0.cols, imageMask0.rows)).setTo(255);
    Mat imageMask1(image1.size(), CV_8U);
    imageMask1(Rect(0, 0, imageMask1.cols, imageMask1.rows)).setTo(255);
    masks.push_back(imageMask0);
    masks.push_back(imageMask1);
    std::vector<cv::Mat> sources;
    sources.push_back(image0);
    sources.push_back(image1);
    cv::detail::GraphCutSeamFinder *seam_finder = new cv::detail::GraphCutSeamFinder();
    seam_finder->find(sources, corners, masks);
    cv::imshow("mask", masks[1]);
    masks[0].convertTo(masks[0], CV_8UC3, 255);
    cv::imwrite("F:\\1\\998.jpg", masks[0]);
    masks[1].convertTo(masks[1], CV_8UC3, 255);
    cv::imwrite("F:\\1\\999.jpg", masks[1]);
    printf("%lu\n", masks.size());
    //for(int i = 0; i < masks.size(); i++)
    //{
    //    std::cout << "MASK = " << std::endl << " " << masks.at(i) << std::endl << std::endl;
    //}
    cv::waitKey();
    return 0;
}
How can I fix the code to make it work correctly?
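One thing worth checking (an assumption on my part, not something from the post): with both corners at (0, 0) and full-frame masks, the two images overlap completely, so the cheapest cut can legitimately run along the image border and assign the whole overlap to one image, which produces exactly one all-white and one all-black mask. Seam finders are normally fed images placed at different corners so they only partially overlap. A sketch with an illustrative offset (the half-width shift is my example, not from the post):

// Place image1 half a width to the right of image0 so the images only
// partially overlap; the seam is then found inside the overlapping strip.
std::vector<cv::Point> corners;
corners.push_back(cv::Point(0, 0));                // image0 at the origin
corners.push_back(cv::Point(image0.cols / 2, 0));  // image1 shifted right

std::vector<cv::Mat> masks;
masks.push_back(cv::Mat(image0.size(), CV_8U, cv::Scalar(255)));
masks.push_back(cv::Mat(image1.size(), CV_8U, cv::Scalar(255)));

cv::detail::GraphCutSeamFinder seam_finder;
seam_finder.find(sources, corners, masks);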
I want to embed a watermark into an image using the DCT with C++ and OpenCV.
I split the image into 8x8 blocks and apply the DCT to each block.
Now I don't know what to do next. Can anyone give me a hint or help me?
Here is my work so far:
int main() {
    Mat originalImage;
    originalImage = imread("image.jpg");
    if (!originalImage.data)
    {
        std::cout << "Error loading original image!" << std::endl;
        return -1;
    }
    cout << "Working on image from image.jpg" << endl;

    /// Create Windows
    namedWindow("Original", 1);
    int x = 0; int y = 0;
    moveWindow("Original", x, y);
    imshow("Original", originalImage);
    x += 100; y += 100;

    int width = originalImage.size().width;
    int height = originalImage.size().height; // was .width, a copy-paste bug
    cout << "Original image Width x Height is " << width << "x" << height << endl;

    // Leave original alone, work on a copy
    Mat dctImage = originalImage.clone();

    // Step through the copied image with rectangles size 8x8
    // For each block, split into planes, do dct, and merge back
    // into the block. (This will affect the image from
    // which the block is selected each time.)
    // Loop bounds stop before a partial block would run off the edge.
    for (int i = 0; i + 8 <= height; i += 8)
    {
        for (int j = 0; j + 8 <= width; j += 8)
        {
            // Rect takes (x, y, width, height), so the column index j
            // comes first; the original (i, j) swapped the axes.
            Mat block = dctImage(Rect(j, i, 8, 8));
            vector<Mat> planes;
            split(block, planes);
            vector<Mat> outplanes(planes.size());
            for (size_t k = 0; k < planes.size(); k++)
            {
                planes[k].convertTo(planes[k], CV_32FC1);
                dct(planes[k], outplanes[k]);
                // Note: converting straight back to 8 bits clips
                // negative coefficients.
                outplanes[k].convertTo(outplanes[k], CV_8UC1);
            }
            merge(outplanes, block);
        }
    }

    namedWindow("dctBlockImage");
    moveWindow("dctBlockImage", x, y);
    imshow("dctBlockImage", dctImage);
    x += 100; y += 100;

    waitKey();
    destroyAllWindows();
    return 0;
}
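As for the next step: one common scheme (a sketch under my own assumptions, not the only way to do DCT watermarking) embeds each watermark bit by nudging a mid-band coefficient of a block before taking the inverse DCT. Mid-band coefficients tend to survive compression better than high-frequency ones while distorting the image less than low-frequency ones; the position (4, 3) and the strength alpha below are illustrative choices, not fixed constants.

// Hypothetical embedding step for one DCT-transformed 8x8 plane
// `blockF` (CV_32FC1) and one watermark bit `bit`. alpha trades
// robustness against visibility.
void embedBit(cv::Mat& blockF, int bit, float alpha = 10.0f)
{
    // Push a mid-band coefficient up or down depending on the bit.
    blockF.at<float>(4, 3) += bit ? alpha : -alpha;
}

// After embedding, invert the transform to get the watermarked block
// back into the spatial domain:
//   cv::idct(blockF, spatialBlock);

Note that this only works if the coefficients stay in floating point until after the inverse DCT; the convertTo(outplanes[k], CV_8UC1) right after the forward DCT in the code above clips negative coefficients and would destroy the watermark.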
I have this code:
int main(int argc, char* argv[])
{
    Mat image0 = imread("C:\\Working Dir\\Tests\\TestBlending\\shop0.jpg");
    Mat image1 = imread("C:\\Working Dir\\Tests\\TestBlending\\shop1.jpg");
    image0.convertTo(image0, CV_32FC3, 1/255.0);
    image1.convertTo(image1, CV_32FC3, 1/255.0);
    // our corners are just at (0,0)
    cv::Point corner1;
    corner1.x = 0;
    corner1.y = 0;
    cv::Point corner2;
    corner2.x = 0;
    corner2.y = 0;
    std::vector<cv::Point> corners;
    corners.push_back(corner1);
    corners.push_back(corner2);
    std::vector<cv::Mat> masks;
    Mat mask0(image0.size(), CV_8U);
    mask0(Rect(0, 0, mask0.cols, mask0.rows)).setTo(255);
    Mat mask1(image1.size(), CV_8U);
    mask1(Rect(0, 0, mask1.cols, mask1.rows)).setTo(255);
    masks.push_back(mask0);
    masks.push_back(mask1);
    std::vector<cv::Mat> sources;
    sources.push_back(image0);
    sources.push_back(image1);
    cv::detail::GraphCutSeamFinder seam_finder;
    seam_finder.find(sources, corners, masks);
    printf("%lu\n", masks.size());
    for(int i = 0; i < masks.size(); i++)
    {
        std::cout << "MASK = " << std::endl << " " << masks.at(i) << std::endl << std::endl;
    }
    return 0;
}
and the images that I am using are:
The masks that I am getting are all 255 for image 0 and all zero for image 1.
What is the problem and how can I fix it?
Edit1
I noticed that the input images should be in TIFF format so the application can see the transparent pixels in each image, so here are the image files in TIFF format:
I used smartblend (http://wiki.panotools.org/SmartBlend) to blend these two images, and I get this image:
After I do some image manipulation and apply a mask, I get what I want. I can clearly see in the imshow result of "crop" that there are gray pixels in the middle of the image.
I'm trying to get the location of the maximum pixel value. I've checked crop.channels(), which returns 1.
Mat mask = drawing2;
drawContours(mask, contours, -1, Scalar(255), CV_FILLED);
Mat dist;
distanceTransform( cannyInv, dist, CV_DIST_L2, 3 );
normalize(dist,dist,0.0,1.0,NORM_MINMAX);
Mat crop;
dist.copyTo(crop, mask);
cout << "max.. "<< *std::max_element(crop.begin<double>(),crop.end<double>()) <<endl;
which returns max.. 4.25593e-08
for(int y = 0; y < crop.rows; y++)
{
    for(int x = 0; x < crop.cols; x++)
    {
        if (crop.at<unsigned char>(x,y) > 0){
            cout << "X........" << x << " Y......" << y << " = " << crop.at<unsigned char>(x,y) << endl;
        }
    }
}
The output is:
X........604 Y......479 = ¿
X........607 Y......479 =
X........610 Y......479 = ¿
Help me, please.
PS: I know there's a similar question, but this is a specific problem.
I'm not sure how I solved it, since a lot of time has passed, but the code that I currently have, and that works, is this:
// maxVal, maxX, and maxY need to be declared before the search
uchar maxVal = 0;
int maxX = 0, maxY = 0;

Mat dist = Mat::zeros(480, 640, CV_8UC1);
distanceTransform(cannyInv, dist, CV_DIST_L2, 3);
Mat distNorm;
dist.convertTo(distNorm, CV_8UC1, 1, 0);
Mat result = Mat::zeros(480, 640, CV_8UC1);
distNorm.copyTo(result, mask);
Mat tmp = Mat::zeros(480, 640, CV_8UC1);
Mat fik = Mat::zeros(480, 640, CV_8UC3);
for(int i = 0; i < result.rows; i++)
{
    for(int j = 0; j < result.cols; j++)
    {
        if (result.at<uchar>(i, j) > 0){
            uchar val = result.at<uchar>(i, j);
            if (val > maxVal){
                if (val > 0){
                    cv::circle(tmp, cv::Point(j, i), 2, 255, -1);
                }
                maxVal = val;
                maxX = j;
                maxY = i;
            }
        }
    }
}
Are you sure that normalizing the Mat automatically converts it from uchar to double? It's very likely the data is still stored as uchars and you're reading wrong numbers from it.
Try dist.convertTo(dist, CV_64F);
Print the numbers as doubles everywhere
OR work only with uchars.
Try this code:
cout << "X........"
<< x
<< " Y......"
<< y
<< " = "
<< (double) crop.at< unsigned char>(x,y) <<endl;
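For what it's worth, a simpler route to the maximum pixel location (my suggestion, not part of the answer above) is cv::minMaxLoc, which handles the element type for you and accepts the same mask used for the copy:

// Sketch: value and location of the brightest pixel in `crop`,
// restricted to the region selected by `mask` (names from the question).
double minVal, maxVal;
cv::Point minLoc, maxLoc;
cv::minMaxLoc(crop, &minVal, &maxVal, &minLoc, &maxLoc, mask);
std::cout << "max " << maxVal << " at (" << maxLoc.x << ", " << maxLoc.y << ")" << std::endl;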