I have an example that draws contours around what it detects in a bitmap image, and it works great!
But the code is going to be part of a bigger project using Leap Motion, and I need it to run in a console and just print the values; I don't need any windows to be opened.
When I run the following code, I get the assertion _pFirstBlock == pHead. I searched for the error, and it supposedly means my DLLs are not built as Multi-threaded Debug DLL (/MDd), but they are. I know this because if I delete everything in main(), the application works fine. If I run the application as usual, I get the error at the end of main(), when memory is being released. To verify this, I put all my code from main() into an empty scope, and it crashes with the same error at the end of the scope.
So it clearly has something to do with releasing memory.
What could it be?
Using OpenCV 2.4.13.
Full code:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src; Mat src_gray;
int thresh = 20;
RNG rng(12345);
int main(int, char** argv)
{
{
src = imread(argv[1], 1);
cvtColor(src, src_gray, COLOR_BGR2GRAY);
blur(src_gray, src_gray, Size(3, 3));
cout << endl;
Mat threshold_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
threshold(src_gray, threshold_output, thresh, 255, THRESH_BINARY);
findContours(threshold_output, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0));
vector<vector<Point> > contours_poly(contours.size());
vector<Point2f> center(contours.size());
vector<float> radius(contours.size());
for (size_t i = 0; i < contours.size(); i++)
{
approxPolyDP(Mat(contours[i]), contours_poly[i], 3, true);
minEnclosingCircle(contours_poly[i], center[i], radius[i]);
}
cout << "Found " << contours.size() << " contours. " << endl;
Mat drawing = Mat::zeros(threshold_output.size(), CV_8UC3);
for (size_t i = 0; i < contours.size(); i++)
{
Scalar color = Scalar(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
circle(drawing, center[i], (int)radius[i], color, 2, 8, 0);
cout << "Found one at: (" << center[i].x << ", " << center[i].y << ") radius = " << radius[i] << endl;
}
drawing.release();
threshold_output.release();
src_gray.release();
src.release();
} //////////// CRASHES
return(0);
}
Related
I'm new to OpenCV. I'm using OpenCV 2.4.12 with Visual Studio 2013 on Windows 10. I'm trying to create a program that takes two images as input and finds a block of the first image that matches a block in the second image, by finding features and a homography. Basically, I'm following This tutorial, and I implemented the code successfully. Then I wanted to take things one step further and crop the matched block. I successfully created a mask image, but when I try bitwise_and or anything similar, it shows the following error.
Unhandled exception at 0x772FD928 in OpenCVTut.exe Microsoft C++ exception: cv::Exception at memory location 0x0017E6C0.
I've tried googling a lot, but couldn't find any solution. Following are the code, the images I'm using, and the mask that I generated.
#include <iostream>
#include <opencv2\opencv.hpp>
#include <opencv2\core\core.hpp>
#include <opencv2\highgui\highgui.hpp>
#include <opencv2\features2d\features2d.hpp>
#include <opencv2\calib3d\calib3d.hpp>
#include <opencv2\nonfree\nonfree.hpp>
using namespace std;
using namespace cv;
int main() {
Mat imgObject = cvLoadImage("E:/opencv/images/Experiments/target.jpg", CV_LOAD_IMAGE_GRAYSCALE);
Mat imgScene = cvLoadImage("E:/opencv/images/Experiments/source.jpg", CV_LOAD_IMAGE_GRAYSCALE);
if (!imgObject.data || !imgScene.data) {
cout << "Error reading images" << endl;
return -1;
}
int minHessian = 400;
SurfFeatureDetector detector(minHessian);
vector<KeyPoint> keyPointsObject;
vector<KeyPoint> keyPointsScene;
detector.detect(imgObject, keyPointsObject);
detector.detect(imgScene, keyPointsScene);
SurfDescriptorExtractor extractor;
Mat descriptorObject;
Mat descriptorScene;
extractor.compute(imgObject, keyPointsObject, descriptorObject);
extractor.compute(imgScene, keyPointsScene, descriptorScene);
FlannBasedMatcher matcher;
vector<DMatch> matches;
matcher.match(descriptorObject, descriptorScene, matches);
double maxDist = 0;
double minDist = 100;
for (int i = 0; i < descriptorObject.rows; i++) {
double dist = matches[i].distance;
if (dist > maxDist) maxDist = dist;
if (dist < minDist) minDist = dist;
}
cout << "-- Max dist : " << maxDist << endl;
cout << "-- Min dist : " << minDist << endl;
vector<DMatch> goodMatches;
for (int i = 0; i < descriptorObject.rows; i++) {
if (matches[i].distance < 3 * minDist) {
goodMatches.push_back(matches[i]);
}
}
/*Mat imgMatches;
drawMatches(imgObject, keyPointsObject, imgScene, keyPointsScene,
goodMatches, imgMatches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);*/
vector<Point2f> obj;
vector<Point2f> scene;
for (int i = 0; i < goodMatches.size(); i++) {
obj.push_back(keyPointsObject[goodMatches[i].queryIdx].pt);
scene.push_back(keyPointsScene[goodMatches[i].trainIdx].pt);
}
Mat H = findHomography(obj, scene, CV_RANSAC);
vector<Point2f> objCorners(4);
objCorners[0] = cvPoint(0, 0);
objCorners[1] = cvPoint(imgObject.cols, 0);
objCorners[2] = cvPoint(imgObject.cols, imgObject.rows);
objCorners[3] = cvPoint(0, imgObject.rows);
vector<Point2f> sceneCorners(4);
perspectiveTransform(objCorners, sceneCorners, H);
line(imgScene, sceneCorners[0], sceneCorners[1], Scalar(0, 255, 0), 4);
line(imgScene, sceneCorners[1], sceneCorners[2], Scalar(0, 255, 0), 4);
line(imgScene, sceneCorners[2], sceneCorners[3], Scalar(0, 255, 0), 4);
line(imgScene, sceneCorners[3], sceneCorners[0], Scalar(0, 255, 0), 4);
Mat mask = Mat::zeros(imgScene.rows, imgScene.cols, CV_8UC3);
vector< vector<Point> > contours;
vector< Vec4i > hierarchy;
Mat coun;
imgScene.copyTo(coun);
findContours(coun, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
Scalar color(255, 255, 255);
drawContours(mask, contours, 0, color, CV_FILLED, 8, hierarchy);
Mat element = getStructuringElement(MORPH_RECT, Size(2, 2), Point(0, 0));
dilate(mask, mask, element);
erode(mask, mask, element);
Mat res(imgScene.rows, imgScene.cols, CV_8UC1, Scalar(0, 0, 0));
bitwise_and(imgScene, mask, res);
namedWindow("Good Matches & Object detection", CV_WINDOW_AUTOSIZE);
imshow("Good Matches & Object detection", mask);
waitKey(0);
return 0;
}
Scene
Target
Mask
So, can anyone please explain the error and what I need to do to solve it?
Thanks in advance :)
The error occurs at line:
bitwise_and(imgScene, mask, res);
because the two matrices have different types: imgScene is a CV_8UC1 matrix and mask is CV_8UC3.
Since a mask is usually just a binary image, which can safely be represented with a single-channel matrix, you can fix your code by making mask a CV_8UC1 matrix:
Mat mask = Mat::zeros(imgScene.rows, imgScene.cols, CV_8UC1); // Instead of CV_8UC3
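If you instead need to keep a 3-channel mask for some other reason, an alternative sketch (not part of the original fix) is to collapse it to one channel right before the operation:
Mat mask1c;
cvtColor(mask, mask1c, CV_BGR2GRAY); // collapse the 3-channel mask to a single channel
bitwise_and(imgScene, mask1c, res);  // now both operands are CV_8UC1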
I tried to use findContours() to get contours from the following picture.
The code is like this:
Mat mat = imread("123.jpg");
cv::imshow("123.jpg",mat);
Mat hsv;
cvtColor(mat,hsv,COLOR_BGR2HSV);
Mat dst;
inRange(hsv,Scalar(0,0,49),Scalar(47,165,111),dst);
cout<<"dst channels:"<<dst.channels()<<endl;
int c0=0,c255=0,other=0;
for(int i=0;i<dst.rows;i++){
for(int j=0;j<dst.cols;j++){
int v = dst.at<uchar>(i,j);
if(v == 0){
c0++;
}else if(v == 255){
c255++;
}else{
other++;
}
}
}
cout<<"0 count:"<<c0<<",255 count:"<<c255<<",other value count:"<<other<<endl;
cv::erode(dst,dst,cv::Mat(),cv::Point(1,1),6);
cv::dilate(dst,dst,cv::Mat(),cv::Point(1,1),6);
cv::medianBlur(dst,dst,15);
cv::imshow("inRange",dst);
vector< vector<Point> > contours;
vector<Vec4i> hierarchy;
Mat temp;
dst.copyTo(temp);
findContours(temp,contours,hierarchy,CV_RETR_EXTERNAL,CV_CHAIN_APPROX_SIMPLE );
cout<<"contours size:"<<contours.size()<<endl;
cout<<"hierarchy size:"<<hierarchy.size()<<endl;
for (int index = 0; index >= 0; index = hierarchy[index][0]) {
cout<<"hierarchy at:"<<index<<","<<hierarchy[index]<<endl;
cv::drawContours(mat,contours,index,Scalar(120,255,0));
}
cv::imshow("contours",mat);
waitKey(0);
return 0;
The binary image is like this:
The final contour image is like this:
The log printed in the console window is like this:
I don't know why the contours size is smaller than the hierarchy size. In fact, there should be three "EXTERNAL" shapes; it seems one shape is missing from the output contours. This issue has trapped me for a day and it's driving me crazy. Does anybody know why there are only two elements in contours? Did I do something wrong?
This works as expected for me:
contours size:3
hierarchy size:3
hierarchy at:0,[1, -1, -1, -1]
hierarchy at:1,[2, 0, -1, -1]
hierarchy at:2,[-1, 1, -1, -1]
Possible problems in your code are:
dst may be incorrectly initialized
dst may not be binary, i.e. it may contain values other than 0 and 255 (a quick check is sketched below)
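Reusing the dst from your snippet, a quick check might look like:
CV_Assert(dst.type() == CV_8UC1);   // inRange output should be single-channel
Mat notZero = (dst != 0);
Mat not255 = (dst != 255);
cout << "non-binary pixels: " << countNonZero(notZero & not255) << endl; // should print 0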
Code:
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main()
{
// Load image
Mat3b img = imread("path_to_image");
// Convert to grayscale
Mat1b bin;
cvtColor(img, bin, COLOR_BGR2GRAY);
// Binarize (remove jpeg artifacts)
bin = bin > 100;
vector< vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours(bin.clone(), contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
cout << "contours size:" << contours.size() << endl;
cout << "hierarchy size:" << hierarchy.size() << endl;
for (int index = 0; index >= 0; index = hierarchy[index][0]) {
cout << "hierarchy at:" << index << "," << hierarchy[index] << endl;
cv::drawContours(img, contours, index, Scalar(120, 255, 0), 2);
}
imshow("Result", img);
waitKey();
return 0;
}
My Visual Studio project was linked against ..\opencv\build\x64\vc12\lib, and I was experiencing similar issues.
I solved this by changing the link to ..\opencv\build\x86\vc12\lib.
I'm using Visual Studio 2010 on Windows 7.
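For reference, the library directory must match the build target, and the library names must match the build configuration; a typical setup might look like this (paths and version suffixes are examples only, adjust them to your install):
Additional Library Directories (Win32 target):  ..\opencv\build\x86\vc12\lib
Additional Dependencies (Debug config):         opencv_core2413d.lib, opencv_highgui2413d.lib, ...
Additional Dependencies (Release config):       opencv_core2413.lib, opencv_highgui2413.lib, ...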
So I have two thermal images (potato quality, I know, but it is what I have to work with), the first two images in the album below. I am using some code from a very common tutorial, but have edited a lot of it.
http://imgur.com/a/Zch7C
So what I am doing in my code is:
1. Detecting KeyPoints
2. Describe the KeyPoints
3. Match the KeyPoints
4. Keep only good points
5. Gather both Query and Train points
6. Find Homography
7. Warp one of the images
8. Repeat the above steps for the warped image and the other original image
Now my question is: should the change in (x, y) distance between two of the same points on the two different images be the same for every pair of points?
The whole frame is moving in the same direction, so no matter which matching points we look at, the change should be the same, should it not?
What I am finding is that the distances are all different; some are 5 pixels apart and some are 700 pixels. The only explanation I can think of is that the match is not actually good, and it is comparing two points that are nowhere near the same point in the separate frames.
I need to know what the offset is so that I can overlay one frame on top of the other, average out the overlapping pixel values, and build a new image from the composite/average of the two originals.
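For what it's worth, once a reliable homography is available, the overlay-and-average step can be sketched like this, reusing cimg1, cimg2, and H from the code that follows, and assuming the warped frame is black outside its valid region:
Mat warped;
warpPerspective(cimg2, warped, H, cimg1.size());    // map frame 2 into frame 1's coordinates
Mat gray1, gray2;
cvtColor(cimg1, gray1, CV_BGR2GRAY);
cvtColor(warped, gray2, CV_BGR2GRAY);
Mat overlap = (gray1 > 0) & (gray2 > 0);            // CV_8UC1 mask where both frames have data
Mat blended;
addWeighted(cimg1, 0.5, warped, 0.5, 0.0, blended); // pixel-wise average of the two frames
blended.copyTo(cimg1, overlap);                     // keep the average only in the overlap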
My code I am using is below:
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "stitch.cpp"
#include "opencv2\stitching\stitcher.hpp"
#include "opencv2\nonfree\features2d.hpp"
using namespace cv;
void readme();
Mat describe(Mat img, vector<KeyPoint> key);
vector<KeyPoint> detect(Mat img);
vector<DMatch> match(Mat descriptionOne, Mat descriptionTwo);
/** #function main */
int main(int argc, char** argv)
{
VideoCapture cap("vid.mp4");
vector<Mat> Vimg;
cout << "Grabbing Images" << endl;
for (int i = 0; i < 2; i++)
{
cout << "Grabbing Frame" << i << endl;
Mat temp;
cap.read(temp);
Vimg.push_back(temp);
imwrite("image" + to_string(i) + ".jpg", temp);
for (int j = 0; j < 80; j++)
cap.grab();
}
//Mat cimg1 = Vimg[0];
//Mat cimg2 = Vimg[1];
Mat cimg1 = imread("cap1.png");
Mat cimg2 = imread("cap2.png");
cout << "Starting Stitching" << endl;
//Converting the original images to grayscale
Mat img1, img2;
cvtColor(cimg1, img1, CV_BGR2GRAY);
cvtColor(cimg2, img2, CV_BGR2GRAY);
//Detecting Keypoints for original two images
vector<KeyPoint> keypointOne = detect(img1), keypointTwo = detect(img2);
Mat mkeypointOne, mkeypointTwo;
drawKeypoints(cimg1, keypointOne, mkeypointOne, Scalar(0, 0, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
drawKeypoints(cimg2, keypointTwo, mkeypointTwo, Scalar(0, 0, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
imwrite("keypointOne.jpg", mkeypointOne);
imwrite("keypointTwo.jpg", mkeypointTwo);
//Computing descriptors
Mat descriptionOne = describe(img1, keypointOne), descriptionTwo = describe(img2, keypointTwo);
//Matching descriptors
vector<DMatch> matches = match(descriptionOne, descriptionTwo);
double max = 0;
double min = 100;
//Calculation of max and min distances
for (int i = 0; i < matches.size(); i++)
{
double dist = matches[i].distance;
if (dist < min) min = dist;
if (dist > max) max = dist;
}
vector<DMatch> goodMatches;
//Keep only good matches
for (int i = 0; i < matches.size(); i++)
{
if (matches[i].distance < 2*min)
goodMatches.push_back(matches[i]);
}
//Localize
vector<Point2f> obj;
vector<Point2f> scene;
for (int i = 0; i < goodMatches.size(); i++)
{
obj.push_back(keypointOne[goodMatches[i].queryIdx].pt);
scene.push_back(keypointTwo[goodMatches[i].trainIdx].pt);
}
/*
for (int k = 0; k < obj.size(); k++)
{
cout << "Point data for Match #" << k << endl;
cout << "\tImage 1 Point: " << obj[k] << endl;
cout << "\tImage 2 Point: " << scene[k] << endl;
}*/
Mat H = findHomography(obj, scene, CV_RANSAC);
//Warping the image to fit on first image
Mat cwarpImage, warpImage;
//TODO: figure out the right size for this image that is created
warpPerspective(cimg2, cwarpImage, H, Size(img2.cols + img1.cols, img2.rows + img1.rows));
/*
Mat result;
Mat half(warpImage, Rect(0, 0, img2.cols, img2.rows));
cimg2.copyTo(half);
*/
imwrite("warp.jpg", warpImage);
//Processing Image
cvtColor(cwarpImage, warpImage, CV_BGR2GRAY);
vector<KeyPoint> keypointWarp = detect(warpImage);
Mat descriptionWarp = describe(warpImage, keypointWarp);
vector<DMatch> warpMatches = match(descriptionOne, descriptionWarp);
Mat mkeypointWarp;
drawKeypoints(cwarpImage, keypointWarp, mkeypointWarp, Scalar(0, 0, 255), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
imwrite("keypointWarp.jpg", mkeypointWarp);
Mat match;
drawMatches(cimg1, keypointOne, warpImage, keypointWarp, warpMatches, match, Scalar(0, 0, 255), Scalar(255, 0, 0), vector<char>(), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
//imshow("match", match);
imwrite("matches.jpg", match);
//Localize
vector<Point2f> obj2;
vector<Point2f> scene2;
for (int i = 0; i < warpMatches.size(); i++)
{
obj2.push_back(keypointOne[warpMatches[i].queryIdx].pt);
scene2.push_back(keypointWarp[warpMatches[i].trainIdx].pt);
}
for (int k = 0; k < obj2.size(); k++) // obj2/scene2 are the vectors indexed below
{
cout << "Point data for Match #" << k << endl;
cout << "\tImage 1 Point: " << obj2[k] << endl;
cout << "\tImage 2 Point: " << scene2[k] << endl;
}
vector<unsigned char> inliersMask;
Mat H2 = findHomography(obj2, scene2, CV_RANSAC, 3, inliersMask); // obj2/scene2, so the inlier mask lines up with warpMatches
vector<DMatch> inliers;
for (size_t i = 0; i < inliersMask.size(); i++)
{
if (inliersMask[i])
inliers.push_back(warpMatches[i]);
}
warpMatches.swap(inliers);
Mat match2;
drawMatches(cimg1, keypointOne, warpImage, keypointWarp, warpMatches, match2, Scalar(0, 0, 255), Scalar(255, 0, 0), vector<char>(), DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
imwrite("homorgraphyOutlierMatch.jpg", match2);
cout << "Writing Warp Image" << endl;
imwrite("warpimage.jpg", warpImage);
cout << H << endl;
waitKey(0);
}
Mat describe(Mat img, vector<KeyPoint> key)
{
Mat temp;
SurfDescriptorExtractor extractor;
extractor.compute(img, key, temp);
return temp;
}
vector<KeyPoint> detect(Mat img)
{
vector<KeyPoint> temp;
SurfFeatureDetector detector(400);
detector.detect(img, temp);
return temp;
}
vector<DMatch> match(Mat descriptionOne, Mat descriptionTwo)
{
vector<DMatch> temp;
BFMatcher matcher(NORM_L2, true);
matcher.match(descriptionOne, descriptionTwo, temp);
return temp;
}
EDIT:
I set cross-check to true in the BFMatcher and implemented homography outlier detection from Mastering OpenCV. Here are the two new results. I was not sure whether I was supposed to implement both cross-check and knnMatch, so I only did cross-check.
http://imgur.com/a/1P7Xt
As you can see, they are a lot better, but there are still some matches that should not be there. I ran it with both full-color and thermal images.
New code is above as well.
While the change in distance between point correspondences won't be the same for all points in the general case, you wouldn't expect deltas on the order of 700 pixels with an image size of around 1300.
By inspection of the images you've posted, it's clear that you have point correspondences which are not correct (simply put, you have lots of crossed lines in your matches between the images).
This suggests that your step 4 isn't doing a great job. You might want to try setting the second parameter of your brute-force matcher to true to enable the cross-check test:
BFMatcher matcher(NORM_L2, true);
You might also want to consider the ratio test for outlier removal, as described in How to apply Ratio Test in order to remove outliers in a multiple object detection matcher?
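For reference, a sketch of the ratio test using knnMatch, with the descriptor names from the question (the 0.75 threshold is a common choice, not a fixed rule). Note that cross-check and knnMatch with k = 2 are not meant to be combined in OpenCV, so use one or the other:
BFMatcher matcher(NORM_L2);                  // cross-check left off when using knnMatch
vector< vector<DMatch> > knnMatches;
matcher.knnMatch(descriptionOne, descriptionTwo, knnMatches, 2);
vector<DMatch> goodMatches;
for (size_t i = 0; i < knnMatches.size(); i++)
{
    // keep a match only if it is clearly better than the second-best candidate
    if (knnMatches[i].size() == 2 &&
        knnMatches[i][0].distance < 0.75f * knnMatches[i][1].distance)
        goodMatches.push_back(knnMatches[i][0]);
}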
I am working on a project which should perform horizon detection. I am using Canny edges and contours for the detection. It works quite well, but I have a problem with small areas of edges/contours which weren't eliminated by the high Canny threshold and the morphological operations. If I use a higher threshold on Canny, I start to lose some of the horizon edges.
So the question is: how do I get rid of small areas of edges/contours? Or how do I display only the single biggest contour?
This picture shows how it should look:
http://i.stack.imgur.com/f4USX.png
This picture shows the small contour areas that I need to eliminate:
http://i.stack.imgur.com/TQi0v.jpg
And here is my code:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <sstream>
#include <string>
#include <iostream>
#include <opencv\highgui.h>
#include <opencv\cv.h>
#include <opencv\ml.h>
using namespace cv;
using namespace std;
vector<Vec4i> lines;
vector<vector<Point> > contours0;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
int MAX_KERNEL_LENGTH = 31;
int main(int argc, char** argv)
{
string filename = "test.avi";
VideoCapture cap(filename);
if(!cap.isOpened())
return -1;
Mat edges,grey;
namedWindow("edges",1);
for(;;)
{
Mat frame;
cap >> frame;
cvtColor(frame, grey, CV_BGR2GRAY);
GaussianBlur(grey, grey, Size(5,5),0);
Mat erodeElement = getStructuringElement( MORPH_RECT,Size(10,10));
Mat dilateElement = getStructuringElement( MORPH_RECT,Size(10,10));
erode(grey,grey,erodeElement);
dilate(grey,grey,dilateElement);
Canny(grey, edges, 150,300, 3);
findContours( edges, contours0, hierarchy,
CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
contours.resize(contours0.size());
for( size_t k = 0; k < contours0.size(); k++ ){
approxPolyDP(Mat(contours0[k]), contours[k], 5, true);
}
int idx = 0;
for( ; idx >= 0; idx = hierarchy[idx][0] )
{
drawContours( frame, contours, idx, Scalar(128,255,255), 5, 8, hierarchy );
}
imshow("frame", frame);
imshow("grey", grey);
imshow("edges", edges);
if(waitKey(30) >= 0) break;
}
return 0;
}
You can filter your contours by their length using the arcLength function (http://docs.opencv.org/modules/imgproc/doc/structural_analysis_and_shape_descriptors.html#arclength).
Either check whether each contour is longer than a certain threshold, or keep only the longest contour.
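A sketch of both options, reusing the names from your loop (the length threshold of 100 is purely illustrative):
vector< vector<Point> > longContours;
int longestIdx = -1;
double longestLen = 0;
for (size_t k = 0; k < contours.size(); k++)
{
    double len = arcLength(contours[k], false); // false: treat the contour as an open curve
    if (len > 100)                              // option 1: drop everything below a threshold
        longContours.push_back(contours[k]);
    if (len > longestLen) { longestLen = len; longestIdx = (int)k; }
}
if (longestIdx >= 0)                            // option 2: draw only the longest contour
    drawContours(frame, contours, longestIdx, Scalar(128,255,255), 5);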
I have the following contour:
https://drive.google.com/file/d/0B45BJEUVEkjseFd3X3RITzM5S3c/edit?usp=sharing
containing the following points (printed in order):
https://drive.google.com/file/d/0B45BJEUVEkjsN3NIRU5lOFBDb00/edit?usp=sharing
However, when I calculate the area of this contour (using the function contourArea), I get 157, which is too low for the size of that contour. I expect it to be in the thousands. Why is the contour area being calculated incorrectly, and how may I fix it?
The following is the code I'm using to calculate the areas of all contours of the image. The contour of interest is the last one. The original image I'm using is here:
https://drive.google.com/file/d/0B45BJEUVEkjsbGhXM3E3UW1lZWs/edit?usp=sharing
int main(int argc, char* argv[])
{
Mat imgOriginal = imread(argv[1], 0);
if(imgOriginal.empty())
return -1;
Mat img;
resize(imgOriginal, img, Size(640, 480));
medianBlur(img, img, 11);
Canny(img, img, 25, 100);
vector< vector<Point> > contours;
findContours(img, contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);
for (int i = 0; i < contours.size(); i++)
cout << "Area " << i << ": " << contourArea(contours[i]) << endl;
return 0;
}
Also, I noticed that several points in the contour are duplicated (I'm not sure why), which may be causing the area to be calculated incorrectly.
Maybe because your contour isn't closed?
Update: I see you're feeding it the result of a Canny operation. I would try the code on an image with a very well-defined closed shape, to test the code and rule out a problem with that specific contour, something like the one used in the moments tutorial.
Maybe this discussion will be helpful.
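A minimal test along those lines (a sketch): a filled rectangle whose area is known in advance.
Mat test = Mat::zeros(480, 640, CV_8UC1);
rectangle(test, Point(100, 100), Point(300, 200), Scalar(255), CV_FILLED);
vector< vector<Point> > cs;
findContours(test, cs, RETR_EXTERNAL, CHAIN_APPROX_NONE);
cout << "Area: " << contourArea(cs[0]) << endl; // should be close to 200 * 100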
Because your image has no closed contours, just series of green pixels on a background, you need to close the contour to get a meaningful contour area.
Correction:
int main(int argc, char* argv[])
{
Mat imgOriginal = imread(argv[1], 0);
if(imgOriginal.empty())
return -1;
Mat img;
resize(imgOriginal, img, Size(640, 480));
medianBlur(img, img, 11);
Canny(img, img, 25, 100);
vector< vector<Point> > contours;
findContours(img, contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);
cout << "Area " << ": " << contourArea(contours[1]) << endl;
return 0;
}
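Note that the correction above still runs findContours on the raw Canny output; one way to actually close small gaps in the edge map first is a morphological closing (a sketch; the kernel size is illustrative):
Mat kernel = getStructuringElement(MORPH_RECT, Size(5, 5));
morphologyEx(img, img, MORPH_CLOSE, kernel); // dilation followed by erosion bridges small gaps
findContours(img, contours, RETR_EXTERNAL, CHAIN_APPROX_NONE);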