OpenCV C++/Obj-C: Detecting a sheet of paper / Rectangle Detection - c++

I'm trying to detect a sheet of paper in an image, just like CamScanner does.
I've followed this link for the implementation.
It detects the paper sheet accurately on dark backgrounds, but not on a light background.
Sample image:
Code:
void find_squares(Mat& image, vector<vector<cv::Point> >& squares)
{
    // blur will enhance edge detection
    Mat blurred(image);
    medianBlur(image, blurred, 9);

    Mat gray0(blurred.size(), CV_8U), gray;
    vector<vector<cv::Point> > contours;

    // find squares in every color plane of the image
    for (int c = 0; c < 3; c++)
    {
        int ch[] = {c, 0};
        mixChannels(&blurred, 1, &gray0, 1, ch, 1);

        // try several threshold levels
        const int threshold_level = 2;
        for (int l = 0; l < threshold_level; l++)
        {
            // Use Canny instead of zero threshold level!
            // Canny helps to catch squares with gradient shading
            if (l == 0)
            {
                Canny(gray0, gray, 10, 20, 3);

                // Dilate helps to remove potential holes between edge segments
                dilate(gray, gray, Mat(), cv::Point(-1,-1));
            }
            else
            {
                gray = gray0 >= (l+1) * 255 / threshold_level;
            }

            // Find contours and store them in a list
            findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

            // Test contours
            vector<cv::Point> approx;
            for (size_t i = 0; i < contours.size(); i++)
            {
                // approximate contour with accuracy proportional
                // to the contour perimeter
                approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);

                // Note: absolute value of an area is used because
                // area may be positive or negative - in accordance with the
                // contour orientation
                if (approx.size() == 4 &&
                    fabs(contourArea(Mat(approx))) > 1000 &&
                    isContourConvex(Mat(approx)))
                {
                    double maxCosine = 0;

                    for (int j = 2; j < 5; j++)
                    {
                        double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
                        maxCosine = MAX(maxCosine, cosine);
                    }

                    if (maxCosine < 0.3)
                        squares.push_back(approx);
                }
            }
        }
    }
}
double angle( cv::Point pt1, cv::Point pt2, cv::Point pt0 )
{
    double dx1 = pt1.x - pt0.x;
    double dy1 = pt1.y - pt0.y;
    double dx2 = pt2.x - pt0.x;
    double dy2 = pt2.y - pt0.y;
    return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}
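For reference, here is a minimal usage sketch for the function above, assuming find_squares and angle from the snippet are in the same file (the input file name is hypothetical):
#include <opencv2/opencv.hpp>

using namespace cv;
using namespace std;

int main()
{
    Mat img = imread("sheet.jpg");          // hypothetical input image
    if (img.empty()) return -1;

    vector<vector<Point> > squares;
    find_squares(img, squares);             // fills `squares` with candidate quadrilaterals

    // draw every candidate quadrilateral in green
    for (size_t i = 0; i < squares.size(); i++)
        drawContours(img, squares, (int)i, Scalar(0, 255, 0), 2);

    imshow("squares", img);
    waitKey(0);
    return 0;
}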

Related

Getting a book image using OpenCV and C++

I'm trying to detect the following book using findContours, but it cannot be detected at all, and I get an exception because there is no convex hull.
I tried blurring, dilating, and Canny edge detection, with no success at all.
I hope to get a solution for finding a rectangular paper/book using OpenCV.
Please let me know if you have further questions or need resources.
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
using namespace cv;
using namespace std;
double angle(cv::Point pt1, cv::Point pt2, cv::Point pt0) {
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1*dx2 + dy1*dy2) / sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}
void find_squares(Mat& image, vector<vector<Point> >& squares)
{
// blur will enhance edge detection
Mat blurred(image);
Mat dst;
medianBlur(image, dst, 9);
Mat gray0(dst.size(), CV_8U), gray;
vector<vector<Point> > contours;
// find squares in every color plane of the image
for (int c = 0; c < 3; c++)
{
int ch[] = { c, 0 };
mixChannels(&dst, 1, &gray0, 1, ch, 1);
// try several threshold levels
const int threshold_level = 2;
for (int l = 0; l < threshold_level; l++)
{
// Use Canny instead of zero threshold level!
// Canny helps to catch squares with gradient shading
if (l == 0)
{
Canny(gray0, gray, 10, 20, 3); //
// Dilate helps to remove potential holes between edge segments
dilate(gray, gray, Mat(), Point(-1, -1));
}
else
{
gray = gray0 >= (l + 1) * 255 / threshold_level;
}
// Find contours and store them in a list
findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
// Test contours
vector<Point> approx;
for (size_t i = 0; i < contours.size(); i++)
{
// approximate contour with accuracy proportional
// to the contour perimeter
approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
if (approx.size() == 4 &&
fabs(contourArea(Mat(approx))) > 1000 &&
isContourConvex(Mat(approx)))
{
double maxCosine = 0;
for (int j = 2; j < 5; j++)
{
double cosine = fabs(angle(approx[j % 4], approx[j - 2], approx[j - 1]));
maxCosine = MAX(maxCosine, cosine);
}
if (maxCosine < 0.3)
squares.push_back(approx);
}
}
}
}
}
cv::Mat debugSquares(std::vector<std::vector<cv::Point> > squares, cv::Mat image)
{
    for (int i = 0; i < squares.size(); i++) {
        // draw contour
        cv::drawContours(image, squares, i, cv::Scalar(255, 0, 0), 1, 8, std::vector<cv::Vec4i>(), 0, cv::Point());

        // draw bounding rect
        cv::Rect rect = boundingRect(cv::Mat(squares[i]));
        cv::rectangle(image, rect.tl(), rect.br(), cv::Scalar(0, 255, 0), 2, 8, 0);

        // draw rotated rect
        cv::RotatedRect minRect = minAreaRect(cv::Mat(squares[i]));
        cv::Point2f rect_points[4];
        minRect.points(rect_points);
        for (int j = 0; j < 4; j++) {
            cv::line(image, rect_points[j], rect_points[(j + 1) % 4], cv::Scalar(0, 0, 255), 1, 8); // red
        }
    }
    return image;
}
static std::vector<cv::Point> extremePoints(std::vector<cv::Point> pts)
{
    int xmin = 0, ymin = 0, xmax = -1, ymax = -1;
    Point ptxmin, ptymin, ptxmax, ptymax;

    Point pt = pts[0];
    ptxmin = ptymin = ptxmax = ptymax = pt;
    xmin = xmax = pt.x;
    ymin = ymax = pt.y;

    for (size_t i = 1; i < pts.size(); i++)
    {
        pt = pts[i];

        if (xmin > pt.x)
        {
            xmin = pt.x;
            ptxmin = pt;
        }
        if (xmax < pt.x)
        {
            xmax = pt.x;
            ptxmax = pt;
        }
        if (ymin > pt.y)
        {
            ymin = pt.y;
            ptymin = pt;
        }
        if (ymax < pt.y)
        {
            ymax = pt.y;
            ptymax = pt;
        }
    }

    std::vector<cv::Point> res;
    res.push_back(ptxmin);
    res.push_back(ptxmax);
    res.push_back(ptymin);
    res.push_back(ptymax);
    return res;
}
void sortCorners(std::vector<cv::Point2f>& corners)
{
    std::vector<cv::Point2f> top, bot;
    cv::Point2f center;

    // Get mass center
    for (size_t i = 0; i < corners.size(); i++)
        center += corners[i];
    center *= (1. / corners.size());

    for (size_t i = 0; i < corners.size(); i++)
    {
        if (corners[i].y < center.y)
            top.push_back(corners[i]);
        else
            bot.push_back(corners[i]);
    }
    corners.clear();

    if (top.size() == 2 && bot.size() == 2) {
        cv::Point2f tl = top[0].x > top[1].x ? top[1] : top[0];
        cv::Point2f tr = top[0].x > top[1].x ? top[0] : top[1];
        cv::Point2f bl = bot[0].x > bot[1].x ? bot[1] : bot[0];
        cv::Point2f br = bot[0].x > bot[1].x ? bot[0] : bot[1];

        corners.push_back(tl);
        corners.push_back(tr);
        corners.push_back(br);
        corners.push_back(bl);
    }
}
int main(int, char**)
{
    int largest_area = 0;
    int largest_contour_index = 0;
    cv::Rect bounding_rect;

    Mat src, edges;
    src = imread("20628991_10159154614610574_1244594322_o.jpg");
    cvtColor(src, edges, COLOR_BGR2GRAY);
    GaussianBlur(edges, edges, Size(5, 5), 1.5, 1.5);
    erode(edges, edges, Mat());   // these lines may need to be optimized
    dilate(edges, edges, Mat());
    dilate(edges, edges, Mat());
    erode(edges, edges, Mat());
    Canny(edges, edges, 150, 150, 3);   // Canny parameters may need to be optimized
    imshow("edges", edges);

    vector<Point> selected;
    vector<vector<Point> > contours;
    findContours(edges, contours, RETR_LIST, CHAIN_APPROX_SIMPLE);
    for (size_t i = 0; i < contours.size(); i++)
    {
        Rect minRect = boundingRect(contours[i]);
        if (minRect.width > 150 && minRect.height > 150)   // this threshold may also need tuning
        {
            selected.insert(selected.end(), contours[i].begin(), contours[i].end());
        }
    }
    convexHull(selected, selected);

    RotatedRect minRect = minAreaRect(selected);
    std::vector<cv::Point> corner_points = extremePoints(selected);
    std::vector<cv::Point2f> corners;
    corners.push_back(corner_points[0]);
    corners.push_back(corner_points[1]);
    corners.push_back(corner_points[2]);
    corners.push_back(corner_points[3]);
    sortCorners(corners);

    cv::Mat quad = cv::Mat::zeros(norm(corners[1] - corners[2]), norm(corners[2] - corners[3]), CV_8UC3);
    std::vector<cv::Point2f> quad_pts;
    quad_pts.push_back(cv::Point2f(0, 0));
    quad_pts.push_back(cv::Point2f(quad.cols, 0));
    quad_pts.push_back(cv::Point2f(quad.cols, quad.rows));
    quad_pts.push_back(cv::Point2f(0, quad.rows));

    cv::Mat transmtx = cv::getPerspectiveTransform(corners, quad_pts);
    cv::warpPerspective(src, quad, transmtx, quad.size());

    resize(quad, quad, Size(), 0.25, 0.25);   // you can remove this line to keep the original image size
    imshow("quad", quad);

    polylines(src, selected, true, Scalar(0, 0, 255), 2);
    resize(src, src, Size(), 0.5, 0.5);   // you can remove this line to keep the original image size
    imshow("result", src);

    waitKey(0);
    return 0;
}
Strange, I did it with exactly that (blur, dilate, Canny):
The code is in Python, but it is nothing more than OpenCV function calls, so it should be easy to follow. As one of the references I used this answer, which is in C++; it also shows how to correct the perspective and turn it into a rectangle:
import numpy as np
import cv2
img = cv2.imread('sngo1.jpg')
#resize and create a copy for future drawing
resize_coeff = 0.5
h, w, c = img.shape
img_in = cv2.resize(img, (int(resize_coeff*w), int(resize_coeff*h)))
img_out = img_in.copy()
#median and canny
img_in = cv2.medianBlur(img_in, 5)
img_in = cv2.Canny(img_in, 100, 200)
#morphological close for our edges
kernel = np.ones((17, 17), np.uint8)
img_in = cv2.morphologyEx(img_in, cv2.MORPH_CLOSE, kernel, iterations = 1)
#find contours, get max by area
img_in, contours, hierarchy = cv2.findContours(img_in, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
max_index, max_area = max(enumerate([cv2.contourArea(x) for x in contours]), key = lambda x: x[1])
max_contour = contours[max_index]
#approximate it with a quadrangle
approx = cv2.approxPolyDP(max_contour, 0.1*cv2.arcLength(max_contour, True), True)
approx = approx[:,0,:]
cv2.drawContours(img_out, [approx], 0, (255, 0, 0), 2)
cv2.imwrite("result.png", img_out)

Detecting non-regular rectangular objects does not work correctly

I would like to detect non-regular rectangular objects using OpenCV and the approxPolyDP function. In the image I have 4 windows that can be approximated as rectangular contours using approxPolyDP, but I am not able to detect all of them when a certain threshold is set. I filter the windows using the width, the height, and the angle between the vertices. I would like to have more robust code that can detect all 4 windows. Any help? Here is my code:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src; Mat src_gray;
int thresh = 5;
int max_thresh = 60000;
RNG rng(12345);
void thresh_callback(int, void* );
static double angle(Point pt1, Point pt2, Point pt0)
{
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}
void setLabel(cv::Mat& im, const std::string label, std::vector<cv::Point>& contour)
{
int fontface = cv::FONT_HERSHEY_SIMPLEX;
double scale = 0.4;
int thickness = 1;
int baseline = 0;
cv::Size text = cv::getTextSize(label, fontface, scale, thickness, &baseline);
cv::Rect r = cv::boundingRect(contour);
cv::Point pt(r.x + ((r.width - text.width) / 2), r.y + ((r.height + text.height) / 2));
cv::rectangle(im, pt + cv::Point(0, baseline), pt + cv::Point(text.width, -text.height), CV_RGB(255,255,255), CV_FILLED);
cv::putText(im, label, pt, fontface, scale, CV_RGB(0,0,0), thickness, 8);
}
int main()
{
    cv::Mat src = cv::imread("p3.png");
    resize(src, src, Size(640,480), 0, 0, INTER_CUBIC);

    const char* source_window = "Source";
    namedWindow( source_window, CV_WINDOW_AUTOSIZE );
    imshow( source_window, src );

    createTrackbar( " Canny thresh:", "Source", &thresh, max_thresh, thresh_callback );
    thresh_callback( 0, 0 );

    waitKey(0);
    return(0);
}
void thresh_callback(int, void* )
{
    src = imread("p3.png");
    resize(src, src, Size(640,480), 0, 0, INTER_CUBIC);

    //================================
    Mat gray, bw, dil, erd;
    cvtColor(src, gray, CV_BGR2GRAY);   // grayscale conversion needed before Canny
    Canny(gray, bw, thresh, thresh*1, 5);
    //Canny( src_gray, canny_output, thresh, thresh*1, 3 );
    dilate(bw, dil, Mat());
    erode(dil, erd, Mat());
    Mat tmp = bw.clone();

    Size kernalSize(15, 20);
    Mat element = getStructuringElement(MORPH_RECT, kernalSize, Point(4, 4));
    morphologyEx(bw, bw, MORPH_CLOSE, element);

    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    findContours(bw.clone(), contours, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);

    vector<Point> approx;
    Mat dst = src.clone();

    for (size_t i = 0; i < contours.size(); i++)
    {
        approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true) * 0.09, true);

        if (approx.size() == 4)
        {
            int vtc = approx.size();
            vector<double> cos;
            for (int j = 2; j < vtc + 1; j++)
                cos.push_back(angle(approx[j%vtc], approx[j-2], approx[j-1]));

            sort(cos.begin(), cos.end());
            double mincos = cos.front();
            double maxcos = cos.back();

            if (vtc == 4 && mincos >= -0.6 && maxcos <= 0.6)
            {
                Rect r = boundingRect(contours[i]);
                double ratio = abs(1 - (double)r.width / r.height);

                if (r.height > 50 && r.width > 20 && r.height < 640 && r.width < 640 && r.height > r.width) /* constraints on region size */
                {
                    line(dst, approx.at(0), approx.at(1), Scalar(0,0,255), 4);
                    line(dst, approx.at(1), approx.at(2), Scalar(0,0,255), 4);
                    line(dst, approx.at(2), approx.at(3), Scalar(0,0,255), 4);
                    line(dst, approx.at(3), approx.at(0), Scalar(0,0,255), 4);
                }
            }
        }
    }
}
Here is the source image
And here are the detected windows
Here are the drawn/visualised contours

Error with PCA in OpenCV 3

I am trying to run a sample code from OpenCV for PCA, from this link:
PCA example.
But when I run it, it breaks down. I debugged it and saw that it breaks down inside the for loop below, which is in the getOrientation function:
for (int i = 0; i < 2; ++i)
{
    eigen_vecs[i] = Point2d(pca_analysis.eigenvectors.at<double>(i, 0),
                            pca_analysis.eigenvectors.at<double>(i, 1));
    eigen_val[i] = pca_analysis.eigenvalues.at<double>(0, i);
}
I searched Stack Overflow before; there were questions with similar titles but not the same error. Any help? Thanks.
Here is the sample code:
#include <iostream>
#include <opencv2/opencv.hpp>

using namespace std;
using namespace cv;

// Function declarations
void drawAxis(Mat&, Point, Point, Scalar, const float);
double getOrientation(const vector<Point> &, Mat&);

void drawAxis(Mat& img, Point p, Point q, Scalar colour, const float scale = 0.2)
{
    double angle;
    double hypotenuse;
    angle = atan2((double)p.y - q.y, (double)p.x - q.x); // angle in radians
    hypotenuse = sqrt((double)(p.y - q.y) * (p.y - q.y) + (p.x - q.x) * (p.x - q.x));
    // double degrees = angle * 180 / CV_PI; // convert radians to degrees (0-180 range)
    // cout << "Degrees: " << abs(degrees - 180) << endl; // angle in 0-360 degrees range

    // Here we lengthen the arrow by a factor of scale
    q.x = (int)(p.x - scale * hypotenuse * cos(angle));
    q.y = (int)(p.y - scale * hypotenuse * sin(angle));
    line(img, p, q, colour, 1, CV_AA);

    // create the arrow hooks
    p.x = (int)(q.x + 9 * cos(angle + CV_PI / 4));
    p.y = (int)(q.y + 9 * sin(angle + CV_PI / 4));
    line(img, p, q, colour, 1, CV_AA);

    p.x = (int)(q.x + 9 * cos(angle - CV_PI / 4));
    p.y = (int)(q.y + 9 * sin(angle - CV_PI / 4));
    line(img, p, q, colour, 1, CV_AA);
}
double getOrientation(const vector<Point> &pts, Mat &img)
{
    // Construct a buffer used by the pca analysis
    int sz = static_cast<int>(pts.size());
    Mat data_pts = Mat(sz, 2, CV_64FC1);
    for (int i = 0; i < data_pts.rows; ++i)
    {
        data_pts.at<double>(i, 0) = pts[i].x;
        data_pts.at<double>(i, 1) = pts[i].y;
    }

    // Perform PCA analysis
    PCA pca_analysis(data_pts, Mat(), CV_PCA_DATA_AS_ROW);

    // Store the center of the object
    Point cntr = Point(static_cast<int>(pca_analysis.mean.at<double>(0, 0)),
                       static_cast<int>(pca_analysis.mean.at<double>(0, 1)));

    // Store the eigenvalues and eigenvectors
    vector<Point2d> eigen_vecs(2);
    vector<double> eigen_val(2);
    for (int i = 0; i < 2; ++i)
    {
        eigen_vecs[i] = Point2d(pca_analysis.eigenvectors.at<double>(i, 0),
                                pca_analysis.eigenvectors.at<double>(i, 1));
        eigen_val[i] = pca_analysis.eigenvalues.at<double>(0, i);
    }

    // Draw the principal components
    circle(img, cntr, 3, Scalar(255, 0, 255), 2);
    Point p1 = cntr + 0.02 * Point(static_cast<int>(eigen_vecs[0].x * eigen_val[0]), static_cast<int>(eigen_vecs[0].y * eigen_val[0]));
    Point p2 = cntr - 0.02 * Point(static_cast<int>(eigen_vecs[1].x * eigen_val[1]), static_cast<int>(eigen_vecs[1].y * eigen_val[1]));
    drawAxis(img, cntr, p1, Scalar(0, 255, 0), 1);
    drawAxis(img, cntr, p2, Scalar(255, 255, 0), 5);

    double angle = atan2(eigen_vecs[0].y, eigen_vecs[0].x); // orientation in radians
    return angle;
}
int main(int, char** argv)
{
    // Load image
    Mat src = imread("C:/Users/aydin/Desktop/c++/pictures/pca_test1.jpg");
    //Mat src = imread(argv[1]);

    // Check if image is loaded successfully
    if (!src.data || src.empty())
    {
        cout << "Problem loading image!!!" << endl;
        return EXIT_FAILURE;
    }
    imshow("src", src);

    // Convert image to grayscale
    Mat gray;
    cvtColor(src, gray, COLOR_BGR2GRAY);

    // Convert image to binary
    Mat bw;
    threshold(gray, bw, 50, 255, CV_THRESH_BINARY | CV_THRESH_OTSU);

    // Find all the contours in the thresholded image
    vector<Vec4i> hierarchy;
    vector<vector<Point> > contours;
    findContours(bw, contours, hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);

    for (size_t i = 0; i < contours.size(); ++i)
    {
        // Calculate the area of each contour
        double area = contourArea(contours[i]);
        // Ignore contours that are too small or too large
        if (area < 1e2 || 1e5 < area) continue;

        // Draw each contour only for visualisation purposes
        drawContours(src, contours, static_cast<int>(i), Scalar(0, 0, 255), 2, 8, hierarchy, 0);
        // Find the orientation of each shape
        //getOrientation(contours[i], src);
    }

    imshow("output", src);
    waitKey(0);
    return 0;
}
No, it is not about the environment; I don't know how you made it work, but afterwards I saw the same error here, and he posted the corrected code, saying that the original had bugs. However, comparing that code with the original one, I saw that in the for loop I mentioned as problematic,
for (int i = 0; i < 2; ++i)
{
    eigen_vecs[i] = Point2d(pca_analysis.eigenvectors.at<double>(i, 0),
                            pca_analysis.eigenvectors.at<double>(i, 1));
    eigen_val[i] = pca_analysis.eigenvalues.at<double>(0, i);
}
he changed the last index from (0, i) to (i). I did the same and it worked, but I don't understand why. Can you tell me why?
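A likely explanation (my reading of the OpenCV 3 PCA layout, not something stated in the thread): pca_analysis.eigenvalues is an N x 1 column matrix with one eigenvalue per row, so at<double>(0, i) reads outside the first row as soon as i == 1, which trips the debug range check; at<double>(i) or at<double>(i, 0) stays in range:
// eigenvalues is stored as N x 1 (one eigenvalue per row) in OpenCV 3,
// so index it by row rather than by column
for (int i = 0; i < 2; ++i)
{
    eigen_vecs[i] = Point2d(pca_analysis.eigenvectors.at<double>(i, 0),
                            pca_analysis.eigenvectors.at<double>(i, 1));
    eigen_val[i] = pca_analysis.eigenvalues.at<double>(i, 0);   // equivalent to .at<double>(i)
}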

Detect largest rectangle from an image using OpenCV

I asked a previous question here, and following the advice from the answer I built the program below, which I thought would detect the large rectangle, but it doesn't detect the rectangle at all. It does work on this image, though.
Original Image
Desired Image
I want the solution to work not only on this image but on different images of this kind. The major part of the code below is from different answers on SO.
My full program:
#include <cv.h>
#include <highgui.h>

using namespace cv;
using namespace std;

double angle( Point pt1, Point pt2, Point pt0 )
{
    double dx1 = pt1.x - pt0.x;
    double dy1 = pt1.y - pt0.y;
    double dx2 = pt2.x - pt0.x;
    double dy2 = pt2.y - pt0.y;
    return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}

void find_squares( Mat& image, vector< vector< Point> >& squares)
{
    // blur will enhance edge detection
    Mat blurred(image);
    medianBlur(image, blurred, 9);

    Mat gray0(blurred.size(), CV_8U), gray;
    vector< vector< Point> > contours;

    // find squares in every color plane of the image
    for (int c = 0; c < 3; c++)
    {
        int ch[] = {c, 0};
        mixChannels(&blurred, 1, &gray0, 1, ch, 1);

        // try several threshold levels
        const int threshold_level = 2;
        for (int l = 0; l < threshold_level; l++)
        {
            // Use Canny instead of zero threshold level!
            // Canny helps to catch squares with gradient shading
            if (l == 0)
            {
                Canny(gray0, gray, 10, 20, 3);

                // Dilate helps to remove potential holes between edge segments
                dilate(gray, gray, Mat(), Point(-1,-1));
            }
            else
            {
                gray = gray0 >= (l+1) * 255 / threshold_level;
            }

            // Find contours and store them in a list
            findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

            // Test contours
            vector< Point> approx;
            for (size_t i = 0; i < contours.size(); i++)
            {
                // approximate contour with accuracy proportional
                // to the contour perimeter
                approxPolyDP( Mat(contours[i]), approx, arcLength( Mat(contours[i]), true)*0.02, true);

                // Note: absolute value of an area is used because
                // area may be positive or negative - in accordance with the
                // contour orientation
                if (approx.size() == 4 &&
                    fabs(contourArea( Mat(approx))) > 1000 &&
                    isContourConvex( Mat(approx)))
                {
                    double maxCosine = 0;

                    for (int j = 2; j < 5; j++)
                    {
                        double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
                        maxCosine = MAX(maxCosine, cosine);
                    }

                    if (maxCosine < 0.3)
                        squares.push_back(approx);
                }
            }
        }
    }
}
void find_largest_square(const vector<vector <Point> >& squares, vector<Point>& biggest_square)
{
    if (!squares.size()) {
        return;
    }

    int max_width = 0;
    int max_height = 0;
    int max_square_idx = 0;

    for (size_t i = 0; i < squares.size(); i++) {
        Rect rectangle = boundingRect(Mat(squares[i]));
        if ((rectangle.width >= max_width) && (rectangle.height >= max_height)) {
            max_width = rectangle.width;
            max_height = rectangle.height;
            max_square_idx = i;
        }
    }
    biggest_square = squares[max_square_idx];
}
int main(int argc, char* argv[])
{
    Mat img = imread(argv[1]);
    if (img.empty())
    {
        cout << "!!! imread() failed to open target image" << endl;
        return -1;
    }

    vector< vector< Point> > squares;
    find_squares(img, squares);

    vector<Point> largest_square;
    find_largest_square(squares, largest_square);

    if (largest_square.size() == 4)   // guard against the case where nothing was detected
    {
        for (int i = 0; i < 4; ++i) {
            line(img, largest_square[i], largest_square[(i+1)%4], Scalar(0, 255, 0), 1, CV_AA);
        }
    }

    imwrite("squares.png", img);
    imshow("squares", img);
    waitKey(0);

    return 0;
}
I think you can do it easily using the findContours function - http://docs.opencv.org/doc/tutorials/imgproc/shapedescriptors/find_contours/find_contours.html The biggest contour (or possibly the second biggest) should be the contour of the black rectangle. Then just find the smallest rectangle that surrounds this contour (just find the points with the biggest/smallest x/y coordinates).
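A minimal sketch of that suggestion, assuming the black rectangle shows up as the largest contour after a simple Otsu binarization (the file name and threshold choice are hypothetical):
#include <opencv2/opencv.hpp>
#include <vector>
#include <cmath>

int main()
{
    cv::Mat img = cv::imread("input.png");                 // hypothetical input image
    if (img.empty()) return -1;

    cv::Mat gray, bw;
    cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);
    cv::threshold(gray, bw, 0, 255, cv::THRESH_BINARY_INV | cv::THRESH_OTSU);

    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(bw, contours, cv::RETR_LIST, cv::CHAIN_APPROX_SIMPLE);
    if (contours.empty()) return 0;

    // pick the contour with the largest area
    size_t best = 0;
    double bestArea = 0;
    for (size_t i = 0; i < contours.size(); i++) {
        double a = std::fabs(cv::contourArea(contours[i]));
        if (a > bestArea) { bestArea = a; best = i; }
    }

    // the surrounding rectangle comes straight from the extreme x/y points of that contour
    cv::Rect box = cv::boundingRect(contours[best]);
    cv::rectangle(img, box, cv::Scalar(0, 255, 0), 2);

    cv::imshow("largest rectangle", img);
    cv::waitKey(0);
    return 0;
}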

findContours gives memory heap error

I have the following picture and am trying to find the largest rectangle with OpenCV using these lines:
std::vector< std::vector<cv::Point> > contours;
cv::findContours(result,contours,CV_RETR_LIST,CV_CHAIN_APPROX_SIMPLE);
But the statements above cause a memory heap error. Can anyone give me a clue why this is happening? I have been pulling my hair out for the last couple of hours.
I think it has something to do with the cv::Point allocator, since the call stack points to it.
Update: I just ran the program with cvFindContours instead, without any problem. So it must be OpenCV 2.3.1.
Update 2: Thanks to @karlphillip's answer, I revisited my project and it turned out to be a Visual Studio project setting. I was linking MFC as a static library because of an annoying memory leak message; that was the cause of the problem, presumably because memory allocated in the OpenCV DLL was being freed against a different runtime heap. When I used MFC as a shared DLL, the problem went away.
I've just tested the following application with OpenCV 2.3.1 on both Linux and Windows XP (32-bit), and I had no problems.
Unless you can write a minimal application to reproduce the problem you are observing, this is as far as I go.
This is the input image, and the code is right below:
#include <cv.h>
#include <highgui.h>

using namespace cv;

double angle( cv::Point pt1, cv::Point pt2, cv::Point pt0 )
{
    double dx1 = pt1.x - pt0.x;
    double dy1 = pt1.y - pt0.y;
    double dx2 = pt2.x - pt0.x;
    double dy2 = pt2.y - pt0.y;
    return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}

void find_squares(Mat& image, vector<vector<Point> >& squares)
{
    // blur will enhance edge detection
    Mat blurred(image);
    medianBlur(image, blurred, 9);

    Mat gray0(blurred.size(), CV_8U), gray;
    vector<vector<Point> > contours;

    // find squares in every color plane of the image
    for (int c = 0; c < 3; c++)
    {
        int ch[] = {c, 0};
        mixChannels(&blurred, 1, &gray0, 1, ch, 1);

        // try several threshold levels
        const int threshold_level = 2;
        for (int l = 0; l < threshold_level; l++)
        {
            // Use Canny instead of zero threshold level!
            // Canny helps to catch squares with gradient shading
            if (l == 0)
            {
                Canny(gray0, gray, 10, 20, 3);

                // Dilate helps to remove potential holes between edge segments
                dilate(gray, gray, Mat(), Point(-1,-1));
            }
            else
            {
                gray = gray0 >= (l+1) * 255 / threshold_level;
            }

            // Find contours and store them in a list
            findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

            // Test contours
            vector<Point> approx;
            for (size_t i = 0; i < contours.size(); i++)
            {
                // approximate contour with accuracy proportional
                // to the contour perimeter
                approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);

                // Note: absolute value of an area is used because
                // area may be positive or negative - in accordance with the
                // contour orientation
                if (approx.size() == 4 &&
                    fabs(contourArea(Mat(approx))) > 1000 &&
                    isContourConvex(Mat(approx)))
                {
                    double maxCosine = 0;

                    for (int j = 2; j < 5; j++)
                    {
                        double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
                        maxCosine = MAX(maxCosine, cosine);
                    }

                    if (maxCosine < 0.3)
                        squares.push_back(approx);
                }
            }
        }
    }
}
int main()
{
    Mat img = imread("paper.jpg");

    vector<vector<Point> > squares;
    find_squares(img, squares);

    std::cout << "squares size: " << squares.size() << std::endl;
    getchar();

    return 0;
}