I'm trying to detect the following book using findContours, but it is not detected at all and I get an exception because there is no convex hull.
I tried blurring, dilating, and Canny edge detection, with no success.
I'm hoping for a way to find a rectangular sheet of paper or book with OpenCV.
Please let me know if you have further questions or need more resources.
#include "opencv2/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include <iostream>
using namespace cv;
using namespace std;
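// Returns the cosine of the angle at pt0 formed by the vectors pt0->pt1 and pt0->pt2.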
double angle(cv::Point pt1, cv::Point pt2, cv::Point pt0) {
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1*dx2 + dy1*dy2) / sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}
void find_squares(Mat& image, vector<vector<Point> >& squares)
{
// median blur reduces noise and helps edge detection
Mat dst;
medianBlur(image, dst, 9);
Mat gray0(dst.size(), CV_8U), gray;
vector<vector<Point> > contours;
// find squares in every color plane of the image
for (int c = 0; c < 3; c++)
{
int ch[] = { c, 0 };
mixChannels(&dst, 1, &gray0, 1, ch, 1);
// try several threshold levels
const int threshold_level = 2;
for (int l = 0; l < threshold_level; l++)
{
// Use Canny instead of zero threshold level!
// Canny helps to catch squares with gradient shading
if (l == 0)
{
Canny(gray0, gray, 10, 20, 3);
// Dilate helps to remove potential holes between edge segments
dilate(gray, gray, Mat(), Point(-1, -1));
}
else
{
gray = gray0 >= (l + 1) * 255 / threshold_level;
}
// Find contours and store them in a list
findContours(gray, contours, RETR_LIST, CHAIN_APPROX_SIMPLE);
// Test contours
vector<Point> approx;
for (size_t i = 0; i < contours.size(); i++)
{
// approximate contour with accuracy proportional
// to the contour perimeter
approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
if (approx.size() == 4 &&
fabs(contourArea(Mat(approx))) > 1000 &&
isContourConvex(Mat(approx)))
{
double maxCosine = 0;
for (int j = 2; j < 5; j++)
{
double cosine = fabs(angle(approx[j % 4], approx[j - 2], approx[j - 1]));
maxCosine = MAX(maxCosine, cosine);
}
if (maxCosine < 0.3)
squares.push_back(approx);
}
}
}
}
}
cv::Mat debugSquares(std::vector<std::vector<cv::Point> > squares, cv::Mat image)
{
for (int i = 0; i< squares.size(); i++) {
// draw contour
cv::drawContours(image, squares, i, cv::Scalar(255, 0, 0), 1, 8, std::vector<cv::Vec4i>(), 0, cv::Point());
// draw bounding rect
cv::Rect rect = boundingRect(cv::Mat(squares[i]));
cv::rectangle(image, rect.tl(), rect.br(), cv::Scalar(0, 255, 0), 2, 8, 0);
// draw rotated rect
cv::RotatedRect minRect = minAreaRect(cv::Mat(squares[i]));
cv::Point2f rect_points[4];
minRect.points(rect_points);
for (int j = 0; j < 4; j++) {
cv::line(image, rect_points[j], rect_points[(j + 1) % 4], cv::Scalar(0, 0, 255), 1, 8); // red (BGR)
}
}
return image;
}
static std::vector<cv::Point> extremePoints(std::vector<cv::Point>pts)
{
int xmin, ymin, xmax, ymax;
Point ptxmin, ptymin, ptxmax, ptymax;
Point pt = pts[0];
ptxmin = ptymin = ptxmax = ptymax = pt;
xmin = xmax = pt.x;
ymin = ymax = pt.y;
for (size_t i = 1; i < pts.size(); i++)
{
pt = pts[i];
if (xmin > pt.x)
{
xmin = pt.x;
ptxmin = pt;
}
if (xmax < pt.x)
{
xmax = pt.x;
ptxmax = pt;
}
if (ymin > pt.y)
{
ymin = pt.y;
ptymin = pt;
}
if (ymax < pt.y)
{
ymax = pt.y;
ptymax = pt;
}
}
std::vector<cv::Point> res;
res.push_back(ptxmin);
res.push_back(ptxmax);
res.push_back(ptymin);
res.push_back(ptymax);
return res;
}
void sortCorners(std::vector<cv::Point2f>& corners)
{
std::vector<cv::Point2f> top, bot;
cv::Point2f center;
// Get mass center
for (int i = 0; i < corners.size(); i++)
center += corners[i];
center *= (1. / corners.size());
for (int i = 0; i < corners.size(); i++)
{
if (corners[i].y < center.y)
top.push_back(corners[i]);
else
bot.push_back(corners[i]);
}
corners.clear();
if (top.size() == 2 && bot.size() == 2) {
cv::Point2f tl = top[0].x > top[1].x ? top[1] : top[0];
cv::Point2f tr = top[0].x > top[1].x ? top[0] : top[1];
cv::Point2f bl = bot[0].x > bot[1].x ? bot[1] : bot[0];
cv::Point2f br = bot[0].x > bot[1].x ? bot[0] : bot[1];
corners.push_back(tl);
corners.push_back(tr);
corners.push_back(br);
corners.push_back(bl);
}
}
int main(int, char**)
{
int largest_area = 0;
int largest_contour_index = 0;
cv::Rect bounding_rect;
Mat src, edges;
src = imread("20628991_10159154614610574_1244594322_o.jpg");
cvtColor(src, edges, COLOR_BGR2GRAY);
GaussianBlur(edges, edges, Size(5, 5), 1.5, 1.5);
erode(edges, edges, Mat());// these lines may need to be optimized
dilate(edges, edges, Mat());
dilate(edges, edges, Mat());
erode(edges, edges, Mat());
Canny(edges, edges, 150, 150, 3); // canny parameters may need to be optimized
imshow("edges", edges);
vector<Point> selected;
vector<vector<Point> > contours;
findContours(edges, contours, RETR_LIST, CHAIN_APPROX_SIMPLE);
for (size_t i = 0; i < contours.size(); i++)
{
Rect minRect = boundingRect(contours[i]);
if (minRect.width > 150 && minRect.height > 150) // this threshold may also need to be tuned
{
selected.insert(selected.end(), contours[i].begin(), contours[i].end());
}
}
convexHull(selected, selected);
RotatedRect minRect = minAreaRect(selected);
std::vector<cv::Point> corner_points = extremePoints(selected);
std::vector<cv::Point2f> corners;
corners.push_back(corner_points[0]);
corners.push_back(corner_points[1]);
corners.push_back(corner_points[2]);
corners.push_back(corner_points[3]);
sortCorners(corners);
cv::Mat quad = cv::Mat::zeros(norm(corners[1] - corners[2]), norm(corners[2] - corners[3]), CV_8UC3);
std::vector<cv::Point2f> quad_pts;
quad_pts.push_back(cv::Point2f(0, 0));
quad_pts.push_back(cv::Point2f(quad.cols, 0));
quad_pts.push_back(cv::Point2f(quad.cols, quad.rows));
quad_pts.push_back(cv::Point2f(0, quad.rows));
cv::Mat transmtx = cv::getPerspectiveTransform(corners, quad_pts);
cv::warpPerspective(src, quad, transmtx, quad.size());
resize(quad, quad, Size(), 0.25, 0.25); // you can remove this line to keep the image original size
imshow("quad", quad);
polylines(src, selected, true, Scalar(0, 0, 255), 2);
resize(src, src, Size(), 0.5, 0.5); // you can remove this line to keep the image original size
imshow("result", src);
waitKey(0);
return 0;
}
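A note on the exception mentioned above: if no contour passes the 150x150 size filter, selected stays empty, and both convexHull and minAreaRect will throw. Below is only a minimal sketch of a guard, reusing the variable names from the code above; it does not fix the detection itself.
// Sketch: bail out before convexHull()/minAreaRect() when nothing passed the size filter.
if (selected.empty())
{
    cout << "No contour larger than 150x150 was found; try relaxing the size or Canny thresholds." << endl;
    return -1;
}
convexHull(selected, selected); // safe to continue as before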
Strange, I did it with exactly that (blur, dilate, Canny):
The code is in Python, but it is nothing more than OpenCV function calls, so it should be easy to follow. As one of my references I used this answer, which is in C++; it also shows how to correct the perspective and turn the detection into a rectangle:
import numpy as np
import cv2
img = cv2.imread('sngo1.jpg')
#resize and create a copy for future drawing
resize_coeff = 0.5
h, w, c = img.shape
img_in = cv2.resize(img, (int(resize_coeff*w), int(resize_coeff*h)))
img_out = img_in.copy()
#median and canny
img_in = cv2.medianBlur(img_in, 5)
img_in = cv2.Canny(img_in, 100, 200)
#morphological close for our edges
kernel = np.ones((17, 17), np.uint8)
img_in = cv2.morphologyEx(img_in, cv2.MORPH_CLOSE, kernel, iterations = 1)
#find contours, get max by area
img_in, contours, hierarchy = cv2.findContours(img_in, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
max_index, max_area = max(enumerate([cv2.contourArea(x) for x in contours]), key = lambda x: x[1])
max_contour = contours[max_index]
#approximate it with a quadrangle
approx = cv2.approxPolyDP(max_contour, 0.1*cv2.arcLength(max_contour, True), True)
approx = approx[:,0,:]
cv2.drawContours(img_out, [approx], 0, (255, 0, 0), 2)
cv2.imwrite("result.png", img_out)
I would like to detect irregular rectangular objects using OpenCV and the approxPolyDP function. In the image I have 4 windows that can be approximated as rectangular contours with approxPolyDP, but I am not able to detect all of them once I set a certain threshold. I filter the windows by width, height, and the angle between the vertices. I would like more robust code that can detect all 4 windows. Any help? Here is my code:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src; Mat src_gray;
int thresh = 5;
int max_thresh = 60000;
RNG rng(12345);
void thresh_callback(int, void* );
static double angle(Point pt1, Point pt2, Point pt0)
{
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}
void setLabel(cv::Mat& im, const std::string label, std::vector<cv::Point>& contour)
{
int fontface = cv::FONT_HERSHEY_SIMPLEX;
double scale = 0.4;
int thickness = 1;
int baseline = 0;
cv::Size text = cv::getTextSize(label, fontface, scale, thickness, &baseline);
cv::Rect r = cv::boundingRect(contour);
cv::Point pt(r.x + ((r.width - text.width) / 2), r.y + ((r.height + text.height) / 2));
cv::rectangle(im, pt + cv::Point(0, baseline), pt + cv::Point(text.width, -text.height), CV_RGB(255,255,255), CV_FILLED);
cv::putText(im, label, pt, fontface, scale, CV_RGB(0,0,0), thickness, 8);
}
int main()
{
cv::Mat src = cv::imread("p3.png");
resize(src, src, Size(640,480), 0, 0, INTER_CUBIC);
const char* source_window = "Source";
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, src );
createTrackbar( " Canny thresh:", "Source", &thresh, max_thresh, thresh_callback );
thresh_callback( 0, 0 );
waitKey(0);
return(0);
}
void thresh_callback(int, void* ) {
src = imread("p3.png");
resize(src, src, Size(640,480), 0, 0, INTER_CUBIC);
//================================
Mat gray, bw, dil, erd;
cvtColor(src, gray, CV_BGR2GRAY); // convert to grayscale before edge detection
Canny(gray, bw, thresh, thresh*1, 5);
//Canny( src_gray, canny_output, thresh, thresh*1, 3 );
dilate(bw,dil,Mat());
erode(dil,erd,Mat());
Mat tmp=bw.clone();
Size kernalSize (15,20);
Mat element = getStructuringElement (MORPH_RECT, kernalSize, Point(4,4) );
morphologyEx( bw, bw, MORPH_CLOSE, element );
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;//novo
findContours(bw.clone(), contours, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
vector<Point> approx;
Mat dst = src.clone();
for( int i = 0; i< contours.size(); i++ )
{
approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true) * 0.09, true);
if (approx.size() == 4)
{
int vtc = approx.size();
vector<double> cos;
for(int j = 2; j < vtc + 1; j++)
cos.push_back(angle(approx[j%vtc], approx[j-2], approx[j-1]));
sort(cos.begin(), cos.end());
double mincos = cos.front();
double maxcos = cos.back();
if (vtc == 4 && mincos >= -0.6 && maxcos <= 0.6)
{
Rect r = boundingRect(contours[i]);
double ratio = abs(1 - (double)r.width / r.height);
if (r.height >50 && r.width >20 && r.height < 640 && r.width < 640 && r.height>r.width ) /* constraints on region size */
{
//Rect r = boundingRect(contours[i]);
//double ratio = abs(1 - (double)r.width / r.height);
line(dst, approx.at(0), approx.at(1), cvScalar(0,0,255),4);
line(dst, approx.at(1), approx.at(2), cvScalar(0,0,255),4);
line(dst, approx.at(2), approx.at(3), cvScalar(0,0,255),4);
line(dst, approx.at(3), approx.at(0), cvScalar(0,0,255),4);
}
}
}
}
imshow("detected windows", dst);
}
Here is the source image:
And here are the detected windows:
And here are the drawn contours:
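One thing that might make the detection less dependent on the trackbar value is deriving the Canny thresholds from Otsu's threshold of the grayscale image instead of a hand-tuned number. The following is only a sketch of that idea; it assumes the same headers and using-directives as the code above, and the 0.5 lower/upper ratio is an assumed heuristic, not part of the original code.
// Sketch: Otsu-derived Canny thresholds instead of a hand-tuned trackbar value.
static void autoCanny(const Mat& srcBgr, Mat& edges)
{
    Mat gray, otsuMask;
    cvtColor(srcBgr, gray, CV_BGR2GRAY);
    // threshold() returns the threshold value Otsu computed
    double otsuThresh = threshold(gray, otsuMask, 0, 255, THRESH_BINARY | THRESH_OTSU);
    Canny(gray, edges, 0.5 * otsuThresh, otsuThresh); // 0.5 is an assumed ratio
}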
The following code is a particle filter for the mouse, which I changed to track a colored object in a video; that part works.
Now I want to add scale to it, but it currently only works with x and y. I tried to add scale and failed. Please help me add the scale of the detected object to the particle filter.
// Module "core"
#include <opencv2/core/core.hpp>
#include <opencv2/video/background_segm.hpp>
// Module "highgui"
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/legacy/legacy.hpp>
// Module "imgproc"
#include <opencv2/imgproc/imgproc.hpp>
#include "opencv2/videostab/videostab.hpp"
// Module "video"
#include <opencv2/video/video.hpp>
// Output
#include <iostream>
// Vector
#include <vector>
#define drawCross( center, color, d ) \
line( frame, cv::Point( center.x - d, center.y - d ), \
cv::Point( center.x + d, center.y + d ), color, 1, CV_AA, 0); \
line( frame, cv::Point( center.x + d, center.y - d ), \
cv::Point( center.x - d, center.y + d ), color, 1, CV_AA, 0 )
#define PLOT_PARTICLES 1
using namespace std;
using namespace cv;
// >>>>> Color to be tracked
#define MIN_H_BLUE 200
#define MAX_H_BLUE 300
// <<<<< Color to be tracked
vector<cv::Point> mouseV, particleV;
int main()
{
// Camera frame
cv::Mat frame;
char code = (char)-1;
cv::namedWindow("mouse particle");
cv::Mat_<float> measurement(3, 1); // x, y, and a scale value for the detected box
measurement.setTo(cv::Scalar(0));
int dim = 2;
int nParticles = 300;
float xRange = 650.0;
float yRange = 650.0;
float minRange[] = { 0, 0 };
float maxRange[] = { xRange, yRange };
CvMat LB, UB;
cvInitMatHeader(&LB, 2, 1, CV_32FC1, minRange);
cvInitMatHeader(&UB, 2, 1, CV_32FC1, maxRange);
CvConDensation* condens = cvCreateConDensation(dim, dim, nParticles);
cvConDensInitSampleSet(condens, &LB, &UB);
condens->DynamMatr[0] = 1.0;
condens->DynamMatr[1] = 0.0;
condens->DynamMatr[2] = 0.0;
condens->DynamMatr[3] = 1.0;
// Camera Index
string idx = "a.mp4";
// Camera Capture
cv::VideoCapture cap;
// >>>>> Camera Settings
if (!cap.open(idx))
{
cout << "Webcam not connected.\n" << "Please verify\n";
return EXIT_FAILURE;
}
cap.set(CV_CAP_PROP_FRAME_WIDTH, 1024);
cap.set(CV_CAP_PROP_FRAME_HEIGHT, 768);
// <<<<< Camera Settings
cout << "\nHit 'q' to exit...\n";
char ch = 0;
double ticks = 0;
bool found = false;
int notFoundCount = 0;
// >>>>> Main loop
while (ch != 'q' && ch != 'Q')
{
double precTick = ticks;
ticks = (double) cv::getTickCount();
double dT = (ticks - precTick) / cv::getTickFrequency(); //seconds
// Frame acquisition
cap >> frame;
if (frame.empty())
break; // stop when the video file ends
mouseV.clear();
particleV.clear();
// >>>>> Noise smoothing
cv::Mat blur;
cv::GaussianBlur(frame, blur, cv::Size(5, 5), 3.0, 3.0);
// <<<<< Noise smoothing
// >>>>> HSV conversion
cv::Mat frmHsv;
cv::cvtColor(blur, frmHsv, CV_BGR2HSV);
// <<<<< HSV conversion
// >>>>> Color Thresholding
// Note: change parameters for different colors
cv::Mat rangeRes = cv::Mat::zeros(frame.size(), CV_8UC1);
cv::inRange(frmHsv, cv::Scalar(MIN_H_BLUE / 2, 100, 80),
cv::Scalar(MAX_H_BLUE / 2, 255, 255), rangeRes);
// <<<<< Color Thresholding
// >>>>> Improving the result
cv::erode(rangeRes, rangeRes, cv::Mat(), cv::Point(-1, -1), 2);
cv::dilate(rangeRes, rangeRes, cv::Mat(), cv::Point(-1, -1), 2);
// <<<<< Improving the result
// >>>>> Contours detection
vector<vector<cv::Point> > contours;
cv::findContours(rangeRes, contours, CV_RETR_EXTERNAL,
CV_CHAIN_APPROX_NONE);
// <<<<< Contours detection
// >>>>> Filtering
vector<vector<cv::Point> > balls;
vector<cv::Rect> ballsBox;
for (size_t i = 0; i < contours.size(); i++)
{
cv::Rect bBox;
bBox = cv::boundingRect(contours[i]);
float ratio = (float) bBox.width / (float) bBox.height;
if (ratio > 1.0f)
ratio = 1.0f / ratio;
// Searching for a bBox almost square
// if (ratio > 0.55 && bBox.area() >= 50)
// {
balls.push_back(contours[i]);
ballsBox.push_back(bBox);
measurement(0) = bBox.x;
measurement(1) = bBox.y;
measurement(2) = bBox.width; // crude scale measurement: width of the detected box
//cout << "Balls found:" << bBox.x << endl;
// }
}
/*
cout << "Balls found:" << ballsBox.size() << endl;
*/
cv::Point measPt(measurement(0),measurement(1));
mouseV.push_back(measPt);
for (int i = 0; i < condens->SamplesNum; i++) {
float diffX = (measurement(0) - condens->flSamples[i][0])/xRange;
float diffY = (measurement(1) - condens->flSamples[i][1])/yRange;
condens->flConfidence[i] = 1.0 / (sqrt(diffX * diffX + diffY * diffY));
// plot particles
#ifdef PLOT_PARTICLES
cv::Point partPt(condens->flSamples[i][0], condens->flSamples[i][1]);
drawCross(partPt , cv::Scalar(255,0,255), 2);
#endif
}
cvConDensUpdateByTime(condens);
cv::Point statePt(condens->State[0], condens->State[1]);
particleV.push_back(statePt);
for (int i = 0; i < particleV.size() - 1; i++) {
line(frame, particleV[i], particleV[i+1], cv::Scalar(0,255,0), 1);
}
drawCross( statePt, cv::Scalar(255,255,255), 5 );
drawCross( measPt, cv::Scalar(0,0,255), 5 );
for (size_t i = 0; i < balls.size(); i++)
{
cv::drawContours(frame, balls, i, CV_RGB(20,150,20), 1);
cv::rectangle(frame, ballsBox[i], CV_RGB(0,255,0), 2);
cv::Point center;
center.x = ballsBox[i].x + ballsBox[i].width / 2;
center.y = ballsBox[i].y + ballsBox[i].height / 2;
cv::circle(frame, center, 2, CV_RGB(20,150,20), -1);
stringstream sstr;
sstr << "(" << center.x << "," << center.y << ")";
cv::putText(frame, sstr.str(),
cv::Point(center.x + 3, center.y - 3),
cv::FONT_HERSHEY_SIMPLEX, 0.5, CV_RGB(20,150,20), 2);
}
cv::imshow("mouse particle", frame);
cv::imshow("ssssssss", rangeRes);
ch = cv::waitKey(1);
}
// <<<<< Main loop
return EXIT_SUCCESS;
}
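For reference, here is a minimal sketch of how the ConDensation state could be extended from (x, y) to (x, y, scale). It is not a drop-in replacement for the loop above: the upper bound on the scale (300), the dummy measurement values, and the use of the bounding-box width as the scale measurement are all assumptions; only the legacy ConDensation API from the question is used.
// Sketch only: 3-state (x, y, scale) ConDensation setup with the legacy API.
#include <opencv2/core/core.hpp>
#include <opencv2/legacy/legacy.hpp>
#include <cmath>

int main()
{
    const int dim = 3;                              // x, y, scale
    const int nParticles = 300;
    float minRange[] = { 0.f, 0.f, 0.f };
    float maxRange[] = { 650.f, 650.f, 300.f };     // assumed upper bound on the scale
    CvMat LB, UB;
    cvInitMatHeader(&LB, dim, 1, CV_32FC1, minRange);
    cvInitMatHeader(&UB, dim, 1, CV_32FC1, maxRange);

    CvConDensation* condens = cvCreateConDensation(dim, dim, nParticles);
    cvConDensInitSampleSet(condens, &LB, &UB);

    // Identity dynamics over the full dim x dim matrix (row-major),
    // so the scale component is propagated just like x and y.
    for (int r = 0; r < dim; r++)
        for (int c = 0; c < dim; c++)
            condens->DynamMatr[r * dim + c] = (r == c) ? 1.0f : 0.0f;

    // Per frame: x/y from the bounding-box corner, scale from its width (dummy values here).
    float measX = 100.f, measY = 120.f, measS = 40.f;
    for (int i = 0; i < condens->SamplesNum; i++)
    {
        float dX = (measX - condens->flSamples[i][0]) / maxRange[0];
        float dY = (measY - condens->flSamples[i][1]) / maxRange[1];
        float dS = (measS - condens->flSamples[i][2]) / maxRange[2];
        condens->flConfidence[i] = 1.0f / std::sqrt(dX * dX + dY * dY + dS * dS + 1e-6f);
    }
    cvConDensUpdateByTime(condens);

    // Estimated state after the update, including the tracked scale.
    float estX = condens->State[0];
    float estY = condens->State[1];
    float estScale = condens->State[2];
    (void)estX; (void)estY; (void)estScale;

    cvReleaseConDensation(&condens);
    return 0;
}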
I want to use the active contour (snake) OpenCV function. I have been trying different code, but I can't figure out how to get the snake to move across the image. When I run the code below I get back the same points as the line I drew across the image. How can I check whether the snake is actually running?
//USED TO OPEN IMAGE
#include "opencv2/highgui/highgui.hpp"
//USED TO GAUSSIAN FILTER
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/core/core.hpp"
#include <iostream>
#include <fstream>
#include "opencv2/legacy/legacy.hpp"
#include <cstdlib>
using namespace std;
using namespace cv;
void myLine(Mat copy, Point pt1, Point pt2);
void Extract(Mat copy, Point pt1, Point pt2);
void readFile(Mat copy);
void snake(Mat copy, CvPoint* pointsA, int numPoints);
//void ActiveSnake(Mat copy, Point pt1, Point pt2);
int main(){
//CODE TO LOADING IMAGE
Mat frame = imread ("First.png", CV_LOAD_IMAGE_GRAYSCALE);
//putText(frame,"First", Point (120, 330), 1, 4, CV_RGB(255, 0, 0), 3, 8, false);
int rows = frame.rows;
int cols = frame.cols;
//If the image wont appear
if (frame.empty())
{
cout<<"No Image!"<<endl;
return -1;
}
//Cloning
Mat copy = frame.clone();
//Gaussian Filter (Originial Image, Clone Image, KSize(MUST BE POSITIVE OR ODD OR ZERO width, height), sigmaX, sigmaY
GaussianBlur(frame, copy, Size(9, 9), 0, 0);
Point pt1, pt2;
pt1.x = 0;
pt1.y = 95;
pt2.x = 900;
pt2.y = 95;
Extract (copy, pt1, pt2);
readFile(copy);
return 0;
}
void myLine (Mat copy, Point start, Point end)
{
int thickness = 2;
int lineType = 8;
line (copy, start, end, Scalar(0,0,0), thickness, lineType);
}
void Extract (Mat copy, Point pt1, Point pt2)
{
myLine(copy, Point(pt1), Point (pt2));
ofstream myfile;
myfile.open ("Test1.txt");
int copypt1x;
copypt1x = pt1.x;
for (int i = pt1.x; i <=pt2.x; i++){
for (int j = pt1.y; j <= pt2.y; j++)
{
int copypt1y = j;
myfile << copypt1x << "\t" << copypt1y << endl;
}
copypt1x= copypt1x +1;
cout<<"\n";
}
myfile.close();
}
void readFile(Mat copy)
{
ifstream myfile;
CvPoint pointsA [1000];
myfile.open("Test1.txt");
if (!myfile.is_open()){
exit(EXIT_FAILURE);
}
int i = 0;
myfile >> pointsA[i].x>>pointsA[i].y;
while (myfile.good()){
i++;
myfile >> pointsA[i].x>>pointsA[i].y;
}
snake(copy, pointsA, i); // pass the number of points actually read from the file
}
void snake(Mat copy, CvPoint* pointsA, int numPoints)
{
//IplImage* image2;
//image2 = cvCloneImage(&(IplImage)copy);
IplImage* image2;
image2 = cvCreateImage(cvSize(copy.cols, copy.rows), 8, 1); // 8-bit, single channel, matching the grayscale input
IplImage ipltemp=copy;
cvCopy(&ipltemp,image2);
float alpha = 0.5; // Weight of continuity energy
float beta = 0.5; // Weight of curvature energy
float gamma = 0.9; // Weight of image energy
CvSize size; // Size of neighborhood of every point used to search the minimumm have to be odd
size.width = 5;
size.height = 5;
CvTermCriteria criteria;
criteria.type = CV_TERMCRIT_ITER; // terminate processing after X iteration
criteria.max_iter = 10000;
criteria.epsilon = 0.1;
cvSnakeImage(image2, pointsA, numPoints, &alpha, &beta, &gamma, CV_VALUE, size, criteria, 0);
}
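To check whether the snake actually moved, one option is to keep a copy of the contour before calling cvSnakeImage and compare it afterwards. A minimal sketch, assuming the variables from snake() above plus an extra #include <vector>:
// Sketch: count how many points cvSnakeImage actually moved.
std::vector<CvPoint> before(pointsA, pointsA + numPoints);
cvSnakeImage(image2, pointsA, numPoints, &alpha, &beta, &gamma, CV_VALUE, size, criteria, 0);
int moved = 0;
for (int i = 0; i < numPoints; i++)
    if (pointsA[i].x != before[i].x || pointsA[i].y != before[i].y)
        moved++;
std::cout << moved << " of " << numPoints << " points moved" << std::endl;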
The following code is for detecting lines with the Hough transform, but it doesn't work properly: it says "cannot open pic1.png". Can you please check this code and tell me what is wrong with it? I am using the OpenCV 2.3 library and Visual Studio 2010. Thank you.
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include"cv.h"
#include"cxcore.h"
#include"stdafx.h"
#include <iostream>
using namespace cv;
using namespace std;
void help()
{
cout << "\nThis program demonstrates line finding with the Hough transform.\n"
"Usage:\n"
"./houghlines <image_name>, Default is pic1.png\n" << endl;
}
int main(int argc, char** argv)
{
const char* filename = argc >= 2 ? argv[1] : "pic1.png";
Mat src = imread(filename, 0);
if(src.empty())
{
help();
cout << "can not open " << filename << endl;
return -1;
}
Mat dst, cdst;
Canny(src, dst, 50, 200, 3);
cvtColor(dst, cdst, CV_GRAY2BGR);
#if 0
vector<Vec2f> lines;
HoughLines(dst, lines, 1, CV_PI/180, 100, 0, 0 );
for( size_t i = 0; i < lines.size(); i++ )
{
float rho = lines[i][0], theta = lines[i][1];
Point pt1, pt2;
double a = cos(theta), b = sin(theta);
double x0 = a*rho, y0 = b*rho;
pt1.x = cvRound(x0 + 1000*(-b));
pt1.y = cvRound(y0 + 1000*(a));
pt2.x = cvRound(x0 - 1000*(-b));
pt2.y = cvRound(y0 - 1000*(a));
line( cdst, pt1, pt2, Scalar(0,0,255), 3, CV_AA);
}
#else
vector<Vec4i> lines;
HoughLinesP(dst, lines, 1, CV_PI/180, 50, 50, 10 );
for( size_t i = 0; i < lines.size(); i++ )
{
Vec4i l = lines[i];
line( cdst, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0,0,255), 3, CV_AA);
}
#endif
imshow("source", src);
imshow("detected lines", cdst);
waitKey();
return 0;
}
It's looking for pic1.png in the same directory that the executable is running from.
You either need to copy the image to the same directory or enter the path (either full or relative) to the image file.
Mat src = imread("pic1.png", 0); // put the image pic1.png at the current directory.
Mat src = imread("C://...", 0); // at some other directory like #ChrisF stated.