Draw the lines detected by cv::HoughLines - c++

On this site (tutorial) it is shown how to draw the lines detected by cv::HoughLines, but I can't understand how it finds the two points (pt1, pt2) used to draw each line.
for( size_t i = 0; i < lines.size(); i++ )
{
    float rho = lines[i][0], theta = lines[i][1];
    Point pt1, pt2;
    double a = cos(theta), b = sin(theta);
    double x0 = a*rho, y0 = b*rho;
    pt1.x = cvRound(x0 + 1000*(-b)); //??
    pt1.y = cvRound(y0 + 1000*(a));  //??
    pt2.x = cvRound(x0 - 1000*(-b)); //??
    pt2.y = cvRound(y0 - 1000*(a));  //??
    line( cdst, pt1, pt2, Scalar(0,0,255), 3, CV_AA);
}
Here is an example from the OpenCV cookbook. I can follow the reasoning behind this code, but it is more verbose:
for(auto const &data : lines){
    float const rho = data[0];
    float const theta = data[1];
    if((theta < PI/4. || theta > 3. * PI/4.)){
        cv::Point pt1(rho / std::cos(theta), 0);
        cv::Point pt2( (rho - result.rows * std::sin(theta))/std::cos(theta), result.rows);
        cv::line(result, pt1, pt2, cv::Scalar(255), 1);
    }else{
        cv::Point pt1(0, rho / std::sin(theta));
        cv::Point pt2(result.cols, (rho - result.cols * std::cos(theta))/std::sin(theta));
        cv::line(result, pt1, pt2, cv::Scalar(255), 1);
    }
}

The Hough line transform returns polar coordinates. To display the lines on a 2D picture, the coordinates have to be converted to Cartesian coordinates. Here is some more info about this: http://www.mathsisfun.com/polar-cartesian-coordinates.html
A line returned from the Hough transform is described by only one Cartesian point (the intersection between the blue and red lines):
So to display the line, the author converts the polar coordinates to that Cartesian point and then computes start and end points placed at fixed offsets of -1000 and +1000 pixels from it along the line's direction:
pt1.x = cvRound(x0 + 1000*(-b)); //??
pt1.y = cvRound(y0 + 1000*(a)); //??
pt2.x = cvRound(x0 - 1000*(-b)); //??
pt2.y = cvRound(y0 - 1000*(a)); //??
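In other words, (x0, y0) = (rho*cos(theta), rho*sin(theta)) is the foot of the perpendicular from the origin onto the line, and (-sin(theta), cos(theta)) is a unit vector pointing along the line, so pt1 and pt2 are simply 1000 pixels away from that foot point in both directions. A minimal sketch of the same computation with the geometry spelled out (the variable names are mine, not from the tutorial):
// Sketch only: identical math to the tutorial loop, just with the vectors named.
float rho = lines[i][0], theta = lines[i][1];
cv::Point2d foot(rho * std::cos(theta), rho * std::sin(theta)); // closest point of the line to the origin
cv::Point2d dir(-std::sin(theta), std::cos(theta));             // unit direction along the line
cv::Point pt1(cvRound(foot.x + 1000 * dir.x), cvRound(foot.y + 1000 * dir.y));
cv::Point pt2(cvRound(foot.x - 1000 * dir.x), cvRound(foot.y - 1000 * dir.y));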
One option to find the intersection between two returned lines is to use this equation:
For more information about the implementation, see this tutorial.
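As a hedged sketch of that idea in code: every detected line satisfies x*cos(theta) + y*sin(theta) = rho, so the intersection of two lines is the solution of a 2x2 linear system. The function name and the parallel-line tolerance below are my own choices:
// Sketch: intersection of two HoughLines results given in (rho, theta) form, via Cramer's rule.
bool intersection(const cv::Vec2f& l1, const cv::Vec2f& l2, cv::Point2f& out)
{
    double a1 = std::cos(l1[1]), b1 = std::sin(l1[1]), c1 = l1[0];
    double a2 = std::cos(l2[1]), b2 = std::sin(l2[1]), c2 = l2[0];
    double det = a1 * b2 - a2 * b1;
    if (std::abs(det) < 1e-6) return false;  // (nearly) parallel lines, no single intersection
    out.x = static_cast<float>((c1 * b2 - c2 * b1) / det);
    out.y = static_cast<float>((a1 * c2 - a2 * c1) / det);
    return true;
}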

Related

opencv road line detection with HoughLines

Hello, I have a simple street image.
I want to detect the road lines like here,
but so far I can only get this.
I don't understand what I am doing wrong. This is my code; I need to do this with HoughLines:
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
// Declare the output variables
Mat dst, cdst, cdstP;
// Loads an image
Mat src = imread( "/Users/user/Desktop/opencv_tests/street_scene.png", IMREAD_COLOR );
Canny(src, dst, 700, 500, 3);
// Copy edges to the images that will display the results in BGR
cvtColor(dst, cdst, COLOR_GRAY2BGR);
cdstP = cdst.clone();
// Standard Hough Line Transform
vector<Vec2f> lines; // will hold the results of the detection
HoughLines(dst, lines, 1, CV_PI/30, 75, 0, 0 );
// Draw the lines
for( size_t i = 0; i < lines.size(); i++ )
{
float rho = lines[i][0], theta = lines[i][1];
Point pt1, pt2;
double a = cos(theta), b = sin(theta);
double x0 = a*rho;
double y0 = b*rho;
pt1.x = cvRound(x0 + 1000*(-b));
pt1.y = cvRound(y0 + 1000*(a));
pt2.x = cvRound(x0 - 1000*(-b));
pt2.y = cvRound(y0 - 1000*(a));
line( src, pt1, pt2, Scalar(0,0,255), 3, LINE_AA);
}
imshow("Source", src);
waitKey();
return 0;
}

Hough transform: converting old OpenCV code into new OpenCV

I am trying to parallelize a Hough transform. This is code I found online, and I am new to using OpenCV 2 in C++. Does anyone know how to convert this code into the new OpenCV API?
//Next do the Canny and Hough transforms on the smaller image
IplImage* src;
src = cvLoadImage(fileName, 0);
IplImage* dst = cvCreateImage( cvGetSize(src), 8, 1 );
IplImage* color_dst = cvCreateImage( cvGetSize(src), 8, 3 );
IplImage* final_dst = cvCreateImage( cvGetSize(src), 8, 1 );
CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq* lines = 0;
int i;

cvCanny( src, dst, 50, 200, 3 );
cvCvtColor( dst, color_dst, CV_GRAY2BGR );
lines = cvHoughLines2( dst,
                       storage,
                       CV_HOUGH_STANDARD,
                       1,
                       CV_PI/180,
                       100,
                       0,
                       0 );
for( i = 0; i < MIN(lines->total,100); i++ )
{
    float* line = (float*)cvGetSeqElem(lines,i);
    float rho = line[0];
    float theta = line[1];
    CvPoint pt1, pt2;
    double a = cos(theta), b = sin(theta);
    double x0 = a*rho, y0 = b*rho;
    pt1.x = cvRound(x0 + 1000*(-b));
    pt1.y = cvRound(y0 + 1000*(a));
    pt2.x = cvRound(x0 - 1000*(-b));
    pt2.y = cvRound(y0 - 1000*(a));
    cvLine( color_dst, pt1, pt2, CV_RGB(255,0,0), 2, 8 );
}
cvCvtColor(color_dst, final_dst, CV_BGR2GRAY);
cvSaveImage(fileName, final_dst);
I have tried a bit, but I think I failed:
//Next do the Hough transforms on the smaller image
cv::Mat src = cv::imread(fileName);
cv::Mat dst;
cv::Mat color_dst;
cv::Mat final_dst;
//IplImage src;
//src=cvLoadImage(fileName, 0);
//IplImage* dst = cvCreateImage( cvGetSize(src), 8, 1 );
//IplImage* color_dst = cvCreateImage( cvGetSize(src), 8, 3 );
//IplImage* final_dst = cvCreateImage( cvGetSize(src), 8, 1 );
//CvMemStorage* storage = cvCreateMemStorage(0);
cv::namedWindow(CW_IMG_ORIGINAL, cv::WINDOW_NORMAL);
cv::resizeWindow(CW_IMG_ORIGINAL, 1280, 720);
cv::moveWindow(CW_IMG_ORIGINAL, 10, 10);
cv::Mat lines;
//CvSeq* lines = 0;
//cv::Seq<linesTp>;
int i;
cvHoughLines2;
cv::Canny( src, dst, 50, 200, 3 );
cv::cvtColor( dst, color_dst, CV_GRAY2BGR );
cv::HoughLines( dst, lines,
                CV_HOUGH_STANDARD,
                1,
                CV_PI/180,
                100,
                0,
                0 );
for( i = 0; i < 100; i++ )
{
    //float* line = (float*)cv::getSeqElem(lines, i);
    float* line = lines.at(i);
    float rho = line[0];
    float theta = line[1];
    cv::Point pt1, pt2;
    double a = cos(theta), b = sin(theta);
    double x0 = a*rho, y0 = b*rho;
    pt1.x = cvRound(x0 + 1000*(-b));
    pt1.y = cvRound(y0 + 1000*(a));
    pt2.x = cvRound(x0 - 1000*(-b));
    pt2.y = cvRound(y0 - 1000*(a));
    cv::line(color_dst, pt1, pt2, CV_RGB(255, 0, 0), 2, 8);
}
cv::cvtColor(color_dst, final_dst, CV_BGR2GRAY);
cv::imshow(CW_IMG_ORIGINAL, final_dst);
I fail because I don't know how to feed the detected Hough lines into the drawing loop. Previously the iteration used cvHoughLines2, but cvHoughLines2 needs a CvArr, and cvLoadImage no longer works in OpenCV 4.1.2, so I can't use IplImage and cvLoadImage to run this code.
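For what it's worth, here is a minimal sketch of how the same pipeline might look with the C++ API only (an untested adaptation, not the original author's code; it assumes the usual <opencv2/opencv.hpp>, <vector> and <algorithm> includes). HoughLines fills a std::vector<cv::Vec2f> directly, so the CvSeq/CvMemStorage machinery disappears:
// Sketch: modern-API equivalent of the cvCanny/cvHoughLines2 pipeline above.
cv::Mat src = cv::imread(fileName, cv::IMREAD_GRAYSCALE);
cv::Mat dst, color_dst, final_dst;
cv::Canny(src, dst, 50, 200, 3);
cv::cvtColor(dst, color_dst, cv::COLOR_GRAY2BGR);
std::vector<cv::Vec2f> lines;                    // replaces CvSeq* + CvMemStorage
cv::HoughLines(dst, lines, 1, CV_PI/180, 100);
for (size_t i = 0; i < std::min(lines.size(), size_t(100)); i++)
{
    float rho = lines[i][0], theta = lines[i][1];
    double a = cos(theta), b = sin(theta);
    double x0 = a * rho, y0 = b * rho;
    cv::Point pt1(cvRound(x0 + 1000 * (-b)), cvRound(y0 + 1000 * (a)));
    cv::Point pt2(cvRound(x0 - 1000 * (-b)), cvRound(y0 - 1000 * (a)));
    cv::line(color_dst, pt1, pt2, cv::Scalar(0, 0, 255), 2, cv::LINE_8); // CV_RGB(255,0,0) is red = BGR (0,0,255)
}
cv::cvtColor(color_dst, final_dst, cv::COLOR_BGR2GRAY);
cv::imwrite(fileName, final_dst);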

How to fill a rounded rectangle in openCV c++

I figured out a way to draw a rounded rectangle using OpenCV in C++. My function is:
void RoundedRectangle(cv::Mat& src,
                      cv::Point topLeft,
                      cv::Size rectSz,
                      const cv::Scalar lineColor,
                      const int thickness,
                      const int lineType,
                      const float cornerCurvatureRatio)
{
    // corners:
    // p1 - p2
    // |     |
    // p4 - p3
    //
    cv::Point p1 = topLeft;
    cv::Point p2 = cv::Point (p1.x + rectSz.width, p1.y);
    cv::Point p3 = cv::Point (p1.x + rectSz.width, p1.y + rectSz.height);
    cv::Point p4 = cv::Point (p1.x, p1.y + rectSz.height);
    float cornerRadius = rectSz.height*cornerCurvatureRatio;

    // draw straight lines
    cv::line(src, cv::Point (p1.x + cornerRadius, p1.y), cv::Point (p2.x - cornerRadius, p2.y), lineColor, thickness, lineType);
    cv::line(src, cv::Point (p2.x, p2.y + cornerRadius), cv::Point (p3.x, p3.y - cornerRadius), lineColor, thickness, lineType);
    cv::line(src, cv::Point (p4.x + cornerRadius, p4.y), cv::Point (p3.x - cornerRadius, p3.y), lineColor, thickness, lineType);
    cv::line(src, cv::Point (p1.x, p1.y + cornerRadius), cv::Point (p4.x, p4.y - cornerRadius), lineColor, thickness, lineType);

    // draw arcs
    cv::Size rad = cv::Size(cornerRadius, cornerRadius);
    cv::ellipse(src, p1 + cv::Point(cornerRadius, cornerRadius),   rad, 180.0, 0, 90, lineColor, thickness, lineType);
    cv::ellipse(src, p2 + cv::Point(-cornerRadius, cornerRadius),  rad, 270.0, 0, 90, lineColor, thickness, lineType);
    cv::ellipse(src, p3 + cv::Point(-cornerRadius, -cornerRadius), rad, 0.0,   0, 90, lineColor, thickness, lineType);
    cv::ellipse(src, p4 + cv::Point(cornerRadius, -cornerRadius),  rad, 90.0,  0, 90, lineColor, thickness, lineType);
}
Now I want to fill the rectangle. I found some fill functions such as cv::fillPoly() and cv::fillConvexPoly(); however, they need a vector of points. How can I get the list of points from my construction?
Nuzhny was correct: floodFill() is the easiest and potentially the fastest way to fill that rounded rectangle. (Tested with OpenCV 4.1.1)
One line of code will do it, but here is the entire example.
void rounded_rectangle( Mat& src, Point topLeft, Point bottomRight, const Scalar lineColor, int thickness, const int lineType, const int cornerRadius)
{
    Point p1 = topLeft;
    Point p2 = Point (bottomRight.x, topLeft.y);
    Point p3 = bottomRight;
    Point p4 = Point (topLeft.x, bottomRight.y);

    line(src, Point (p1.x+cornerRadius,p1.y), Point (p2.x-cornerRadius,p2.y), lineColor, thickness, lineType);
    line(src, Point (p2.x,p2.y+cornerRadius), Point (p3.x,p3.y-cornerRadius), lineColor, thickness, lineType);
    line(src, Point (p4.x+cornerRadius,p4.y), Point (p3.x-cornerRadius,p3.y), lineColor, thickness, lineType);
    line(src, Point (p1.x,p1.y+cornerRadius), Point (p4.x,p4.y-cornerRadius), lineColor, thickness, lineType);

    ellipse( src, p1+Point(cornerRadius, cornerRadius),   Size( cornerRadius, cornerRadius ), 180.0, 0, 90, lineColor, thickness, lineType );
    ellipse( src, p2+Point(-cornerRadius, cornerRadius),  Size( cornerRadius, cornerRadius ), 270.0, 0, 90, lineColor, thickness, lineType );
    ellipse( src, p3+Point(-cornerRadius, -cornerRadius), Size( cornerRadius, cornerRadius ), 0.0,   0, 90, lineColor, thickness, lineType );
    ellipse( src, p4+Point(cornerRadius, -cornerRadius),  Size( cornerRadius, cornerRadius ), 90.0,  0, 90, lineColor, thickness, lineType );

    // choose arbitrary starting point for fill => top left plus (10,10)
    Point fillFrom(topLeft.x+10, topLeft.y+10);
    Scalar fillColor(199, 120, 0);
    // You may want to use `lineColor` instead of `fillColor`
    floodFill(src, fillFrom, fillColor);
}
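A possible usage sketch (the canvas size and parameter values here are arbitrary, just for illustration):
// Hedged usage example for the function above.
cv::Mat canvas(400, 600, CV_8UC3, cv::Scalar(255, 255, 255));   // white canvas
rounded_rectangle(canvas, cv::Point(50, 50), cv::Point(350, 250), cv::Scalar(0, 0, 0), 2, cv::LINE_AA, 25);
cv::imshow("rounded", canvas);
cv::waitKey(0);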
To get the points from the shape that you constructed with cv::line and cv::ellipse, you could draw the shape on a black background and then find the contours of that image.
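A rough sketch of that mask-and-contours idea, using the RoundedRectangle function from the question (the variable values are placeholders):
// Sketch: draw the outline on a black single-channel mask, then recover its points with findContours.
cv::Mat mask = cv::Mat::zeros(src.size(), CV_8UC1);
RoundedRectangle(mask, topLeft, rectSz, cv::Scalar(255), 1, cv::LINE_8, cornerCurvatureRatio);
std::vector<std::vector<cv::Point>> contours;
cv::findContours(mask, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_NONE);
if (!contours.empty())
    cv::fillPoly(src, contours, cv::Scalar(255));   // contours[0] holds the outline points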
Another approach that doesn't use line and ellipse commands is to compute the contour of the shape directly using trig.
import cv2, numpy as np, math

# Define the rectangle parameters
directions = [(-1,0),(0,-1),(1,0),(0,1)]
ro = [(-1,-1),(1,-1),(1,1),(-1,1)]
next_corner = [3,0,1,2]
radius, increment, angle, leg = 56, 100, 0, 321
corners = [(500,500)]

# Create list of corners
for side in range(4):
    corners.append((corners[side][0]+leg*directions[side][0], corners[side][1]+leg*directions[side][1]))

# Distance function
def distance(a,b):
    return math.sqrt((a[0]-b[0])**2+(a[1]-b[1])**2)

# Compute the contour points for each side and corner
contour_points = []
for i in range(4):
    # Do the corner
    center = corners[i][0] + radius*ro[i][0], corners[i][1] + radius*ro[i][1]
    for angle_increment in range(increment):
        contour_points.append((int(center[0] + math.cos(angle) * radius), int(center[1] + math.sin(angle) * radius)))
        angle += .5*math.pi/increment
    # Do the line
    start = corners[i][0]+radius*directions[i][0], corners[i][1] + radius*directions[i][1]
    while distance(start, (corners[i][0]+radius*directions[i][0], corners[i][1] + radius*directions[i][1])) < leg-2*radius:
        contour_points.append(start)
        start = start[0]+directions[i][0], start[1]+directions[i][1]

# Draw the contour and show the image
img = np.zeros((600,600), np.uint8)
cv2.drawContours(img, [np.array(contour_points, dtype=np.int32)], 0, 255, -1)
cv2.imshow('img',img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Use cv::floodFill with start point inside your rectangle.
Here is my solution, in case it is of help to anybody.
#include <opencv2/opencv.hpp>
#include <cstdio>
#include <exception>
#include <string>
#include <vector>

void FilledRoundedRectangle(cv::Mat& src,                     //Image where rect is drawn
                            cv::Point topLeft,                //top left corner
                            cv::Size rectSz,                  //rectangle size
                            const cv::Scalar fillColor,       //fill color
                            const int lineType,               //type of line
                            const int delta,                  //angle between points on the ellipse
                            const float cornerCurvatureRatio) //curvature of the corner
{
    // corners:
    // p1 - p2
    // |     |
    // p4 - p3
    //
    cv::Point p1 = topLeft;
    cv::Point p2 = cv::Point (p1.x + rectSz.width, p1.y);
    cv::Point p3 = cv::Point (p1.x + rectSz.width, p1.y + rectSz.height);
    cv::Point p4 = cv::Point (p1.x, p1.y + rectSz.height);
    int cornerRadius = static_cast<int>(rectSz.height*cornerCurvatureRatio);

    std::vector<cv::Point> points;
    std::vector<cv::Point> pts;

    // Add arc points
    cv::Size rad = cv::Size(cornerRadius, cornerRadius);

    // segments:
    //    s2____s3
    // s1         s4
    // |           |
    // s8         s5
    //    s7_____s6
    //
    //Add arc s1 to s2
    cv::ellipse2Poly(p1 + cv::Point(cornerRadius, cornerRadius), rad, 180.0, 0, 90, delta, pts);
    points.insert(points.end(), pts.begin(), pts.end());
    pts.clear();
    //Add line s2 to s3
    points.push_back(cv::Point (p1.x + cornerRadius, p1.y));
    points.push_back(cv::Point (p2.x - cornerRadius, p2.y));
    //Add arc s3 to s4
    cv::ellipse2Poly(p2 + cv::Point(-cornerRadius, cornerRadius), rad, 270.0, 0, 90, delta, pts);
    points.insert(points.end(), pts.begin(), pts.end());
    pts.clear();
    //Add line s4 to s5
    points.push_back(cv::Point (p2.x, p2.y + cornerRadius));
    points.push_back(cv::Point (p3.x, p3.y - cornerRadius));
    //Add arc s5 to s6
    cv::ellipse2Poly(p3 + cv::Point(-cornerRadius, -cornerRadius), rad, 0.0, 0, 90, delta, pts);
    points.insert(points.end(), pts.begin(), pts.end());
    pts.clear();
    //Add line s6 to s7
    points.push_back(cv::Point (p4.x + cornerRadius, p4.y));
    points.push_back(cv::Point (p3.x - cornerRadius, p3.y));
    //Add arc s7 to s8
    cv::ellipse2Poly(p4 + cv::Point(cornerRadius, -cornerRadius), rad, 90.0, 0, 90, delta, pts);
    points.insert(points.end(), pts.begin(), pts.end());
    //Add line s8 to s1
    points.push_back(cv::Point (p1.x, p1.y + cornerRadius));
    points.push_back(cv::Point (p4.x, p4.y - cornerRadius));

    //fill polygon
    cv::fillConvexPoly(src, points, fillColor, lineType);
}

int main(int argc, char** argv)
{
    try
    {
        cv::Mat img = cv::Mat(600, 600, CV_8UC1, cv::Scalar(0));
        cv::Point topLeft(179, 179);
        cv::Size rectSz(321, 321);
        cv::Scalar fillColor(255, 255, 255);
        int delta = 1; //every 1 degree
        int lineType = cv::LINE_AA;
        float cornerCurvatureRatio = 0.1;
        FilledRoundedRectangle(img,
                               topLeft,
                               rectSz,
                               fillColor,
                               lineType,
                               delta,
                               cornerCurvatureRatio);
        cv::imshow("", img);
        cv::waitKey(0);
        return 0;
    } //end try
    catch ( std::exception const & ex )
    {
        std::string errMsg = ex.what();
        printf( "%s\n", errMsg.c_str() );
    }
    catch ( ... )
    {
        printf( "Error: unknown exception\n" );
    }
}
This answer is a generalization of the answer submitted by @Stephen Meschke, in case anyone is interested.
import cv2
import numpy as np

# Distance function
def distance(a,b):
    return np.sqrt((a[0]-b[0])**2+(a[1]-b[1])**2)

def RoundedRectPoints(topLeft,rectSz,cornerCurvature):
    # Define the rectangle parameters
    directions = [(-1,0),(0,-1),(1,0),(0,1)]
    ro = [(-1,-1),(1,-1),(1,1),(-1,1)]
    radius = cornerCurvature*(rectSz[0]+rectSz[1])
    increment = 100
    angle = 0
    corners = [(topLeft[0]+rectSz[0],topLeft[1]+rectSz[1])]
    # Create list of corners
    for side in range(4):
        corners.append((corners[side][0]+rectSz[side%2]*directions[side][0], corners[side][1]+rectSz[side%2]*directions[side][1]))
    # Compute the contour points for each side and corner
    contour_points = []
    for i in range(4):
        # Do the corner
        center = corners[i][0] + radius*ro[i][0], corners[i][1] + radius*ro[i][1]
        for angle_increment in range(increment):
            contour_points.append((int(center[0] + np.cos(angle) * radius), int(center[1] + np.sin(angle) * radius)))
            angle += .5*np.pi/increment
        # Do the line
        start = corners[i][0]+radius*directions[i][0], corners[i][1] + radius*directions[i][1]
        while distance(start, (corners[i][0]+radius*directions[i][0], corners[i][1] + radius*directions[i][1])) < np.min(rectSz)-2*radius:
            contour_points.append(start)
            start = start[0]+directions[i][0], start[1]+directions[i][1]
    return contour_points

# Draw the contour and show the image
img = np.zeros((600,600), np.uint8)
topLeft = (179,179)
rectSz = (321,321)
cornerCurvature = 0.09
contour_points = RoundedRectPoints(topLeft,rectSz,cornerCurvature)
cv2.drawContours(img, [np.array(contour_points, dtype=np.int32)], 0, 255, -1)
cv2.imshow('img',img)
cv2.waitKey(0)
cv2.destroyAllWindows()

How to get the Hough transform matrix and the theta and rho values of the Hough transformation in OpenCV C++

I have MATLAB code to get the Hough transform matrix and the theta and rho values:
[H,T,R] = hough(EdgeImage);
How can I get the H, T, R values in OpenCV?
In OpenCV, you call the HT as:
vector<Vec2f> lines;
HoughLines(edges, lines, 1, CV_PI/180.0, 100);
where edges is your binary input image and lines is a std::vector of Vec2f, i.e. a vector of 2 float values: the first value is rho, the second one is theta.
OpenCV doesn't output the H parameter space. If you also need that, you have to write some code yourself and adapt HoughLines to output the H values as well. However, this is rarely needed in practice.
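If you really do need the accumulator, a rough, unoptimized sketch of building H (and the theta/rho axes, the analogue of MATLAB's T and R) yourself could look like this; the bin sizes and the 0..pi theta range follow the OpenCV convention, and the names are my own:
// Sketch: manual Hough accumulator for a binary CV_8UC1 edge image, 1-degree / 1-pixel bins.
int thetaBins = 180;
double rhoMax = std::hypot(edges.rows, edges.cols);
int rhoBins = 2 * cvCeil(rhoMax) + 1;                        // rho ranges over [-rhoMax, +rhoMax]
cv::Mat H = cv::Mat::zeros(rhoBins, thetaBins, CV_32SC1);    // the accumulator ("H")
std::vector<double> T(thetaBins), R(rhoBins);                // the axes ("T" and "R")
for (int t = 0; t < thetaBins; ++t) T[t] = t * CV_PI / thetaBins;
for (int r = 0; r < rhoBins; ++r)  R[r] = r - cvCeil(rhoMax);
for (int y = 0; y < edges.rows; ++y)
    for (int x = 0; x < edges.cols; ++x)
        if (edges.at<uchar>(y, x))
            for (int t = 0; t < thetaBins; ++t)
            {
                double rho = x * std::cos(T[t]) + y * std::sin(T[t]);
                H.at<int>(cvRound(rho) + cvCeil(rhoMax), t)++;
            }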
This is a simple example of how to use the standard Hough transform, adapted from the OpenCV tutorials:
#include <opencv2/opencv.hpp>
#include <vector>

using namespace std;
using namespace cv;

int main()
{
    // Load image
    Mat3b img = imread("path_to_image");
    Mat3b res = img.clone();

    // Convert to grayscale
    Mat1b gray;
    cvtColor(img, gray, COLOR_BGR2GRAY);

    // Compute edges
    Mat1b edges;
    Canny(gray, edges, 100, 400);

    vector<Vec2f> lines;
    HoughLines(edges, lines, 1, CV_PI/180.0, 100);

    for (size_t i = 0; i < lines.size(); i++)
    {
        // rho and theta values
        float rho = lines[i][0];
        float theta = lines[i][1];

        // Draw the line
        Point pt1, pt2;
        double a = cos(theta), b = sin(theta);
        double x0 = a*rho, y0 = b*rho;
        pt1.x = cvRound(x0 + 1000 * (-b));
        pt1.y = cvRound(y0 + 1000 * (a));
        pt2.x = cvRound(x0 - 1000 * (-b));
        pt2.y = cvRound(y0 - 1000 * (a));
        line(res, pt1, pt2, Scalar(0, 0, 255), 2);
    }

    imshow("Input", img);
    imshow("Output", res);
    waitKey();

    return 0;
}
Input:
Output:

how to find the parameters of the orthogonal line of a found one in opencv

I have a program that finds lines in a frame (actually I have only one), and I'm trying to find "some" lines that are orthogonal to the found one:
............................
cv::HoughLines(canny, lineQ, 1, CV_PI/180, 200);
for( size_t i = 0; i < lineQ.size(); i++ )
{
    float rho = lineQ[i][0], theta = lineQ[i][1];
    cv::Point pt1, pt2;
    double a = cos(theta), b = sin(theta);
    double x0 = a*rho, y0 = b*rho;
    pt1.x = cvRound(x0 + 1000*(-b));
    pt1.y = cvRound(y0 + 1000*(a));
    pt2.x = cvRound(x0 - 1000*(-b));
    pt2.y = cvRound(y0 - 1000*(a));
    angle = (-1)* atan2f((pt2.y-pt1.y),(pt2.x-pt1.x))* 180.0/CV_PI; // getting the angle of the lines
    lineFunction(angle, mask, rho, pt1, theta); // this function should draw the orthogonal line
    line( mask, pt1, pt2, cv::Scalar(0,0,255), 3, CV_AA);
...........
Here is the function in which I try to find the orthogonal one:
void lineFunction(float alpha, cv::Mat frame, float rho, cv::Point Pt3, float omega){
    float b = 0.0;
    float newAlpha = 0.0;
    float newRho = 0.0;
    cv::Point pt1, pt2;
    if(alpha != 90.0 && alpha != 0.0 && alpha != -90.0){
        b = (Pt3.x * (-1)) + rho/sinf(alpha);
        if( alpha < 0){
            newAlpha = CV_PI + omega;
            newRho = b+sinf(newAlpha);
            double a = cos(newAlpha), b = sin(newAlpha);
            double x0 = a*newRho, y0 = b*newRho;
            pt1.x = cvRound(x0 + 1000*(-b));
            pt1.y = cvRound(y0 + 1000*(a));
            pt2.x = cvRound(x0 - 1000*(-b));
            pt2.y = cvRound(y0 - 1000*(a));
        }else if ( alpha > 0 ) {
            newAlpha = CV_PI - omega;
            newRho = b+sinf(newAlpha);
            double a = cos(newAlpha), b = sin(newAlpha);
            double x0 = a*newRho, y0 = b*newRho;
            pt1.x = cvRound(x0 + 1000*(-b));
            pt1.y = cvRound(y0 + 1000*(a));
            pt2.x = cvRound(x0 - 1000*(-b));
            pt2.y = cvRound(y0 - 1000*(a));
        }
        line( frame, pt1, pt2, cv::Scalar(0,255,0), 3, CV_AA);
    }
}
And this is the result that I get; the red line has been found and the green one should be calculated.
Any idea what I'm doing wrong?
Thanks a lot for any help!
You convert your angle from radians to degrees before passing it to lineFunction; the cos and sin functions in there work on radians, I believe.
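As a minimal illustration of that point (sketch only, my own variable names): keeping everything in radians, the orthogonal line through a point p of the detected line can be computed directly as
// theta comes straight from HoughLines (radians), p is any point on the detected line.
float orthTheta = theta + (float)CV_PI / 2.0f;                            // normal rotated by 90 degrees
float orthRho   = p.x * std::cos(orthTheta) + p.y * std::sin(orthTheta);  // line equation: x*cos + y*sin = rho
// orthRho / orthTheta can then be drawn with the usual 1000*(-sin), 1000*cos endpoint trick.
This is essentially what the fix below does.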
I've played with the lineFunction function and it works now; I hope this will help somebody:
void lineFunction(float alpha, cv::Mat frame, float rho, cv::Point pt, float omega){
    float winkel = 0;   // "winkel" = angle
    float distance;
    // calculating the new angle
    if( alpha != 0.0 && alpha != 90.0 && alpha != -90.0){
        //if( alpha<0){
        winkel = CV_PI/2 + omega;
        distance = (pt.x*cosf(winkel)) + (pt.y*sinf(winkel));
        cv::Point pt1, pt2;
        double a = cos(winkel), b = sin(winkel);
        double x0 = a*distance, y0 = b*distance;
        pt1.x = cvRound(x0 + 1000*(-b));
        pt1.y = cvRound(y0 + 1000*(a));
        pt2.x = cvRound(x0 - 1000*(-b));
        pt2.y = cvRound(y0 - 1000*(a));
        line( frame, pt1, pt2, cv::Scalar(0,255,0), 3, CV_AA);
    }
}
Here is what I get: