I'm trying to detect Hough lines in video captured by a webcam. The problem is that to load a new frame, I have to close the current window, after which a new window with the next frame opens automatically. I just want to stop having to close windows to load new frames. How can I play the video in a single window without closing it?
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
    VideoCapture stream(0);
    if (!stream.isOpened()) {
        cout << "\nCannot open video camera";
    } else {
        // Capture frames from the camera
        while (true) {
            Mat src;
            stream.read(src);
            //Mat src = imread("D:/LineDetection.png", 0);
            Mat dst, cdst;
            Canny(src, dst, 50, 200, 3);
            cvtColor(dst, cdst, CV_GRAY2BGR);

            vector<Vec2f> lines;
            // Detect lines
            HoughLines(dst, lines, 1, CV_PI/180, 150, 0, 0);

            // Draw lines
            for (size_t i = 0; i < lines.size(); i++)
            {
                float rho = lines[i][0], theta = lines[i][1];
                Point pt1, pt2;
                double a = cos(theta), b = sin(theta);
                double x0 = a*rho, y0 = b*rho;
                pt1.x = cvRound(x0 + 1000*(-b));
                pt1.y = cvRound(y0 + 1000*(a));
                pt2.x = cvRound(x0 - 1000*(-b));
                pt2.y = cvRound(y0 - 1000*(a));
                line(cdst, pt1, pt2, Scalar(0,0,255), 3, CV_AA);
            }

            imshow("source", src);
            imshow("detected lines", cdst);
            if (waitKey() == 32)
                break;
        }
    }
    return 0;
}
Change waitKey() to waitKey(10).
waitKey() waits for a keypress indefinitely, which is why each frame blocks until you intervene; waitKey(10) returns after at most 10 ms, so the loop keeps running and the same windows are simply updated with each new frame.
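In context, only the tail of your capture loop changes; something like this (10 ms is an arbitrary polling interval, and 32 is the space key you already use to exit):

    imshow("source", src);
    imshow("detected lines", cdst);
    // waitKey(10) waits up to 10 ms and returns -1 if no key was pressed,
    // so the loop immediately grabs the next frame instead of blocking
    if (waitKey(10) == 32)
        break;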
Hello, I have a simple street image.
I want to detect the road lines, like here,
but so far I can only get this.
I don't understand what I am doing wrong. This is my code; I need to do this with HoughLines:
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
using namespace cv;
using namespace std;
int main(int argc, char** argv)
{
    // Declare the output variables
    Mat dst, cdst, cdstP;

    // Load an image
    Mat src = imread( "/Users/user/Desktop/opencv_tests/street_scene.png", IMREAD_COLOR );
    Canny(src, dst, 700, 500, 3);

    // Copy edges to the images that will display the results in BGR
    cvtColor(dst, cdst, COLOR_GRAY2BGR);
    cdstP = cdst.clone();

    // Standard Hough Line Transform
    vector<Vec2f> lines; // will hold the results of the detection
    HoughLines(dst, lines, 1, CV_PI/30, 75, 0, 0 );

    // Draw the lines
    for (size_t i = 0; i < lines.size(); i++)
    {
        float rho = lines[i][0], theta = lines[i][1];
        Point pt1, pt2;
        double a = cos(theta), b = sin(theta);
        double x0 = a*rho;
        double y0 = b*rho;
        pt1.x = cvRound(x0 + 1000*(-b));
        pt1.y = cvRound(y0 + 1000*(a));
        pt2.x = cvRound(x0 - 1000*(-b));
        pt2.y = cvRound(y0 - 1000*(a));
        line( src, pt1, pt2, Scalar(0,0,255), 3, LINE_AA);
    }

    imshow("Source", src);
    waitKey();
    return 0;
}
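Not a definitive fix, but a sketch of more conventional parameters to try: Canny usually takes the lower threshold first (50/150 is a common starting point), CV_PI/180 is the usual theta step (CV_PI/30 quantizes angles to 6 degrees), and HoughLinesP often isolates road markings better because it returns finite segments. All thresholds below are guesses to tune, not values verified on this scene:

    // Hedged sketch: grayscale -> Canny -> probabilistic Hough; thresholds are assumptions
    Mat gray, edges;
    cvtColor(src, gray, COLOR_BGR2GRAY);
    Canny(gray, edges, 50, 150, 3);                     // lower threshold first, then upper
    vector<Vec4i> segs;                                 // each entry is x1, y1, x2, y2
    HoughLinesP(edges, segs, 1, CV_PI/180, 50, 50, 10); // threshold, minLineLength, maxLineGap
    for (size_t i = 0; i < segs.size(); i++)
        line(src, Point(segs[i][0], segs[i][1]),
             Point(segs[i][2], segs[i][3]), Scalar(0,0,255), 3, LINE_AA);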
I am trying to parallelize a Hough transform. This is code I found online, and I am new to using OpenCV 2 in C++. Does anyone know how to convert this code to the new OpenCV API?
// Next do the Canny and Hough transforms on the smaller image
IplImage* src;
src = cvLoadImage(fileName, 0);
IplImage* dst = cvCreateImage( cvGetSize(src), 8, 1 );
IplImage* color_dst = cvCreateImage( cvGetSize(src), 8, 3 );
IplImage* final_dst = cvCreateImage( cvGetSize(src), 8, 1 );
CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq* lines = 0;
int i;

cvCanny( src, dst, 50, 200, 3 );
cvCvtColor( dst, color_dst, CV_GRAY2BGR );
lines = cvHoughLines2( dst,
                       storage,
                       CV_HOUGH_STANDARD,
                       1,
                       CV_PI/180,
                       100,
                       0,
                       0 );

for( i = 0; i < MIN(lines->total,100); i++ )
{
    float* line = (float*)cvGetSeqElem(lines,i);
    float rho = line[0];
    float theta = line[1];
    CvPoint pt1, pt2;
    double a = cos(theta), b = sin(theta);
    double x0 = a*rho, y0 = b*rho;
    pt1.x = cvRound(x0 + 1000*(-b));
    pt1.y = cvRound(y0 + 1000*(a));
    pt2.x = cvRound(x0 - 1000*(-b));
    pt2.y = cvRound(y0 - 1000*(a));
    cvLine( color_dst, pt1, pt2, CV_RGB(255,0,0), 2, 8 );
}
cvCvtColor(color_dst, final_dst, CV_BGR2GRAY);
cvSaveImage(fileName, final_dst);
I have tried a bit, but I think I failed:
// Next do the Hough transforms on the smaller image
cv::Mat src = cv::imread(fileName);
cv::Mat dst;
cv::Mat color_dst;
cv::Mat final_dst;
//IplImage src;
//src=cvLoadImage(fileName, 0);
//IplImage* dst = cvCreateImage( cvGetSize(src), 8, 1 );
//IplImage* color_dst = cvCreateImage( cvGetSize(src), 8, 3 );
//IplImage* final_dst = cvCreateImage( cvGetSize(src), 8, 1 );
//CvMemStorage* storage = cvCreateMemStorage(0);

cv::namedWindow(CW_IMG_ORIGINAL, cv::WINDOW_NORMAL);
cv::resizeWindow(CW_IMG_ORIGINAL, 1280, 720);
cv::moveWindow(CW_IMG_ORIGINAL, 10, 10);

cv::Mat lines;
//CvSeq* lines = 0;
//cv::Seq<linesTp>;
int i;
cvHoughLines2;

cv::Canny( src, dst, 50, 200, 3 );
cv::cvtColor( dst, color_dst, CV_GRAY2BGR );
cv::HoughLines( dst, lines,
                CV_HOUGH_STANDARD,
                1,
                CV_PI/180,
                100,
                0,
                0 );

for( i = 0; i < 100; i++ )
{
    //float* line = (float*)cv::getSeqElem(lines, i);
    float* line = lines.at(i);
    float rho = line[0];
    float theta = line[1];
    cv::Point pt1, pt2;
    double a = cos(theta), b = sin(theta);
    double x0 = a*rho, y0 = b*rho;
    pt1.x = cvRound(x0 + 1000*(-b));
    pt1.y = cvRound(y0 + 1000*(a));
    pt2.x = cvRound(x0 - 1000*(-b));
    pt2.y = cvRound(y0 - 1000*(a));
    cv::line(color_dst, pt1, pt2, CV_RGB(255, 0, 0), 2, 8);
}
cv::cvtColor(color_dst, final_dst, CV_BGR2GRAY);
cv::imshow(CW_IMG_ORIGINAL, final_dst);
I failed because I don't know how to iterate over the detected lines to apply the formula. The original code did this with cvHoughLines2, but cvHoughLines2 needs a CvArr* input, and cvLoadImage no longer works in OpenCV 4.1.2, so I can't use IplImage and cvLoadImage to run this code.
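For reference, a hedged sketch of what the old cvHoughLines2 block might look like against the current C++ API: HoughLines writes into a std::vector<cv::Vec2f> of (rho, theta) pairs rather than a CvSeq, so there is no storage object, no cvGetSeqElem, and no CV_HOUGH_STANDARD argument. The file name and thresholds below are placeholders, not known-good values:

    #include <opencv2/opencv.hpp>
    #include <algorithm>
    #include <vector>

    int main()
    {
        // imread replaces cvLoadImage in the C++ API; "input.png" is a placeholder
        cv::Mat src = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
        if (src.empty()) return -1;

        cv::Mat dst, color_dst, final_dst;
        cv::Canny(src, dst, 50, 200, 3);
        cv::cvtColor(dst, color_dst, cv::COLOR_GRAY2BGR);

        // HoughLines fills a vector of (rho, theta) pairs; no CvMemStorage needed
        std::vector<cv::Vec2f> lines;
        cv::HoughLines(dst, lines, 1, CV_PI / 180, 100, 0, 0);

        for (size_t i = 0; i < std::min(lines.size(), size_t(100)); i++)
        {
            float rho = lines[i][0], theta = lines[i][1];
            double a = cos(theta), b = sin(theta);
            double x0 = a * rho, y0 = b * rho;
            cv::Point pt1(cvRound(x0 + 1000 * (-b)), cvRound(y0 + 1000 * (a)));
            cv::Point pt2(cvRound(x0 - 1000 * (-b)), cvRound(y0 - 1000 * (a)));
            cv::line(color_dst, pt1, pt2, cv::Scalar(0, 0, 255), 2, cv::LINE_8);
        }

        cv::cvtColor(color_dst, final_dst, cv::COLOR_BGR2GRAY);
        cv::imwrite("output.png", final_dst); // imwrite replaces cvSaveImage
        return 0;
    }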
I have followed this article on how to compute the skew angle and deskew an image for better Tesseract OCR results: http://felix.abecassis.me/2011/10/opencv-rotation-deskewing/
The correct angle is calculated, but the text is never actually rotated.
These are the methods I am using:
+ (UIImage *)prepareImage:(UIImage *)image {
    return deskew(image, computeSkew(image));
}

// Organization -> Deskewing

double computeSkew(UIImage *image)
{
    Mat src;
    UIImageToMat(image, src);
    cv::Size size = src.size();
    bitwise_not(src, src);

    vector<Vec4i> lines;
    HoughLinesP(src, lines, 1, CV_PI/180, 100, size.width / 2.f, 20);

    Mat disp_lines(size, CV_8UC1, Scalar(0, 0, 0));
    double angle = 0.;
    unsigned nb_lines = lines.size();
    for (unsigned i = 0; i < nb_lines; ++i)
    {
        line(disp_lines, cv::Point(lines[i][0], lines[i][1]),
             cv::Point(lines[i][2], lines[i][3]), Scalar(255, 0, 0));
        angle += atan2((double)lines[i][3] - lines[i][1],
                       (double)lines[i][2] - lines[i][0]);
    }
    angle /= nb_lines; // mean angle, in radians
    cout << angle << endl;
    return angle;
}

UIImage* deskew(UIImage *image, double angle)
{
    Mat img;
    UIImageToMat(image, img);
    bitwise_not(img, img);

    vector<cv::Point> points;
    Mat_<uchar>::iterator it = img.begin<uchar>();
    Mat_<uchar>::iterator end = img.end<uchar>();
    for (; it != end; ++it)
        if (*it)
            points.push_back(it.pos());

    RotatedRect box = minAreaRect(Mat(points));
    Mat rot_mat = getRotationMatrix2D(box.center, angle, 1);
    Mat rotated;
    warpAffine(img, rotated, rot_mat, img.size(), INTER_CUBIC);
    return MatToUIImage(rotated);
}
UIImageToMat and MatToUIImage are reliable methods that convert back and forth. I have also tried to return the angle in both radians and degrees. Both times the image returned from the prepareImage function is still tilted at the same angle as the original image.
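You note both radians and degrees were tried; still, one hedged guess worth double-checking is where the conversion happens, since cv::getRotationMatrix2D interprets its angle argument in degrees while computeSkew's mean comes from atan2 in radians. The conversion has to happen exactly once, inside deskew or outside it, not both. A minimal sketch of the deskew line with the conversion made explicit:

    // Hedged sketch: assumes 'angle' arrives in radians from computeSkew;
    // getRotationMatrix2D wants degrees
    double degrees = angle * 180.0 / CV_PI;
    Mat rot_mat = getRotationMatrix2D(box.center, degrees, 1);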
I have MATLAB code to get the Hough transform matrix and the theta and rho values:
[H,T,R] = hough(EdgeImage);
How to get H, T, R values in OpenCV?
In OpenCV, you call the Hough transform as:
vector<Vec2f> lines;
HoughLines(edges, lines, 1, CV_PI/180.0, 100);
where edges is your binary input image, and lines is a std::vector of Vec2f, i.e. a vector of two float values: the first value is rho, the second is theta.
OpenCV doesn't output the H parameter space; if you need that as well, you have to write some code yourself and adapt HoughLines to also output the H values. However, this is rarely needed in practice.
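If you really do need H, here is a rough sketch of accumulating the (rho, theta) parameter space yourself; the bin sizes are assumptions (1-pixel rho bins, 1-degree theta bins), and note that MATLAB's hough() spans theta over [-90, 90) degrees while this sketch uses [0, 180). T and R then follow directly from the bin layout. This is not OpenCV's internal implementation:

    #include <opencv2/opencv.hpp>
    #include <cmath>

    cv::Mat1i houghAccumulator(const cv::Mat1b& edges, int thetaBins = 180)
    {
        const double maxRho = std::hypot((double)edges.rows, (double)edges.cols);
        const int rhoBins = 2 * cvCeil(maxRho) + 1;          // rho spans [-maxRho, maxRho]
        cv::Mat1i H = cv::Mat1i::zeros(rhoBins, thetaBins);  // rows index rho, cols index theta

        for (int y = 0; y < edges.rows; ++y)
            for (int x = 0; x < edges.cols; ++x)
            {
                if (!edges(y, x)) continue;                  // only edge pixels vote
                for (int t = 0; t < thetaBins; ++t)
                {
                    double theta = t * CV_PI / thetaBins;    // theta in [0, pi)
                    double rho = x * std::cos(theta) + y * std::sin(theta);
                    H(cvRound(rho) + cvCeil(maxRho), t)++;   // shift rho to a non-negative index
                }
            }
        return H;
    }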
This is a simple example on how to use standard Hough Transform, adapted from OpenCV tutorials:
#include <opencv2/opencv.hpp>
#include <vector>

using namespace std;
using namespace cv;

int main()
{
    // Load image
    Mat3b img = imread("path_to_image");
    Mat3b res = img.clone();

    // Convert to grayscale
    Mat1b gray;
    cvtColor(img, gray, COLOR_BGR2GRAY);

    // Compute edges
    Mat1b edges;
    Canny(gray, edges, 100, 400);

    vector<Vec2f> lines;
    HoughLines(edges, lines, 1, CV_PI/180.0, 100);

    for (size_t i = 0; i < lines.size(); i++)
    {
        // rho and theta values
        float rho = lines[i][0];
        float theta = lines[i][1];

        // Draw the line
        Point pt1, pt2;
        double a = cos(theta), b = sin(theta);
        double x0 = a*rho, y0 = b*rho;
        pt1.x = cvRound(x0 + 1000 * (-b));
        pt1.y = cvRound(y0 + 1000 * (a));
        pt2.x = cvRound(x0 - 1000 * (-b));
        pt2.y = cvRound(y0 - 1000 * (a));
        line(res, pt1, pt2, Scalar(0, 0, 255), 2);
    }

    imshow("Input", img);
    imshow("Output", res);
    waitKey();
    return 0;
}
Input:
Output:
Hi, I have a base image and other images that I'd like to rotate by the same angle as the base image.
This is my base image.
This is an example image that I'd like to rotate.
Here is my full code:
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/imgproc/imgproc.hpp"

#define PI 3.14159265

using namespace cv;
using namespace std;

void rotate(cv::Mat& src, double angle, cv::Mat& dst)
{
    int len = std::max(src.cols, src.rows);
    cv::Point2f pt(len/2., len/2.);
    cv::Mat r = cv::getRotationMatrix2D(pt, angle, 1.0);
    cv::warpAffine(src, dst, r, cv::Size(len, len));
}

float angleBetween(const Point &v1, const Point &v2)
{
    float len1 = sqrt(v1.x * v1.x + v1.y * v1.y);
    float len2 = sqrt(v2.x * v2.x + v2.y * v2.y);
    float dot = v1.x * v2.x + v1.y * v2.y;
    float a = dot / (len1 * len2);

    if (a >= 1.0)
        return 0.0;
    else if (a <= -1.0)
        return 180.0;              // was PI, which mixed radians into a result otherwise in degrees
    else
        return acos(a) * 180 / PI; // was assigned through an int, truncating the angle
}
int main()
{
    Mat char1 = imread( "/Users/Rodrane/Documents/XCODE/OpenCV/mkedenemeleri/anarev/rotated.jpg", CV_LOAD_IMAGE_GRAYSCALE );
    Mat image = imread( "/Users/Rodrane/Documents/XCODE/OpenCV/mkedenemeleri/anarev/gain2000_crop.jpg", CV_LOAD_IMAGE_GRAYSCALE );
    if( !char1.data )
    {
        std::cout << "Error reading object " << std::endl;
        return -1;
    }

    GaussianBlur( char1, char1, Size(3, 3), 2, 2 );
    GaussianBlur( image, image, Size(3, 3), 2, 2 );
    adaptiveThreshold(char1, char1, 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY, 9, 14);
    adaptiveThreshold(image, image, 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY, 9, 14);

    // Detect the keypoints using SURF Detector
    int minHessian = 200;
    SurfFeatureDetector detector( minHessian );
    std::vector<KeyPoint> kp_object;
    detector.detect( char1, kp_object );

    // Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat des_object;
    extractor.compute( char1, kp_object, des_object );

    FlannBasedMatcher matcher;
    namedWindow("Good Matches");

    // Get the corners from the object
    std::vector<Point2f> obj_corners(4);
    obj_corners[0] = cvPoint(0, 0);
    obj_corners[1] = cvPoint( char1.cols, 0 );
    obj_corners[2] = cvPoint( char1.cols, char1.rows );
    obj_corners[3] = cvPoint( 0, char1.rows );

    Mat frame;
    Mat des_image, img_matches;
    std::vector<KeyPoint> kp_image;
    std::vector<vector<DMatch> > matches;
    std::vector<DMatch> good_matches;
    std::vector<Point2f> obj;
    std::vector<Point2f> scene;
    std::vector<Point2f> scene_corners(4);
    Mat H;

    detector.detect( image, kp_image );
    extractor.compute( image, kp_image, des_image );
    matcher.knnMatch(des_object, des_image, matches, 2);

    for(int i = 0; i < min(des_image.rows-1, (int)matches.size()); i++) // THIS LOOP IS SENSITIVE TO SEGFAULTS
    {
        if((matches[i][0].distance < 0.6*(matches[i][1].distance)) && ((int)matches[i].size() <= 2 && (int)matches[i].size() > 0))
        {
            good_matches.push_back(matches[i][0]);
        }
    }

    // Draw only "good" matches
    drawMatches( char1, kp_object, image, kp_image, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

    if (good_matches.size() >= 4)
    {
        for( int i = 0; i < good_matches.size(); i++ )
        {
            // Get the keypoints from the good matches
            obj.push_back( kp_object[ good_matches[i].queryIdx ].pt );
            scene.push_back( kp_image[ good_matches[i].trainIdx ].pt );
            cout << angleBetween(obj[i], scene[i]) << endl; // angles between images
        }
        H = findHomography( obj, scene, CV_RANSAC );
        perspectiveTransform( obj_corners, scene_corners, H);
        // cout << angleBetween(obj[0], scene[0]) << endl;
        // Draw lines between the corners (the mapped object in the scene image)
    }

    // Show detected matches
    // resize(img_matches, img_matches, Size(img_matches.cols/2, img_matches.rows/2));
    imshow( "Good Matches", img_matches );
    waitKey();
    return 0;
}
What my code actually does:
I detect the features of both images.
I calculate the angle between corresponding points of my base image and the example image.
Since all the angles between point pairs are different, how can I rotate my image based on the features?
Also, for example, say the features of the character M are detected and the angle is 30: in some conditions, rotating the image by 30 degrees will make it horizontally aligned but vertically wrong.
The problem is that even when the first features lie on the same line, that doesn't mean the example image is rotated correctly (it might need to be rotated another 180 degrees, for example).
I remade your function without using the angles:
void rotate(cv::Mat& originalImage, cv::Mat& rotatedImage, cv::InputArray rotated, cv::Mat& dst)
{
    std::vector<cv::Point2f> original(4);
    original[0] = cv::Point( 0, 0 );
    original[1] = cv::Point( originalImage.cols, 0 );
    original[2] = cv::Point( originalImage.cols, originalImage.rows );
    original[3] = cv::Point( 0, originalImage.rows );

    dst = cv::Mat::zeros(originalImage.rows, originalImage.cols, CV_8UC3);
    cv::Mat transform = cv::getPerspectiveTransform(rotated, original);
    cv::warpPerspective(rotatedImage, dst, transform, dst.size());
}
Note that the input 'rotated' is in your case 'scene_corners' and 'dst' is the resulting image.
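A hypothetical call site, reusing the names from your main() ('aligned' is new, and this assumes scene_corners was filled by the perspectiveTransform call above):

    cv::Mat aligned;
    rotate(char1, image, scene_corners, aligned); // maps the matched quad back onto the base frame
    cv::imshow("Aligned", aligned);
    cv::waitKey();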
Hope that helps!