I have followed this article on how to compute the skew angle of an image and deskew it for better Tesseract OCR results: http://felix.abecassis.me/2011/10/opencv-rotation-deskewing/
The correct angle is calculated, but the text is never actually rotated.
These are the methods I am using:
+ (UIImage *)prepareImage:(UIImage *)image {
    return deskew(image, computeSkew(image));
}
// Organization -> Deskewing
double computeSkew(UIImage *image)
{
    Mat src;
    UIImageToMat(image, src);
    cv::Size size = src.size();
    bitwise_not(src, src);
    vector<Vec4i> lines;
    HoughLinesP(src, lines, 1, CV_PI/180, 100, size.width / 2.f, 20);
    Mat disp_lines(size, CV_8UC1, Scalar(0, 0, 0));
    double angle = 0.;
    unsigned nb_lines = lines.size();
    for (unsigned i = 0; i < nb_lines; ++i)
    {
        line(disp_lines, cv::Point(lines[i][0], lines[i][1]),
             cv::Point(lines[i][2], lines[i][3]), Scalar(255, 0, 0));
        angle += atan2((double)lines[i][3] - lines[i][1],
                       (double)lines[i][2] - lines[i][0]);
    }
    angle /= nb_lines; // mean angle, in radians
    cout << angle << endl;
    return angle;
}
UIImage* deskew(UIImage *image, double angle)
{
    Mat img;
    UIImageToMat(image, img);
    bitwise_not(img, img);
    vector<cv::Point> points;
    Mat_<uchar>::iterator it = img.begin<uchar>();
    Mat_<uchar>::iterator end = img.end<uchar>();
    for (; it != end; ++it)
        if (*it)
            points.push_back(it.pos());
    RotatedRect box = minAreaRect(Mat(points));
    Mat rot_mat = getRotationMatrix2D(box.center, angle, 1);
    Mat rotated;
    warpAffine(img, rotated, rot_mat, img.size(), INTER_CUBIC);
    return MatToUIImage(rotated);
}
UIImageToMat and MatToUIImage are reliable methods that convert back and forth. I have also tried returning the angle in both radians and degrees. In both cases, the image returned from the prepareImage function is still tilted at the same angle as the original image.
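For what it's worth, cv::getRotationMatrix2D interprets its angle parameter in degrees (positive meaning counter-clockwise), while atan2 returns radians. A minimal sketch of the conversion inside deskew (my sketch, not a confirmed fix):

// Sketch (assumption, not a confirmed fix): convert the radian mean angle
// from computeSkew to degrees before building the rotation matrix, since
// getRotationMatrix2D expects degrees.
double degrees = angle * 180.0 / CV_PI;
Mat rot_mat = getRotationMatrix2D(box.center, degrees, 1);
warpAffine(img, rotated, rot_mat, img.size(), INTER_CUBIC);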
I'm struggling with shape detection using OpenCV for C++. Edged figures such as triangles and rectangles are detected trouble-free, but when it comes to circles, it estimates the number of vertices at 6-8. Could somebody help me?
void getContours(Mat video){
    Mat grayscale, canny_output;
    cvtColor(video, grayscale, COLOR_RGB2GRAY); // convert image to grayscale
    GaussianBlur(grayscale, grayscale, Size(9, 9), 2, 2);
    threshold(grayscale, grayscale, 60, 255, THRESH_BINARY);
    vector<vector<Point>> contours, output_contour;
    vector<Vec4i> hierarchy;
    findContours(grayscale, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE);
    Mat drawing = Mat::zeros(grayscale.size(), CV_8UC3);
    vector<Point> c;
    for (size_t i = 0; i < contours.size(); i++){
        c = contours[i];
        Rect crect = boundingRect(c);
        // compute the center of the contour, then detect the name of the
        // shape using only the contour
        Moments M = moments(c);
        int cX, cY;
        cX = static_cast<int>(M.m10 / M.m00);
        cY = static_cast<int>(M.m01 / M.m00);
        string shape = detect(Mat(c));
        drawContours(drawing, contours, (int)i, Scalar(0, 255, 0), 2);
        Point pt(cX, cY);
        putText(drawing, shape, pt, FONT_HERSHEY_SIMPLEX, 0.5, Scalar(255, 255, 255), 2);
        imshow("contour", drawing);
    }
}
string detect(const Mat &curve){
    string shape = "unidentified";
    double peri = arcLength(curve, true);
    Mat approx;
    approxPolyDP(curve, approx, 0.04 * peri, true); // 0.01~0.05
    const int num_of_vertices = approx.rows;
    if (num_of_vertices == 0){
        shape = "circle";
    }
    if (num_of_vertices == 2){
        shape = "line";
    }
    cout << "\n" << num_of_vertices;
    return to_string(num_of_vertices);
}
I'm trying to build a simple Augmented Reality application using OpenCV 4.1.1 and Aruco. The goal is to overlay an image on top of a marker but have the image go beyond the edges of the marker.
I have calibrated my camera and gotten the camera matrix and distortion coefficients. By using OpenCV's warpPerspective I can draw an image on top of a marker, but I can only tie it to the corners of the marker so it stays within the border of the marker.
std::vector<int> ids;
std::vector<std::vector<Point2f>> corners;
// detect markers
aruco::detectMarkers(image, dictionary, corners, ids);
if (ids.size() > 0) {
    // file with image to draw
    auto file = "square.png";
    // image to draw on the marker
    Mat im_src = imread(file);
    if (im_src.data == NULL) {
        std::cout << file << ": File not found\n" << std::endl;
        continue;
    }
    // flip(im_src, im_src, 1);
    // points of corners of the image
    std::vector<Point2f> pts_src;
    pts_src.push_back(Point2f(0, 0));
    pts_src.push_back(Point2f(im_src.cols - 1, 0));
    pts_src.push_back(Point2f(im_src.cols - 1, im_src.rows - 1));
    pts_src.push_back(Point2f(0, im_src.rows - 1));
    // use aruco marker
    for (int i = 0; i < ids.size(); i++) {
        if (ids[i] == 69) {
            aruco::drawDetectedMarkers(imageCopy, corners, ids);
            std::vector<Point> pts_dst;
            pts_dst.push_back(corners[i][0]);
            pts_dst.push_back(corners[i][1]);
            pts_dst.push_back(corners[i][2]);
            pts_dst.push_back(corners[i][3]);
            Mat h = findHomography(pts_src, pts_dst);
            Mat im_out;
            warpPerspective(im_src, im_out, h, imageCopy.size());
            fillConvexPoly(imageCopy, pts_dst, 0, 16);
            imageCopy = imageCopy + im_out;
        }
    }
}
Here is an image of what I have and what I want. I think I need to use 3D points to draw the image, but I'm not sure how to do that. Any help would be appreciated.
As you said in the comment, if the marker length is available, say l0, you can define the length of the desired square as l = l0 * 1.05 or something.
for (int i = 0; i < ids.size(); i++) {
    if (ids[i] == 69) {
        aruco::drawDetectedMarkers(imageCopy, corners, ids);
        // Estimate the pose of the marker
        std::vector<cv::Vec3d> rvecs, tvecs;
        cv::aruco::estimatePoseSingleMarkers(
            corners, l0, camera_matrix, dist_coeffs,
            rvecs, tvecs
        );
        drawSquare(
            imageCopy, camera_matrix, dist_coeffs, rvecs[i], tvecs[i],
            l0
        );
    }
}
void drawSquare(
    cv::InputOutputArray image, cv::InputArray cameraMatrix,
    cv::InputArray distCoeffs, cv::InputArray rvec, cv::InputArray tvec,
    float l0
)
{
    float l = l0 * 1.05; // new square is 5% larger than the aruco marker
    float half_l = l / 2.0;
    // Define the square in the marker frame (this is 3D; the square lies
    // in the marker's z = 0 plane).
    std::vector<cv::Point3f> squarePoints;
    squarePoints.push_back(cv::Point3f(half_l, half_l, 0));
    squarePoints.push_back(cv::Point3f(half_l, -half_l, 0));
    squarePoints.push_back(cv::Point3f(-half_l, -half_l, 0));
    squarePoints.push_back(cv::Point3f(-half_l, half_l, 0));
    // Project the square onto the image.
    std::vector<cv::Point2f> imagePoints;
    cv::projectPoints(
        squarePoints, rvec, tvec, cameraMatrix, distCoeffs, imagePoints
    );
    // Draw the square on the image.
    cv::line(image, imagePoints[0], imagePoints[1], cv::Scalar(255, 0, 0), 3);
    cv::line(image, imagePoints[1], imagePoints[2], cv::Scalar(255, 0, 0), 3);
    cv::line(image, imagePoints[2], imagePoints[3], cv::Scalar(255, 0, 0), 3);
    cv::line(image, imagePoints[3], imagePoints[0], cv::Scalar(255, 0, 0), 3);
}
I did not test this, but I used similar code for a different project. If you run into any issues, please let me know and I will update the code above.
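To go from the drawn outline to the actual overlay (my sketch, untested): the projected corners from projectPoints can serve as the destination quad of a homography, exactly like the warpPerspective approach in the question, but for the enlarged square. Assumes image is a cv::Mat and imagePoints holds the four projected corners; the corner ordering may need adjusting to match the overlay's orientation:

// Sketch: warp im_src onto the enlarged, projected square instead of
// only drawing its outline.
std::vector<cv::Point2f> srcQuad = {
    cv::Point2f(0.f, 0.f),
    cv::Point2f(im_src.cols - 1.f, 0.f),
    cv::Point2f(im_src.cols - 1.f, im_src.rows - 1.f),
    cv::Point2f(0.f, im_src.rows - 1.f)
};
cv::Mat H = cv::findHomography(srcQuad, imagePoints);
cv::Mat warped;
cv::warpPerspective(im_src, warped, H, image.size());
// Black out the destination quad, then add the warped overlay.
std::vector<cv::Point> quad(imagePoints.begin(), imagePoints.end());
cv::fillConvexPoly(image, quad, cv::Scalar::all(0), cv::LINE_AA);
image += warped;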
I have got a bulk of car images and want to perform automatic number plate recognition, but I am stuck at the localization phase. I want to get each license plate individually as output, so that I can perform recognition on it. Here is my code for localization:
int main(int argc, char** argv)
{
    //String filename;
    //filename = "";
    cv::Mat image = cv::imread("C:\\Users\\Sarora\\Downloads\\Images\\frame_1375.jpg", CV_LOAD_IMAGE_COLOR);
    cv::Mat img;
    cv::Mat img_sobel;
    cv::Mat grad_x, grad_y;
    cv::Mat abs_grad_x, abs_grad_y;
    cv::Mat imgContours;
    //vector<Plate> result;
    cv::cvtColor(image, img, CV_BGR2GRAY);
    blur(img, img, cv::Size(5, 5));
    //cv::namedWindow("Img1.jpg", CV_WINDOW_AUTOSIZE);
    // Sobel filter applied to the image
    cv::Sobel(img, grad_x, CV_16S, 1, 0, 3, 1, 0, cv::BORDER_DEFAULT);
    convertScaleAbs(grad_x, abs_grad_x);
    cv::Sobel(img, grad_y, CV_16S, 0, 1, 3, 1, 0, cv::BORDER_DEFAULT);
    convertScaleAbs(grad_y, abs_grad_y);
    addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0, img_sobel);
    cv::imwrite("Img2.jpg", img_sobel);
    // Threshold the image
    cv::Mat Thresh_img;
    threshold(img_sobel, Thresh_img, 0, 255, CV_THRESH_OTSU + CV_THRESH_BINARY);
    //imshow("Threshold", Thresh_img);
    // Morphological close operation
    cv::Mat element1 = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
    cv::morphologyEx(Thresh_img, Thresh_img, CV_MOP_CLOSE, element1);
    cv::imwrite("Close1.jpg", Thresh_img);
    //cv::waitKey(5000);
    // Find contours of the whole image
    std::vector<std::vector<cv::Point>> contours;
    cv::findContours(Thresh_img, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE);
    //cv::drawContours(image, contours, -1, cv::Scalar(0, 0, 255), 3);
    cv::imwrite("Contours1.jpg", image);
    std::vector<std::vector<cv::Point>>::iterator itc = contours.begin();
    std::vector<cv::RotatedRect> rects;
    //vector<vector<Point>> contours_poly(rects.size());
    //vector<Rect> boundRect(rects.size());
    // Remove patches outside the limits of aspect ratio and area
    while (itc != contours.end()) {
        cv::RotatedRect mr = cv::minAreaRect(cv::Mat(*itc));
        if (!verifySizes(mr)) {
            itc = contours.erase(itc);
        } else {
            ++itc;
            rects.push_back(mr);
        }
    }
    cv::Mat drawing;
    vector<vector<cv::Point>> contours_poly(rects.size());
    vector<cv::Rect> boundRect(rects.size());
    // Draw contours
    cv::Mat output;
    image.copyTo(output);
    for (int i = 0; i < rects.size(); i++)
    {
        approxPolyDP(cv::Mat(contours[i]), contours_poly[i], 10, true);
        boundRect[i] = cv::boundingRect(cv::Mat(contours_poly[i]));
    }
    //cv::imwrite("Contours.jpg", output);
    for (int i = 0; i < rects.size(); i++)
    {
        drawContours(output, contours_poly, i, CV_RGB(255, 255, 255), 1, 8, vector<cv::Vec4i>(), 0, cv::Point());
        //rectangle(output, boundRect[i].tl(), boundRect[i].br(), CV_RGB(0, 255, 0), 3, 8, 0);
    }
    cv::imwrite("drawing1.jpg", output);
}
bool verifySizes(cv::RotatedRect mr){
    float error = 0.4;
    // Set a min and max area; all other patches are discarded
    int min = 5;    // minimum area
    int max = 1000; // maximum area
    // Get only patches whose aspect ratio matches
    float rmin = 1;
    float rmax = 10;
    int area = mr.size.height * mr.size.width;
    float r = (float)mr.size.width / (float)mr.size.height;
    if (r < 1)
        r = (float)mr.size.height / (float)mr.size.width;
    if ((area < min || area > max) || (r < rmin || r > rmax)){
        return false;
    } else {
        return true;
    }
}
I have applied a Sobel filter, thresholding (Otsu + binary), a morphological CLOSE operation, findContours(), removal of contours not inside the limits of area and aspect ratio, and approxPolyDP to the image. This is my input image:
This is the approxPolyDP image:
The problem is that the output image does not form rectangles around the license plate. Can anyone tell me what is wrong in the code, and how I can proceed to automatically find license plates across the bulk of images? I am confused.
Thank you
I have a binary image:
I want to remove the bottom two crescent shapes (size and area may change with different images) from the image, or at least differentiate them from the rest.
I tried the Hough circle transform to detect the curves, as each resembles a portion of a circle, but that code was not working:
int main(int argc, char** argv)
{
    Mat src, gray;
    src = imread("446.bmp", 1);
    namedWindow("src", 1);
    imshow("src", src);
    waitKey(0);
    cvtColor(src, gray, CV_BGR2GRAY);
    // Reduce the noise so we avoid false circle detection
    GaussianBlur(gray, gray, Size(9, 9), 2, 2);
    vector<Vec3f> circles;
    // Apply the Hough Transform to find the circles
    HoughCircles(gray, circles, CV_HOUGH_GRADIENT, 1, 30, 100, 100, 0, 0);
    // Draw the circles detected
    for (size_t i = 0; i < circles.size(); i++)
    {
        Point center(cvRound(circles[i][0]), cvRound(circles[i][1]));
        int radius = cvRound(circles[i][2]);
        circle(src, center, 3, Scalar(0, 255, 0), -1, 8, 0);     // circle center
        circle(src, center, radius, Scalar(0, 0, 255), 3, 8, 0); // circle outline
        cout << "center : " << center << "\nradius : " << radius << endl;
    }
    // Show the results
    namedWindow("Hough Circle Transform Demo", CV_WINDOW_AUTOSIZE);
    imshow("Hough Circle Transform Demo", src);
    waitKey(0);
    return 0;
}
But no circle is being drawn, and the crescent moon shapes are not being detected at all. Any idea where I went wrong?
EDIT 1: I have added some other images too:
EDIT 2: a new image to try:
I made some modifications to the code posted for another question; you could try it:
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
using namespace cv;
using namespace std;
//! Compute the distance between two points
/*! Compute the Euclidean distance between two points
*
* #param a Point a
* #param b Point b
*/
static double distanceBtwPoints(const cv::Point2f &a, const cv::Point2f &b)
{
double xDiff = a.x - b.x;
double yDiff = a.y - b.y;
return std::sqrt((xDiff * xDiff) + (yDiff * yDiff));
}
int main( int argc, char** argv )
{
Mat src,gray;
src = imread(argv[1]);
if(src.empty())
return -1;
cvtColor( src, gray, COLOR_BGR2GRAY );
gray = gray < 200;
vector<vector<Point> > contours;
findContours(gray.clone(), contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
RotatedRect _minAreaRect;
for (size_t i = 0; i < contours.size(); ++i)
{
double contour_area = contourArea(contours[i]);
_minAreaRect = minAreaRect( Mat(contours[i]) );
Point2f pts[4];
_minAreaRect.points(pts);
double dist0 = distanceBtwPoints(pts[0], pts[1]);
double dist1 = distanceBtwPoints(pts[1], pts[2]);
double angle = 0;
//if(dist0 > dist1 *1.2)
angle =atan2(pts[0].y - pts[1].y,pts[0].x - pts[1].x) * 180.0 / CV_PI;
//if(dist1 > dist0 *1.2)
angle =atan2(pts[1].y - pts[2].y,pts[1].x - pts[2].x) * 180.0 / CV_PI;
if( fabs(angle) > 91 ) // you can try different values
{
if( contour_area < dist0 * dist1 /2 ) // you can try different values
{
//drawContours(src,contours,i,Scalar(0,0,0),-1); // try to uncomment this line
for( int j = 0; j < 4; j++ )
line(src, pts[j], pts[(j+1)%4], Scalar(0, 0, 255), 1, LINE_AA);
}
}
}
imshow("result", src);
waitKey(0);
return 0;
}
I don't think there is an easy solution here, unfortunately.
What you might want to try is to detect and label each image component. From there you need to decide which set of pixels looks like a crescent and which does not: since a crescent can be described by a polynomial equation, you only need to describe each component (i.e. a set of points) as a mathematical equation (using regression methods such as RANSAC) and see whether it might be a crescent equation.
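A minimal sketch of the labeling step (my code, not the answerer's), assuming OpenCV 3+ for connectedComponentsWithStats; looksLikeCrescent is a hypothetical placeholder for the model-fitting test described above:

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

// Hypothetical predicate: fit a circle model (e.g. with RANSAC) to the
// component's points and decide whether it matches a crescent.
bool looksLikeCrescent(const std::vector<cv::Point> &pts);

// Sketch: label connected components of a binary image, then test each
// component against the crescent model and erase the ones that match.
void removeCrescents(cv::Mat &binary) {
    cv::Mat labels, stats, centroids;
    int n = cv::connectedComponentsWithStats(binary, labels, stats, centroids, 8);
    for (int i = 1; i < n; ++i) {         // label 0 is the background
        cv::Mat mask = (labels == i);
        std::vector<cv::Point> pts;
        cv::findNonZero(mask, pts);
        if (looksLikeCrescent(pts))
            binary.setTo(0, mask);        // erase the component
    }
}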
I want to create Mat images in OpenCV and initialize them to zero (all pixels black). Thus, for initialization purposes, I use:
Mat img = Mat::zeros(image.rows, image.cols, CV_8UC1);
After that I have some rectangles at locations inside that image, and I want to draw the corresponding rectangular regions in white. How is it possible to draw such a region in the Mat?
I have the following function to draw rects. However, I want to fill the whole rectangle, not just draw its boundaries.
static Mat image_draw(Mat image, vector<Rect> rect, CvScalar color){
    for (int i = 0; i < rect.size(); i++)
    {
        Point pt1(rect[i].x + rect[i].width, rect[i].y + rect[i].height);
        Point pt2(rect[i].x, rect[i].y);
        rectangle(image, pt1, pt2, color, 5, 8, 0);
    }
    return image;
}
The exact thing I want to do is to create a heat map from my rectangles, so that overlapping bounding boxes have higher values (close to 255) than single, non-overlapping rectangles. I changed the thickness:
img = image_draw( img, rects, cvScalar(255, 102, 255, 0), -1);
The variable rects contains between 0 and 10 rectangles. I want to somehow aggregate the rectangle drawing, not just redraw the rectangles each time.
If I want to turn it into a function, it is something like this. EDIT, final solution:
static Mat heatmap2(Mat image1, vector<Rect> faces, CvScalar color, int thickness) {
    cv::Mat heatmap(image1.rows, image1.cols, CV_8U, cv::Scalar(0));
    for (int i = 0; i < faces.size(); i++)
    {
        cv::Mat temp(image1.rows, image1.cols, CV_8U, cv::Scalar(0));
        Point pt1(faces[i].x + faces[i].width, faces[i].y + faces[i].height);
        Point pt2(faces[i].x, faces[i].y);
        rectangle(temp, pt1, pt2, color, thickness, 8, 0);
        heatmap += temp;
    }
    return heatmap;
}
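A small usage sketch (the names frame and rects are illustrative). One caveat: CV_8U addition saturates, so a per-rectangle value of 255 makes every overlap look identical; a smaller increment (e.g. 50) keeps overlap counts distinguishable:

// Usage sketch; 'frame' and 'rects' are assumed inputs.
// Use a small per-rectangle value rather than 255, since saturating
// 8-bit addition would otherwise clamp all overlaps to the same maximum.
cv::Mat hm = heatmap2(frame, rects, cvScalar(50), -1);
cv::normalize(hm, hm, 0, 255, cv::NORM_MINMAX); // stretch for display
cv::imshow("heatmap", hm);
cv::waitKey(0);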
Try this:
cv::Mat heatmap(200, 300, CV_8U, cv::Scalar(0));
{
    cv::Mat temp(200, 300, CV_8U, cv::Scalar(0));
    cv::Rect r(10, 20, 30, 30);
    cv::rectangle(temp, r, cv::Scalar(100), -1);
    heatmap += temp;
}
{
    cv::Mat temp(200, 300, CV_8U, cv::Scalar(0));
    cv::Rect r(20, 25, 30, 30);
    cv::rectangle(temp, r, cv::Scalar(100), -1);
    heatmap += temp;
}
cv::imshow("Heatmap", heatmap);
cv::waitKey();
Result:
From the official OpenCV Documentation (check here), "Thickness of lines that make up the rectangle. Negative values, like CV_FILLED , mean that the function has to draw a filled rectangle."
So give thickness a negative value, for example:
rectangle(image, pt1, pt2, color, -1, 8, 0);
UPDATE
Use these lines in your code,
for (int i = 0; i < rect.size(); i++)
    for (int y = rect[i].y; y < rect[i].y + rect[i].height; y++)
        for (int x = rect[i].x; x < rect[i].x + rect[i].width; x++)
        {
            image.at<uchar>(y, x) =
                saturate_cast<uchar>(image.at<uchar>(y, x) + 50);
        }
Here each Rect will increase the intensity by 50, and when it reaches 255, it will stay 255.
Input Image
Output Image
Two overlapping rects
Just a slight modification to your code should work:
static void draw_rectangles(Mat image, vector<Rect> faces) {
    cv::Mat heatmap(image.rows, image.cols, CV_8U, cv::Scalar(0));
    for (int i = 0; i < faces.size(); i++)
    {
        cv::Mat temp = heatmap(faces[i]); // gives you a submatrix of your heatmap pointing at the location of your rectangle
        temp += 10; // add 10 grey levels to the existing values; this also modifies heatmap as a side effect
    }
    imshow("heatmap", heatmap);
    waitKey(0);
}
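This works because heatmap(faces[i]) returns a Mat header that shares the heatmap's pixel buffer rather than copying it, so each temp += 10 writes straight into the heatmap, without the per-rectangle temporary images used in the accumulation approach above.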