Transforming an Image with OpenCV - C++

I'm new to using OpenCV and I'm testing it out by trying to grab a licence plate from a car. I'm stuck on how to go about doing that. For example, I will start off with an image like this:
and I want my final result to be something like:
I know how to use adaptiveThreshold and the like; I'm confused about the steps needed to go from 1 to 2. Thanks for the help!

There are too many hardcoded thresholds, but will this work?
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace cv;
int main( int argc, char** argv )
{
Mat src = imread( "C:/test/single/license.jpg");
cvtColor(src,src,CV_BGR2GRAY);
blur( src, src, Size(3,3) );
Canny( src, src, 130, 130*4, 3 );
imshow( "edge", src );
GaussianBlur(src,src,Size(3,3),60);
threshold(src,src,0,255,CV_THRESH_OTSU);
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
findContours(src, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
Mat todraw=Mat::zeros(src.size(), CV_8UC1);
for(size_t i = 0; i < contours.size(); i++)
{
double area = fabs(contourArea(Mat(contours[i])));
if(area<600)
drawContours(todraw,contours,i,Scalar(255),-1);
}
imshow( "plate", todraw );
waitKey(0);
return 0;
}
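If the end goal is to actually crop the plate region out of the original picture, a possible follow-up (only a sketch continuing the snippet above, assuming the small contours kept there really do correspond to the plate characters; the helper name is mine) is to take the union of their bounding boxes and cut that rectangle from a freshly loaded copy of the image:

// Hypothetical helper: bounding box of everything drawn into the mask.
Rect plateRegionFromMask(const Mat& mask)
{
    vector<vector<Point> > blobs;
    findContours(mask.clone(), blobs, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);

    Rect plate;                                        // starts empty
    for (size_t i = 0; i < blobs.size(); i++)
    {
        Rect r = boundingRect(blobs[i]);
        plate = (plate.area() == 0) ? r : (plate | r); // union of all boxes
    }
    return plate;
}

// Usage: 'src' was overwritten above, so reload the original image before cropping.
// Mat original = imread("C:/test/single/license.jpg");
// Mat plate = original(plateRegionFromMask(todraw)).clone();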

This is exactly what you want - https://github.com/MasteringOpenCV/code/tree/master/Chapter5_NumberPlateRecognition
It's from the Mastering OpenCV book. It segments number plates and also does rudimentary OCR to recognise the characters.

Related

C++ OpenCV - Find the biggest object in a webcam stream and sort it by size

My goal is to find the biggest contour in a captured webcam frame and then, once it's found, determine its size and decide whether to reject or accept it.
Just to explain the objective of this project: I am currently working for a hygiene products manufacturer. We have, in total, 6 workers responsible for sorting the defective soap bars out of the production line, so in order to free this workforce for other activities, I am trying to write an algorithm to "replace" their eyes.
I've tried several methods along the way (findContours, SimpleBlobDetector, Canny, object tracking), but the problem I've been facing is that I can't seem to find a way to effectively find the biggest object in a webcam image, find its size and then decide whether to discard or accept it.
Below is my newest code to find the biggest contour in a webcam stream:
#include <iostream>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"

using namespace cv;
using namespace std;

int main(int argc, const char** argv)
{
    Mat src;
    Mat imgGrayScale;
    Mat imgCanny;
    Mat imgBlurred;

    /// Open the webcam
    VideoCapture capWebcam(0);
    if (capWebcam.isOpened() == false)
    {
        cout << "Could not open the webcam!" << endl;
        return 0;
    }

    while (capWebcam.isOpened())
    {
        bool blnframe = capWebcam.read(src);
        if (!blnframe || src.empty())
        {
            cout << "Error! Frame not read!\n";
            break;
        }

        double largest_area = 0;
        int largest_contour_index = -1;
        Rect bounding_rect;
        Mat dst(src.rows, src.cols, CV_8UC1, Scalar::all(0));

        cvtColor(src, imgGrayScale, CV_BGR2GRAY);             // convert to gray
        GaussianBlur(imgGrayScale, imgBlurred, Size(5, 5), 1.8);
        Canny(imgBlurred, imgCanny, 45, 90);                  // edge detection

        vector<vector<Point> > contours;                      // vector for storing contours
        vector<Vec4i> hierarchy;
        findContours(imgCanny, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE); // find the contours in the image

        for (size_t i = 0; i < contours.size(); i++)          // iterate through each contour
        {
            double a = contourArea(contours[i], false);       // area of the contour
            if (a > largest_area)
            {
                largest_area = a;
                largest_contour_index = (int)i;               // store the index of the largest contour
                bounding_rect = boundingRect(contours[i]);    // bounding rectangle of the biggest contour
            }
        }

        if (largest_contour_index >= 0)
        {
            Scalar color(255, 255, 255);
            drawContours(dst, contours, largest_contour_index, color, CV_FILLED, 8, hierarchy); // draw the largest contour
            rectangle(src, bounding_rect, Scalar(0, 255, 0), 1, 8, 0);
        }

        imshow("src", src);
        imshow("largest Contour", dst);
        if (waitKey(30) == 27)                                // exit on ESC
            break;
    }
    return 0;
}
And here are the result windows that the program generates and the image of the object that I want to detect and sort.
Thank you all in advance for any clues on how to achieve my goal.
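Since the actual goal is to accept or reject each soap bar by size, one possible extension right after the contour loop (a minimal sketch; the two area limits below are made-up placeholders, not values from the post, and would need calibrating for your camera distance and resolution) could be:

// Hypothetical calibration values - tune for your camera setup.
const double minAcceptedArea = 5000.0;
const double maxAcceptedArea = 20000.0;

bool accepted = false;
if (largest_contour_index >= 0)
    accepted = (largest_area >= minAcceptedArea && largest_area <= maxAcceptedArea);

// Overlay the decision on the preview frame.
putText(src, accepted ? "ACCEPTED" : "REJECTED", Point(10, 30),
        FONT_HERSHEY_SIMPLEX, 1.0,
        accepted ? Scalar(0, 255, 0) : Scalar(0, 0, 255), 2);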

How to count white objects in a binary image?

I'm trying to count objects in an image. I'm using a photo of logs, and I use a few steps to get a binary image.
This is my code:
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main(int argc, char *argv[])
{
    // load image
    Mat img = imread("kayu.jpg", CV_LOAD_IMAGE_COLOR);
    if (img.empty())
        return -1;
    //namedWindow( "kayu", CV_WINDOW_AUTOSIZE );
    imshow("kayu", img);

    // convert to b/w
    Mat bw;
    cvtColor(img, bw, CV_BGR2GRAY);
    imshow("bw1", bw);
    threshold(bw, bw, 40, 255, CV_THRESH_BINARY);
    imshow("bw", bw);

    // distance transform & normalization
    Mat dist;
    distanceTransform(bw, dist, CV_DIST_L2, 3);
    normalize(dist, dist, 0, 2., NORM_MINMAX);
    imshow("dist", dist);

    // threshold the distance map to separate the logs
    threshold(dist, dist, .5, 1., CV_THRESH_BINARY);
    imshow("dist2", dist);

    // erosion and dilation with an elliptical structuring element
    Mat dilation, erotion, element;
    int dilation_type = MORPH_ELLIPSE;
    int dilation_size = 17;
    element = getStructuringElement(dilation_type, Size(2*dilation_size + 1, 2*dilation_size + 1), Point(dilation_size, dilation_size));
    erode(dist, erotion, element);
    int erotionCount = 0;                 // extra erosion passes (currently none)
    for (int i = 0; i < erotionCount; i++) {
        erode(erotion, erotion, element);
    }
    imshow("erotion", erotion);
    dilate(erotion, dilation, element);
    imshow("dilation", dilation);

    waitKey(0);
    return 0;
}
As you can see, I use erosion and dilation to get better circular objects for the logs. My problem is that I'm stuck at counting the objects. I tried SimpleBlobDetector but got nothing, because when I try to convert the result of the "dilation" step to CV_8U, the white objects disappear. I also got an error when I used findContours(); it said something about the number of channels of the image. I can't show the error here, because there were too many steps and I've already deleted that code from my project.
By the way, at the end I have a 1-channel image.
Can I just use it for counting, or do I have to convert it first, and what is the best method to do that?
Two simple steps:
Find contours for the binarized image.
Get the count of the contours.
Code:
int count_trees(const cv::Mat& bin_image)
{
    cv::Mat img;
    if (bin_image.channels() > 1) {
        cv::cvtColor(bin_image, img, cv::COLOR_BGR2GRAY);
    }
    else {
        img = bin_image.clone();
    }
    if (img.type() != CV_8UC1) {
        // The thresholded distance map is a float image in [0,1],
        // so scale it up while converting (untested, adjust if needed).
        img.convertTo(img, CV_8UC1, 255.0);
    }
    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Vec4i> hierarchy;
    cv::findContours(img, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
    return (int)contours.size();
}
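A quick usage sketch with the images from the question (the function handles the float-to-8-bit conversion internally, so the dilation result can be passed straight in):

int n = count_trees(dilation);
std::cout << "Number of logs: " << n << std::endl;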
I have the same problem; here's an idea I'm about to implement (a short OpenCV sketch of the same idea follows below).
1) Represent your image as an array of integers: 0 = black, 1 = white.
2) Set N = 2.
3) Scan your image pixel by pixel. Whenever you find a white pixel, start a flood-fill algorithm at that pixel and paint the region with the value N++.
4) Repeat step 3 until you reach the last pixel. (N - 2) is the number of regions found.
This method depends on the shape of the objects; mine are more chaotic than yours (wish me luck). I'll use a recursive flood-fill recipe found somewhere (maybe Rosetta Code).
This solution also makes it easy to compute the size of each region.
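If you would rather not write the flood fill yourself, OpenCV's cv::floodFill can do the region painting for you. A rough sketch of the same counting idea (assuming a clean CV_8UC1 binary image where white is 255):

#include <opencv2/imgproc/imgproc.hpp>

// Count the white regions by flood-filling each unvisited white pixel
// with a gray "visited" value (sketch only, 4-connectivity by default).
int countRegions(const cv::Mat& binary)
{
    cv::Mat bin = binary.clone();           // work on a copy, floodFill modifies it
    int regions = 0;
    for (int y = 0; y < bin.rows; y++)
        for (int x = 0; x < bin.cols; x++)
            if (bin.at<uchar>(y, x) == 255)
            {
                cv::floodFill(bin, cv::Point(x, y), cv::Scalar(64)); // paint this region
                regions++;
            }
    return regions;
}

Newer OpenCV versions (3.0+) also provide cv::connectedComponents, which does this labelling in a single call.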
Try applying this to the findContours result you deleted (it assumes contours and hierarchy come from findContours with a hierarchical retrieval mode such as CV_RETR_CCOMP):
// count the contours that have no child contour
int count = 0;
for (int i = 0; i >= 0 && i < (int)contours.size(); i = hierarchy[i][0]) // walk the top-level contours via the hierarchy "next" link
{
    Rect r = boundingRect(contours[i]);
    if (hierarchy[i][2] < 0) {        // no child contour: count it
        rectangle(canny_output, Point(r.x, r.y), Point(r.x + r.width, r.y + r.height), Scalar(20, 50, 255), 3, 8, 0);
        count++;
    }
}
cout << "Number of contours = " << count << endl;
imshow("src", src);
imshow("contour", dst);
waitKey(0);

ASL hand sign detection opencv

I am a bit new to OpenCV and could use some help. I want to detect ASL hand signs.
For detecting hands, I can use either skin-color detection or a Haar classifier. I can already detect hands; the problem is detecting the hand shape.
I can get the current hand shape using the algorithm described here, so the problem is how to compare this shape to my database of shapes.
I tried comparing them using the algorithm described here, which detects similar features between images. The problem is that it matches all the hands, since... well, it detects them all as hands. For instance, check this image: it should point only to V, but it detects features in W and R, too.
I want my final result to be like here, so how can I compare image shapes? Is my approach wrong?
I was thinking that detecting by convex hull won't work, because most of the signs are closed fists. Check O, for instance: it has no open fingers, so I thought that trying to compare contours would be best. How do I compare them, though? FLANN doesn't seem to work, or I'm doing it wrong.
Would a Haar cascade classifier work? Or would it detect two hands in different positions as hands as well?
Or is there another way to match shapes? That could solve my problem, but I couldn't find any example that works for custom shapes, only for ones like rectangles, circles and triangles.
Update
Ok, I've been playing a bit with matchShapes as berak suggested. Here's my code below (it's a bit messy, as I'm still testing).
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
Mat src; Mat src_gray;
int thresh = 10;
int max_thresh = 300;
/// Function header
void thresh_callback(int, void* );
/** #function main */
int main( int argc, char** argv )
{
/// Load source image and convert it to gray
src = imread( argv[1], 1 );
/// Convert image to gray and blur it
cvtColor( src, src_gray, CV_BGR2GRAY );
blur( src_gray, src_gray, Size(3,3) );
/// Create Window
char* source_window = "Source";
namedWindow( source_window, CV_WINDOW_AUTOSIZE );
imshow( source_window, src );
createTrackbar( " Canny thresh:", "Source", &thresh, max_thresh, thresh_callback );
thresh_callback( 0, 0 );
waitKey(0);
return(0);
}
/** #function thresh_callback */
void thresh_callback(int, void* )
{
Mat canny_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
double largest_area=0;
int largest_contour_index=0;
Rect bounding_rect;
/// Detect edges using canny
Canny( src_gray, canny_output, thresh, thresh*2, 3 );
/// Find contours
findContours( canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
cout<<contours.size()<<endl;
/// Draw contours
Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 );
vector<vector<Point> >hull( contours.size() );
for( int i = 0; i< contours.size(); i++ )
{ Scalar color = Scalar( 255,255,255 );
convexHull( Mat(contours[i]), hull[i], false );
// imshow("conturul"+to_string(i), drawing );
double a=contourArea( hull[i],false); // Find the area of contour
if(a>largest_area){
largest_area=a;
largest_contour_index=i; //Store the index of largest contour
bounding_rect=boundingRect(hull[i]);}
}
cout<<"zaindex "<<largest_contour_index<<endl;
Scalar color = Scalar( 255,255,255 );
drawContours( drawing, hull, largest_contour_index, color, 2, 8, hierarchy, 0, Point() );
namedWindow( "maxim", CV_WINDOW_AUTOSIZE );
imshow( "maxim", drawing );
Mat rects=imread( "scene.png", 1 );
rectangle(rects, bounding_rect, Scalar(0,255,0),1, 8,0);
imshow( "maxim2", rects );
/// Show in a window
}
The problem with it is the definition of a contour. These hand 'contours' are actually made of multiple contours themselves, and the image I showed earlier is really made of those multiple contours overlapping each other. matchShapes accepts an array of Points as a parameter, but my contours are arrays of arrays of Points.
So my question is: how can I merge my contours vector into one contour so I can pass it to matchShapes? In other words, how can I make a single contour from multiple overlapping contours?
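One way to get a single point set out of those overlapping fragments (just a sketch continuing the callback above, and assuming you are happy with the union of the points rather than a topologically clean outline) is to concatenate every contour into one vector<Point>, optionally take its convex hull, and feed that to matchShapes:

// Merge all contour fragments into one point set.
vector<Point> merged;
for (size_t i = 0; i < contours.size(); i++)
    merged.insert(merged.end(), contours[i].begin(), contours[i].end());

// Optionally collapse it to a single closed outline.
vector<Point> outline;
convexHull(Mat(merged), outline, false);

// Compare against a database shape prepared the same way (hypothetical 'referenceOutline'):
// double score = matchShapes(outline, referenceOutline, CV_CONTOURS_MATCH_I1, 0);

Note that the convex hull step throws away the concavities between fingers, so for closed-fist signs you may prefer to pass the merged point set itself to matchShapes.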

OpenCV C++: How to write a function that does the same as Matlab's bwareaopen function?

I'm trying to convert the bwareaopen function to OpenCV C++ ...
I found this code, but it is not working correctly.
So if anyone has solved this problem and can help me, I would be really delighted.
void removeSmallBlobs(cv::Mat& im, double size)
{
// Only accept CV_8UC1
if (im.channels() != 1 || im.type() != CV_8U)
return;
// Find all contours
std::vector<std::vector<cv::Point> > contours;
cv::findContours(im.clone(), contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
for (int i = 0; i < contours.size(); i++)
{
// Calculate contour area
double area = cv::contourArea(contours[i]);
// Remove small objects by drawing the contour with black color
if (area > 0 && area <= size)
cv::drawContours(im, contours, i, CV_RGB(0, 0, 0), -1);
}
}
I think you need an opening morphological operation. Here you can see an example.
Or take a look here: How to filter small segments from image in OpenCV?
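For reference, a minimal opening sketch (assuming im is the CV_8UC1 binary image from the question; the kernel size is a placeholder you would tune to the smallest blob you want to keep):

// Opening = erosion followed by dilation: removes blobs smaller than the kernel.
cv::Mat opened;
cv::Mat kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(5, 5));
cv::morphologyEx(im, opened, cv::MORPH_OPEN, kernel);

Unlike bwareaopen, this removes objects by their width and shape rather than by exact pixel area, so it is only an approximation.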
I am using cvBlobsLib to implement such a function in OpenCV. You first need to compile and include cvBlobsLib in your project. The library link is here: cvBlobsLib
Because Matlab's Canny function applies a Gaussian blur by default but OpenCV's does not, you should first Gaussian-blur the image to reduce noise. Then detect the Canny edges, and finally delete the edges that are shorter or longer than a given length measured in pixels.
Here is my code.
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "BlobResult.h"

using namespace std;
using namespace cv;

void bwareaopen( Mat& img, int size);

int main()
{
    Mat img;
    img = imread("1.jpg");

    Mat gray;
    cvtColor(img, gray, CV_BGR2GRAY);
    GaussianBlur(gray, gray, Size(7, 7), 2, 2);

    Mat edges;
    Canny(gray, edges, 50, 500, 5, true);
    imshow("raw edge", edges);

    bwareaopen(edges, 800);
    imshow("edge", edges);

    waitKey(0);
    return 0;
}

// Keep only the blobs whose length is greater than 'size', similar to bwareaopen.
void bwareaopen( Mat& img, int size)
{
    CBlobResult blobs;
    blobs = CBlobResult( img, Mat(), 4 );
    blobs.Filter( blobs, B_INCLUDE, CBlobGetLength(), B_GREATER, size );

    Mat newimg(img.size(), img.type());
    newimg.setTo(0);
    for (int i = 0; i < blobs.GetNumBlobs(); i++)
    {
        blobs.GetBlob(i)->FillBlob(newimg, CV_RGB(255,255,255), 0, 0, true);
    }
    img = newimg;
}
I had the same problem. I changed the line
if (area > 0 && area <= size)
to
if (area <= size)
This was after I found that many small blobs had an area of 0. That worked for me.

OpenCV2.4.6 and VS12 error on contour drawing

I am trying to extract and draw contours from an image.
The code I am using is mostly taken from the OpenCV sample code, but whenever I run it I get an exception with the message:
"First-chance exception at 0x000007FEFDCA9E5D in test1.exe: Microsoft C++ exception: cv::Exception at memory location 0x000000000028EB40.
If there is a handler for this exception, the program may be safely continued."
I've been looking for a solution, but so far I've only found similar issues.
Is there a fix for the following code? It is fairly common code for anyone trying to draw contours, so a general or optimal solution for contour extraction would also help others facing the same error.
#include "stdafx.h"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>

using namespace cv;
using namespace std;

int main( int argc, const char** argv )
{
    Mat img1 = imread("ima1.JPG", CV_LOAD_IMAGE_UNCHANGED);
    Mat canny_img1;
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    RNG rng(12345);

    /* Using the Canny filter for feature extraction in image 1 */
    Canny(img1, canny_img1, 1, 3, 3, 0);

    /* Find contours */
    findContours( canny_img1, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );

    /// Draw contours
    Mat drawing = Mat::zeros( canny_img1.size(), CV_8UC3 );
    for( size_t i = 0; i < contours.size(); i++ )
    {
        Scalar color = Scalar( rng.uniform(0, 255), rng.uniform(0,255), rng.uniform(0,255) );
        drawContours( drawing, contours, (int)i, color, 2, 8, hierarchy, 0, Point() );
    }

    /// Show in a window
    namedWindow( "Contours", CV_WINDOW_AUTOSIZE );
    imshow( "Contours", drawing );
    waitKey(0);
    return 0;
}
For the sample code to run, you need to have your VS project configured correctly.
You can follow this guide: http://docs.opencv.org/doc/tutorials/introduction/windows_visual_studio_Opencv/windows_visual_studio_Opencv.html?highlight=visual%20studio%20installation
Some of the main points; you have to go to the project properties and set a few things:
1) Static or dynamic library: select the appropriate directory under "Additional Library Directories". Depending on whether you build the project in Debug or Release mode, link the matching .lib files; the Debug versions have a "d" suffix before the extension.
2) Depending on whether you are building a 32-bit or a 64-bit project, add the appropriate DLL directory to the system PATH.
3) Add "C:\opencv\build\include" to the "Additional Include Directories".
Hope it will help.
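For example, with the prebuilt OpenCV 2.4.6 package unpacked to C:\opencv (the exact paths, compiler folder and bitness below are assumptions; adapt them to your installation), the settings typically end up looking like this:

Additional Include Directories:  C:\opencv\build\include
Additional Library Directories:  C:\opencv\build\x86\vc11\lib   (pick x64 and/or vc10/vc11/vc12 to match your compiler)
Linker -> Input (Debug):         opencv_core246d.lib; opencv_imgproc246d.lib; opencv_highgui246d.lib
Linker -> Input (Release):       opencv_core246.lib; opencv_imgproc246.lib; opencv_highgui246.lib
System PATH (or copy the DLLs):  C:\opencv\build\x86\vc11\bin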