Removing lines from an image - C++

I am a beginner in OpenCV. I need to remove the horizontal and vertical lines in the image so that only the text remains (the lines were causing trouble when extracting text via OCR). I am trying to extract text from a Nutrition Facts table. Can anyone help me?

This was an interesting question, so I gave it a shot. Below I will show you how to extract and remove horizontal and vertical lines; you can extrapolate from there. Also, for the sake of saving time, I did not preprocess your image to crop out the background as one ideally should; that is an avenue for improvement.
The result:
The code (edit: added vertical lines):
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;
int main(int, char** argv)
{
// Load the image
Mat src = imread(argv[1]);
// Check if image is loaded fine
if(!src.data)
{
cerr << "Problem loading image!!!" << endl;
return -1;
}
Mat gray;
if (src.channels() == 3)
{
cvtColor(src, gray, CV_BGR2GRAY);
}
else
{
gray = src;
}
//inverse binary img
Mat bw;
//this will hold the result, image to be passed to OCR
Mat fin;
//I find OTSU binarization best for text.
//Would perform better if background had been cropped out
threshold(gray, bw, 0, 255, THRESH_BINARY_INV | THRESH_OTSU);
threshold(gray, fin, 0, 255, THRESH_BINARY | THRESH_OTSU);
imshow("binary", bw);
Mat dst;
Canny( fin, dst, 50, 200, 3 );
Mat str = getStructuringElement(MORPH_RECT, Size(3,3));
dilate(dst, dst, str, Point(-1, -1), 3);
imshow("dilated_canny", dst);
//bitwise_and w/ canny image helps w/ background noise
bitwise_and(bw, dst, dst);
imshow("and", dst);
//Images that will hold the horizontal and vertical lines
Mat horizontal = dst.clone();
Mat vertical = dst.clone();
fin = ~dst;
//Selected this value arbitrarily
int horizontalsize = horizontal.cols / 30;
Mat horizontalStructure = getStructuringElement(MORPH_RECT, Size(horizontalsize,1));
erode(horizontal, horizontal, horizontalStructure, Point(-1, -1));
dilate(horizontal, horizontal, horizontalStructure, Point(-1, -1), 1);
imshow("horizontal_lines", horizontal);
//Need to find horizontal contours, so as to not damage letters
vector<Vec4i> hierarchy;
vector<vector<Point> >contours;
findContours(horizontal, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_NONE);
for (const auto& c : contours)
{
Rect r = boundingRect(c);
float percentage_height = (float)r.height / (float)src.rows;
float percentage_width = (float)r.width / (float)src.cols;
//These exclude contours that probably are not dividing lines
if (percentage_height > 0.05)
continue;
if (percentage_width < 0.50)
continue;
//fills in line with white rectangle
rectangle(fin, r, Scalar(255,255,255), CV_FILLED);
}
int verticalsize = vertical.rows / 30;
Mat verticalStructure = getStructuringElement(MORPH_RECT, Size(1,verticalsize));
erode(vertical, vertical, verticalStructure, Point(-1, -1));
dilate(vertical, vertical, verticalStructure, Point(-1, -1), 1);
imshow("verticalal", vertical);
findContours(vertical, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_NONE);
for (const auto& c : contours)
{
Rect r = boundingRect(c);
float percentage_height = (float)r.height / (float)src.rows;
float percentage_width = (float)r.width / (float)src.cols;
//These exclude contours that probably are not dividing lines
if (percentage_width > 0.05)
continue;
if (percentage_height < 0.50)
continue;
//fills in line with white rectangle
rectangle(fin, r, Scalar(255,255,255), CV_FILLED);
}
imshow("Result", fin);
waitKey(0);
return 0;
}
The limitation of this approach is that the lines need to be straight. Due to the curve in the bottom line, it cuts slightly into the "E" in "Energy". Perhaps a similar but more robust approach could be devised with Hough line detection, as was suggested (I've never used it); a sketch follows below. Also, filling in the lines with rectangles is probably not the best approach.
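For what it's worth, here is a minimal, untested sketch of that Hough variant, reusing the bw and fin images from the code above. The accumulator threshold of 100, the minimum segment length, and the stroke thickness of 3 are guesses that would need tuning:
//Hedged sketch: detect long straight segments with HoughLinesP and paint them out
vector<Vec4i> lines;
//assumed: dividing lines span at least half of the smaller image dimension
int minLen = min(src.cols, src.rows) / 2;
HoughLinesP(bw, lines, 1, CV_PI/180, 100, minLen, 20);
for (const Vec4i& l : lines)
{
    //overwrite each detected segment in the OCR image with a thick white stroke
    line(fin, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(255,255,255), 3);
}
Lowering the minimum segment length would let this follow mildly curved lines as a chain of shorter segments.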


Basic shape recognition (OpenCV C++)

I have a project that I need to make for classes, and I chose a task that is a bit beyond my skills.
The target is to count the result of dice rolls.
For now, I'm trying to make it work on a sample pic:
sample pic of dice
and my current code is added below:
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"
using namespace cv;
using namespace std;
Mat KostkaFunkcja(Mat image, Mat in, Scalar low, Scalar high);
int getMaxAreaContourId(vector <vector<cv::Point>> contours);
vector<Point> contoursConvexHull(vector<vector<Point> > contours, int index);
Mat ZnakiFunkcja(Mat image, Mat in, Scalar low, Scalar high);
int main(int argc, char** argv)
{
Mat image;
image = imread("kostki.jpg", CV_LOAD_IMAGE_COLOR);
if (!image.data)
{
cout << "Could not open or find the image" << std::endl;
return -1;
}
Mat imgHSV;
Mat workimage = image;
cvtColor(workimage, imgHSV, COLOR_BGR2HSV); //Convert the captured frame from BGR to HSV
//red dice
workimage = KostkaFunkcja(workimage, imgHSV, Scalar(146, 0, 31), Scalar(179, 255, 255));
//green dice
workimage = KostkaFunkcja(workimage, imgHSV, Scalar(25, 147, 0), Scalar(98, 255, 154));
//yellow dice
workimage = KostkaFunkcja(workimage, imgHSV, Scalar(22, 45, 161), Scalar(91, 255, 255));
//black dice
workimage = KostkaFunkcja(workimage, imgHSV, Scalar(98, 0, 0), Scalar(179, 232, 107));
//white symbols
workimage = ZnakiFunkcja(workimage, imgHSV, Scalar(58, 0, 183), Scalar(179, 145, 255));
namedWindow("Kostki_kontur", CV_WINDOW_AUTOSIZE);
imshow("Kostki_kontur", workimage);
waitKey(0);
return 0;
}
Mat KostkaFunkcja(Mat image, Mat in, Scalar low, Scalar high)
{
Mat temp;
inRange(in, low, high, temp);
erode(temp, temp, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
dilate(temp, temp, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
dilate(temp, temp, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
erode(temp, temp, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
Mat srcBlur, srcCanny;
blur(temp, srcBlur, Size(3, 3));
Canny(srcBlur, srcCanny, 0, 100, 3, true);
vector<vector<Point> > contours;
findContours(srcCanny, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
int largest_contour_index = getMaxAreaContourId(contours);
Mat drawing = Mat::zeros(srcCanny.size(), CV_8UC3);
for (int i = 0; i< contours.size(); i++)
{
Scalar color = Scalar(255, 255, 255);
drawContours(drawing, contours, i, color, 2);
}
vector<Point> ConvexHullPoints = contoursConvexHull(contours, largest_contour_index);
polylines(image, ConvexHullPoints, true, Scalar(0, 0, 255), 2);
return image;
}
vector<Point> contoursConvexHull(vector<vector<Point> > contours, int index)
{
vector<Point> result;
vector<Point> pts;
for (size_t j = 0; j< contours[index].size(); j++)
pts.push_back(contours[index][j]);
convexHull(pts, result);
return result;
}
int getMaxAreaContourId(vector <vector<cv::Point>> contours)
{
double maxArea = 0;
int maxAreaContourId = -1;
for (int j = 0; j < contours.size(); j++) {
double newArea = cv::contourArea(contours.at(j));
if (newArea > maxArea) {
maxArea = newArea;
maxAreaContourId = j;
}
}
return maxAreaContourId;
}
Mat ZnakiFunkcja(Mat image, Mat in, Scalar low, Scalar high)
{
Mat temp;
inRange(in, low, high, temp);
erode(temp, temp, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
dilate(temp, temp, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
dilate(temp, temp, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
erode(temp, temp, getStructuringElement(MORPH_ELLIPSE, Size(5, 5)));
Mat srcBlur, srcCanny;
blur(temp, srcBlur, Size(3, 3));
Canny(srcBlur, srcCanny, 0, 100, 3, true);
vector<vector<Point> > contours;
findContours(srcCanny, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
Mat drawing = Mat::zeros(srcCanny.size(), CV_8UC3);
for (int i = 0; i < contours.size(); i++)
{
Scalar color = Scalar(255, 255, 255);
drawContours(drawing, contours, i, color, 2);
}
polylines(image, contours, true, Scalar(0, 0, 255), 2);
return image;
}
Yet I have no idea how to count the different shapes (hearts, lightnings, shields, numbers).
I will be grateful if anybody can give me a tip or a solution for how to do the job.
1) sorry for my bad English
2) we had no OpenCV in classes [only basic C++]
3) I tried to find anything useful on the internet, but even when I found something, I couldn't understand what was happening in the code
Your project can be split into three steps:
find the dice.
extract the shapes from the visible face of the dice.
count the shapes.
For the first step, among all the possible approaches I think saliency map approaches can help.
Saliency maps are a family of segmentation algorithms that aim to detect the parts of an image which are most likely to attract visual attention.
OpenCV has a saliency API that already implements several saliency algorithms, and for each of them you can get a segmentation map.
Considering the example image you gave, it is highly likely that the saliency will focus on the dice.
From this you can extract the dice as ROIs from the original image.
For step 2, saliency algorithms may also fit... or not; that depends a lot on the statistical criteria used by the algorithm.
However, the previously extracted ROIs should contain only the face of the die with the shapes you want to count in step 3, so approaches based on contour detection may give quite good results.
Once you have the shapes, to count each kind you can use template matching (also already implemented in OpenCV), a clustering approach based on a shape-sensitive metric (Hausdorff, Dice, ...), or many others; a sketch of the template-matching option follows.
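For step 3, here is a minimal sketch of the template-matching option. It is untested; the file names, the 0.8 score threshold, and the suppression rectangle are placeholder assumptions:
// Hedged sketch: count occurrences of one symbol in a face ROI with matchTemplate.
// "face_roi.png" and "heart_template.png" are hypothetical inputs.
cv::Mat face = cv::imread("face_roi.png", cv::IMREAD_GRAYSCALE);
cv::Mat templ = cv::imread("heart_template.png", cv::IMREAD_GRAYSCALE);
cv::Mat scores;
cv::matchTemplate(face, templ, scores, cv::TM_CCOEFF_NORMED);
int count = 0;
for(;;)
{
    double maxVal;
    cv::Point maxLoc;
    cv::minMaxLoc(scores, nullptr, &maxVal, nullptr, &maxLoc);
    if(maxVal < 0.8) // assumed confidence threshold
        break;
    ++count;
    // zero out the neighbourhood of this match so minMaxLoc finds the next one
    cv::Rect suppress(maxLoc.x - templ.cols / 2, maxLoc.y - templ.rows / 2, templ.cols, templ.rows);
    suppress &= cv::Rect(0, 0, scores.cols, scores.rows);
    scores(suppress).setTo(0);
}
std::cout << count << " matches" << std::endl;
Running this once per symbol template would give you a per-shape count.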
Here is some code that can help you deal with the first two steps.
#ifndef _DEBUG
#define _DEBUG
#endif
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/saliency.hpp>
#include <opencv2/highgui.hpp>
#include <list>
CV_EXPORTS_W void get_regions_of_interest(cv::InputArray _src, cv::OutputArrayOfArrays mv, cv::OutputArrayOfArrays mv2 = cv::noArray());
int main()
{
cv::Mat tmp = cv::imread("C:\\Desktop\\dices.jpg");
if(tmp.empty())
{
std::cout << "Could not open or find the image" << std::endl;
return -1;
}
cv::imshow("source",tmp);
cv::waitKey(-1);
std::vector<cv::Mat> rois;
get_regions_of_interest(tmp,rois);
std::cout << "Hello World!" << std::endl;
return 0;
}
void get_regions_of_interest(cv::InputArray _src, cv::OutputArrayOfArrays _rois, cv::OutputArrayOfArrays _contours)
{
// Check that the first argument is an image and the second a vector of images.
CV_Assert(_src.isMat() && !_src.depth() && (_src.channels() == 1 || _src.channels() == 3) && _rois.isMatVector() && (!_contours.needed() || (_contours.needed() && _contours.isMatVector()) ) );
static cv::Ptr<cv::saliency::StaticSaliencySpectralResidual> saliency;
if(!saliency)
saliency = cv::saliency::StaticSaliencySpectralResidual::create();
cv::Mat src = _src.getMat();
cv::Mat gray;
if(src.depth() == src.type())
gray = src;
else
cv::cvtColor(src,gray,cv::COLOR_BGR2GRAY);
bool is_ctr_needed = _contours.needed();
std::list<cv::Mat> final_ctrs;
// Step 1) Process the saliency in order to segment the dices.
cv::Mat saliency_map;
cv::Mat binary_map;
saliency->computeSaliency(src,saliency_map);
saliency->computeBinaryMap(saliency_map,binary_map);
saliency_map.release();
// Step 2) From the binary map get the regions of interest.
cv::Mat1i stats;
std::vector<cv::Mat> rois;
cv::Mat labels;
cv::Mat centroids;
cv::connectedComponentsWithStats(binary_map, labels, stats, centroids);
labels.release();
centroids.release();
// prepare the memory
rois.reserve(stats.rows-1);
// Drop the last column (the area) so that each row of stats can be reinterpreted as a cv::Rect.
stats = stats.colRange(0,stats.cols-1);
// Extract the rois.
for(int i=0;i<stats.rows;i++)
{
cv::Rect roi = *reinterpret_cast<cv::Rect*>(stats.ptr<int>(i));
if(static_cast<std::size_t>(roi.area()) == gray.total())
continue;
rois.push_back(gray(roi));
#ifdef _DEBUG
cv::imshow("roi_"+std::to_string(i),gray(roi));
#endif
}
// Step 3) Refine.
// Because the final number of shapes cannot be determined in advance, it is better to use a linked list than a vector.
// In practice, unless there is a huge number of elements to work with, the performance will be almost the same.
std::list<cv::Mat> shapes;
int cnt=0;
for(const cv::Mat& roi : rois)
{
cv::Mat tmp = roi.clone();
// Slightly sharpen the regions contours
cv::morphologyEx(tmp,tmp, cv::MORPH_CLOSE, cv::noArray());
// Reduce the influence of local unhomogeneous illumination.
cv::GaussianBlur(tmp,tmp,cv::Size(31,31), 5);
cv::Mat thresh;
// Binarize the preprocessed image.
cv::threshold(tmp,thresh,0.,255.,cv::THRESH_BINARY | cv::THRESH_OTSU);
#ifdef _DEBUG
cv::imshow("thresh"+std::to_string(cnt++),thresh);
#endif
// Find the contours of each sub region on interest
std::vector<cv::Mat> contours;
cv::findContours(thresh, contours, cv::RETR_TREE, cv::CHAIN_APPROX_SIMPLE);
cv::Mat dc;
cv::merge(std::vector<cv::Mat>(3,thresh),dc);
// cv::drawContours(dc, contours,-1,cv::Scalar(0.,0.,255),2);
// cv::imshow("ctrs"+std::to_string(cnt),dc);
// Extract the sub-regions
if(is_ctr_needed)
{
for(const cv::Mat& ctrs: contours)
{
cv::Rect croi = cv::boundingRect(ctrs);
// If the sub-region is too big or too small, it is discarded
if(static_cast<std::size_t>(croi.area()) == roi.total() || croi.area()<50)
continue;
final_ctrs.push_back(ctrs);
shapes.push_back(roi(croi));
#ifdef _DEBUG
cv::rectangle(dc,croi,cv::Scalar(0.,0.,255.));
cv::imshow("sub_roi_"+std::to_string(cnt++),roi(croi));
#endif
}
}
else
{
for(const cv::Mat& ctrs: contours)
{
cv::Rect croi = cv::boundingRect(ctrs);
// If the sub-region is too big or too small, it is discarded
if(static_cast<std::size_t>(croi.area()) == roi.total() || croi.area()<50)
continue;
shapes.push_back(roi(croi));
#ifdef _DEBUG
cv::rectangle(dc,croi,cv::Scalar(0.,0.,255.));
cv::imshow("sub_roi_"+std::to_string(cnt++),roi(croi));
#endif
}
}
}
#ifdef _DEBUG
cv::waitKey(-1);
#endif
// Final Step: set the output
_rois.create(shapes.size(),1,CV_8U);
_rois.assign(std::vector<cv::Mat>(shapes.begin(),shapes.end()));
if(is_ctr_needed)
{
_contours.create(final_ctrs.size(),1,CV_32SC2);
_contours.assign(std::vector<cv::Mat>(final_ctrs.begin(), final_ctrs.end()));
}
}

OpenCV: how can I interpret the results of inRange?

I am processing video images and I would like to detect if the video contains any pixels of a certain range of red. Is this possible?
Here is the code I am adapting from a tutorial:
#ifdef __cplusplus
- (void)processImage:(Mat&)image;
{
cv::Mat orig_image = image.clone();
cv::medianBlur(image, image, 3);
cv::Mat hsv_image;
cv::cvtColor(image, hsv_image, cv::COLOR_BGR2HSV);
cv::Mat lower_red_hue_range;
cv::Mat upper_red_hue_range;
cv::inRange(hsv_image, cv::Scalar(0, 100, 100), cv::Scalar(10, 255, 255), lower_red_hue_range);
cv::inRange(hsv_image, cv::Scalar(160, 100, 100), cv::Scalar(179, 255, 255), upper_red_hue_range);
// Interpret values here
}
Interpreting values
I would like to detect whether the results of the inRange operations are nil (empty) or not. In other words, I want to know if there are any pixels in the original image whose colour is in range of the given lower and upper red bounds. How can I interpret the results?
First you need to OR the lower and upper masks:
Mat mask = lower_red_hue_range | upper_red_hue_range;
Then you can use countNonZero to see if there are any non-zero pixels (i.e. you found something).
int number_of_non_zero_pixels = countNonZero(mask);
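So the presence test you asked about reduces to a simple comparison:
bool red_found = number_of_non_zero_pixels > 0;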
It could be better to first apply morphological erosion or opening to remove small (probably noisy) blobs:
Mat kernel = getStructuringElement(MORPH_ELLIPSE, Size(3, 3));
morphologyEx(mask, mask, MORPH_OPEN, kernel); // or MORPH_ERODE
or find connected components (findContours, connectedComponentsWithStats) and prune / search according to some criteria (a connectedComponentsWithStats variant is sketched after the code below):
vector<vector<Point>> contours;
findContours(mask.clone(), contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
double threshold_on_area = 100.0;
for(int i=0; i<contours.size(); ++i)
{
double area = contourArea(contours[i]);
if(area < threshold_on_area)
{
// don't consider this contour
continue;
}
else
{
// do something (e.g. drawing a bounding box around the contour)
Rect box = boundingRect(contours[i]);
rectangle(hsv_image, box, Scalar(0, 255, 255));
}
}
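If you prefer the connectedComponentsWithStats route mentioned above, here is a roughly equivalent sketch (untested; it needs OpenCV 3.x and uses the same assumed area threshold of 100):
// Hedged sketch: prune small components with connectedComponentsWithStats
Mat labels, stats, centroids;
int n_labels = connectedComponentsWithStats(mask, labels, stats, centroids);
for(int i = 1; i < n_labels; ++i) // label 0 is the background
{
    if(stats.at<int>(i, CC_STAT_AREA) < 100)
        continue; // too small, probably noise
    // stats stores the bounding box of each component
    Rect box(stats.at<int>(i, CC_STAT_LEFT), stats.at<int>(i, CC_STAT_TOP),
             stats.at<int>(i, CC_STAT_WIDTH), stats.at<int>(i, CC_STAT_HEIGHT));
    rectangle(hsv_image, box, Scalar(0, 255, 255));
}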

Skew angle detection on an image with scattered characters

I've been following this tutorial to get the skew angle of an image. It seems like HoughLinesP struggles to find lines when characters are a bit scattered across the target image.
This is my input image:
These are the lines HoughLinesP has found:
It's not really getting most of the lines, and it seems pretty obvious to me why: I've set my minLineLength to (size.width / 2.f). The point is that, because of the few lines it has found, the computed skew angle is also wrong (-3.15825 in this case, when it should be something close to 0.5).
I've tried eroding my input image to bring the characters closer together, and in that case it seems to work out, but I don't feel this is the best approach for situations akin to it.
This is my eroded input image:
These are the lines HoughLinesP has found:
This time it found a skew angle of -0.2185 degrees, which is what I was expecting, but on the other hand it loses the vertical space between the lines, which in my humble opinion isn't a good thing.
Is there another way to pre-process this kind of image so that HoughLinesP gets better results for scattered characters?
Here is the source code I'm using:
#include <iostream>
#include <opencv2/opencv.hpp>
using namespace std;
static cv::Scalar randomColor( cv::RNG& rng )
{
int icolor = (unsigned) rng;
return cv::Scalar( icolor&255, (icolor>>8)&255, (icolor>>16)&255 );
}
void rotate(cv::Mat& src, double angle, cv::Mat& dst)
{
int len = std::max(src.cols, src.rows);
cv::Point2f pt(len/2., len/2.);
cv::Mat r = cv::getRotationMatrix2D(pt, angle, 1.0);
cv::warpAffine(src, dst, r, cv::Size(len, len));
}
double compute_skew(cv::Mat& src)
{
// Random number generator
cv::RNG rng( 0xFFFFFFFF );
cv::Size size = src.size();
cv::bitwise_not(src, src);
std::vector<cv::Vec4i> lines;
cv::HoughLinesP(src, lines, 1, CV_PI/180, 100, size.width / 2.f, 20);
cv::Mat disp_lines(size, CV_8UC3, cv::Scalar(0, 0, 0));
double angle = 0.;
unsigned nb_lines = lines.size();
for (unsigned i = 0; i < nb_lines; ++i)
{
cv::line(disp_lines, cv::Point(lines[i][0], lines[i][1]),
cv::Point(lines[i][2], lines[i][3]), randomColor(rng));
angle += atan2((double)lines[i][3] - lines[i][1],
(double)lines[i][2] - lines[i][0]);
}
if (nb_lines > 0)
angle /= nb_lines; // mean angle, in radians.
std::cout << angle * 180 / CV_PI << std::endl;
cv::imshow("HoughLinesP", disp_lines);
cv::waitKey(0);
return angle * 180 / CV_PI;
}
int main()
{
// Load in grayscale.
cv::Mat img = cv::imread("IMG_TESTE.jpg", 0);
cv::Mat rotated;
double angle = compute_skew(img);
rotate(img, angle, rotated);
//Show image
cv::imshow("Rotated", rotated);
cv::waitKey(0);
}
Cheers
I'd suggest finding individual components first (i.e., the lines and the letters), for example using cv::threshold and cv::findContours.
Then, you could drop the individual components that are narrow (i.e., the letters). You can do this using cv::floodFill for example. This should leave you with the lines only.
Effectively, getting rid of the letters might provide easier input for the Hough transform.
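A minimal sketch of that idea, assuming img is the grayscale input; the Otsu binarization and the "ten times wider than tall" heuristic are assumptions to tune:
// Hedged sketch: keep only components that are much wider than tall (line-like)
Mat bin;
threshold(img, bin, 0, 255, CV_THRESH_BINARY_INV | CV_THRESH_OTSU); // ink becomes white
vector<vector<Point>> comps;
findContours(bin.clone(), comps, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
Mat linesOnly = Mat::zeros(bin.size(), CV_8U);
for(size_t i = 0; i < comps.size(); ++i)
{
    Rect r = boundingRect(comps[i]);
    // assumed heuristic: letters are narrow, dividing lines are long and flat
    if(r.width > 10 * r.height)
        drawContours(linesOnly, comps, (int)i, Scalar(255), CV_FILLED);
}
// linesOnly should now be much cleaner input for the Hough transform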
Try to detect groups of characters as blocks, then find the contours of these blocks. Below I've done this using blurring, a morphological opening, and a threshold operation.
Mat im = imread("yCK4t.jpg", 0);
Mat blurred;
GaussianBlur(im, blurred, Size(5, 5), 2, 2);
Mat kernel = getStructuringElement(MORPH_ELLIPSE, Size(3, 3));
Mat morph;
morphologyEx(blurred, morph, CV_MOP_OPEN, kernel);
Mat bw;
threshold(morph, bw, 0, 255, CV_THRESH_BINARY_INV | CV_THRESH_OTSU);
Mat cont = Mat::zeros(im.rows, im.cols, CV_8U);
vector<vector<Point>> contours;
vector<Vec4i> hierarchy;
findContours(bw, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
for(int idx = 0; idx >= 0; idx = hierarchy[idx][0])
{
drawContours(cont, contours, idx, Scalar(255, 255, 255), 1);
}
Then use the Hough line transform on the contour image.
With an accumulator threshold of 80, I get the following lines, which result in an angle of -3.81 degrees. This is high because of an outlier line that is almost vertical. With this approach, the majority of the lines will have similar angle values, except for a few outliers; detecting and discarding the outliers will give you a better approximation of the angle.
vector<Vec4i> lines;
Size size = im.size();
HoughLinesP(cont, lines, 1, CV_PI/180, 80, size.width / 4.0f, size.width / 8.0f);
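One simple way to discard those outliers is to take the median of the per-segment angles instead of the mean. A sketch (untested), reusing the lines vector from the call above:
// Hedged sketch: the median angle is robust against a few near-vertical outliers
vector<double> angles;
for(size_t i = 0; i < lines.size(); ++i)
    angles.push_back(atan2((double)lines[i][3] - lines[i][1],
                           (double)lines[i][2] - lines[i][0]));
sort(angles.begin(), angles.end());
double median_angle = angles.empty() ? 0.0 : angles[angles.size() / 2]; // radians
double skew_degrees = median_angle * 180.0 / CV_PI;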

OpenCV: extracting all horizontal and vertical lines separately using morphological operations

I am new to OpenCV. I am working with OpenCV and C++ in Visual Studio 2013. I have a task to separately extract all the horizontal and vertical lines of a document containing a table, and to use those lines to extract the cells of the table.
I can only use morphological operations to achieve this.
Can anyone suggest a procedure to achieve that?
Here is a sample document.
I finally got the output.
Here is the code:
string src = "d://sabari//23.jpg";
Mat im = imread(src);
Mat gray;
if (im.channels() == 3)
{
cvtColor(im, gray, CV_BGR2GRAY);
}
else
{
gray = im;
}
adaptiveThreshold(~gray, gray, 255, CV_ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 15, -2);
Mat vertical = gray.clone();
int horizontalsize = gray.cols / 30;
Mat structure = getStructuringElement(MORPH_RECT, Size(horizontalsize,1));
erode(gray, gray,structure, Point(-1, -1));
dilate(gray, gray,structure, Point(-1, -1));
imshow("ans", gray);
imwrite("d://out2.jpg", gray);
int verticalsize = vertical.rows / 30;
Mat verticalStructure = getStructuringElement(MORPH_RECT, Size( 1,verticalsize));
erode(vertical, vertical, verticalStructure, Point(-1, -1));
dilate(vertical, vertical, verticalStructure, Point(-1, -1));
imshow("ans1", vertical);
imwrite("d://out3.jpg", vertical);

Derivatives in OpenCV

I'm writing a program using OpenCV that does text detection and extraction.
I'm using the Sobel derivative in order to do edge detection and have gotten the following result:
But I wish to get the following result:
(I apologize for the blurry image.)
The problem I'm having is that the "blank areas" inside the edges "confuse" the algorithm I'm using: when the algorithm tries to follow the "blank part" separating two lines of text, it gets confused and starts running into the letters themselves instead of keeping between the two lines. I believe this error would be solved by achieving the second result.
Does anyone know what changes I need to make? In the Sobel derivative? Maybe use a different derivative?
Code:
Mat ProfileSeamTextLineExtractor::computeDerivative(){
Mat img = _image;
Mat gradiant_mat;
int scale = 2;
int delta = 0;
int ddepth = CV_16S;
GaussianBlur(img, img, Size(3, 3), 0, 0, BORDER_DEFAULT);
Mat grad_x, grad_y;
Mat abs_grad_x, abs_grad_y;
Sobel(img, grad_x, ddepth, 1, 0, 3, scale, delta, BORDER_DEFAULT);
convertScaleAbs(grad_x, abs_grad_x);
Sobel(img, grad_y, ddepth, 0, 1, 3, scale, delta, BORDER_DEFAULT);
convertScaleAbs(grad_y, abs_grad_y);
/// Total Gradient (approximate)
addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0, gradiant_mat);
return gradiant_mat;
}
Regards,
Try using the second Sobel derivative, adding, normalizing (this may do the same as addWeighted), and then thresholding optimally. I had results similar to yours with different threshold values.
Here's an example:
cv::Mat gray;
cv::cvtColor(image, gray, CV_BGR2GRAY);
cv::medianBlur(gray, gray, 3);
cv::Mat sobel_x, sobel_y, result;
cv::Sobel(gray, sobel_x, CV_32FC1, 2, 0, 5);
cv::Sobel(gray, sobel_y, CV_32FC1, 0, 2, 5);
cv::Mat sum = sobel_x + sobel_y;
cv::normalize(sum, result, 0, 255, CV_MINMAX, CV_8UC1);
//Determine optimal threshold value using THRESH_OTSU.
// This didn't give me optimal results, but was a good starting point.
cv::Mat temp, final;
double threshold = cv::threshold(result, temp, 0, 255, CV_THRESH_BINARY+CV_THRESH_OTSU);
cv::threshold(result, final, threshold*.9, 255, CV_THRESH_BINARY);
I was able to clearly extract both light text on a dark background, and dark text on a light background.
If you need the final image to consistently be white background with black text, you can do this:
cv::Scalar avgPixelIntensity = cv::mean( final );
if(avgPixelIntensity[0] < 127.0)
cv::bitwise_not(final, final);
I tried a lot of different text-extraction methods and couldn't find any that worked across the board, but this one seems to. It took a lot of trial and error to figure out, so I hope this helps.
I don't really understand what your final aim is. Do you eventually want a nice filled in version of the text so you can recognise the characters? I can give that a shot if that's what you are looking for.
This is what I did while trying to remove inner holes:
For this one I didn't bother:
It fails at the edges where the text is cut off.
Obviously, I had to work with an image that had already gone through some processing. I might be able to give you more help and produce a better output if I had the original. You might not even need to use derivatives at all if the background is clean enough.
Here is the code:
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <iostream>
using namespace cv;
using namespace std;
void printInnerContours (int contourPos, Mat &filled, vector<vector<Point2i > > &contours, vector<Vec4i> &hierarchy, int area);
int main() {
int areaThresh;
vector<vector<Point2i > > contours;
vector<Vec4i> hierarchy;
Mat text = imread ("../wHWHA.jpg", 0); //read as greyscale
threshold (text, text, 50, 255, THRESH_BINARY);
imwrite ("../text1.jpg", text);
areaThresh = (0.01 * text.rows * text.cols) / 100;
findContours (text, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_NONE);
Mat filled = Mat::zeros(text.rows, text.cols, CV_8U);
cout << contours.size() << endl;
for (int i = 0; i < contours.size(); i++) {
int area = contourArea(contours[i]);
if (area > areaThresh) {
if ((hierarchy[i][2] != -1) && (hierarchy[i][3] == -1)) {
drawContours (filled, contours, i, 255, -1);
if (hierarchy[i][2] != -1) {
printInnerContours (hierarchy[i][2], filled, contours, hierarchy, area);
}
}
}
}
imwrite("../output.jpg", filled);
return 0;
}
void printInnerContours (int contourPos, Mat &filled, vector<vector<Point2i > > &contours, vector<Vec4i> &hierarchy, int area) {
int areaFrac = 5;
if (((contourArea (contours[contourPos]) * 100) / area) < areaFrac) {
//drawContours (filled, contours, contourPos, 0, -1);
}
if (hierarchy[contourPos][2] != -1) {
printInnerContours (hierarchy[contourPos][2], filled, contours, hierarchy, area);
}
if (hierarchy[contourPos][0] != -1) {
printInnerContours (hierarchy[contourPos][0], filled, contours, hierarchy, area);
}
}