Finding only big blobs on image - c++

Hello, I'm trying to find characters in this image.
This is the image I get after some preprocessing.
Now I'm trying to do connected component labelling to find the blobs; however, I also get a lot of small blobs.
#include <iostream>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
using namespace std;
void FindBlobs(const Mat &binary, vector < vector<Point2i> > &blobs);
int main(int argc, char **argv)
{
    Mat img = imread("adaptive.png", 0);
    if(!img.data) {
        cout << "File not found" << endl;
        return -1;
    }

    namedWindow("binary");
    namedWindow("labelled");

    Mat output = Mat::zeros(img.size(), CV_8UC3);
    Mat binary;
    vector < vector<Point2i > > blobs;

    threshold(img, binary, 0, 1, THRESH_BINARY_INV);
    FindBlobs(binary, blobs);

    // Randomly color the blobs
    for(size_t i=0; i < blobs.size(); i++) {
        unsigned char r = 255 * (rand()/(1.0 + RAND_MAX));
        unsigned char g = 255 * (rand()/(1.0 + RAND_MAX));
        unsigned char b = 255 * (rand()/(1.0 + RAND_MAX));

        for(size_t j=0; j < blobs[i].size(); j++) {
            int x = blobs[i][j].x;
            int y = blobs[i][j].y;

            output.at<Vec3b>(y,x)[0] = b; // Vec3b stores channels in BGR order
            output.at<Vec3b>(y,x)[1] = g;
            output.at<Vec3b>(y,x)[2] = r;
        }
    }

    imshow("binary", img);
    imshow("labelled", output);
    waitKey(0);

    return 0;
}

void FindBlobs(const Mat &binary, vector < vector<Point2i> > &blobs)
{
    blobs.clear();

    Mat label_image;
    binary.convertTo(label_image, CV_32SC1);

    int label_count = 2; // starts at 2 because 0 and 1 are used already

    for(int y=0; y < label_image.rows; y++) {
        int *row = (int*)label_image.ptr(y);
        for(int x=0; x < label_image.cols; x++) {
            if(row[x] != 1) {
                continue;
            }

            Rect rect;
            floodFill(label_image, Point(x,y), label_count, &rect, 0, 0, 4);

            vector <Point2i> blob;
            for(int i=rect.y; i < (rect.y+rect.height); i++) {
                int *row2 = (int*)label_image.ptr(i);
                for(int j=rect.x; j < (rect.x+rect.width); j++) {
                    if(row2[j] != label_count) {
                        continue;
                    }
                    blob.push_back(Point2i(j,i));
                }
            }

            blobs.push_back(blob);
            label_count++;
        }
    }
}
So with this algorithm I receive blobs, but when I do

if(blobs.size() > 50) {
    blob.push_back(Point2i(j,i));
}

I get a black screen. However, when I try

if(blob.size() < 50){
    blob.push_back(Point2i(j,i));
}

I get the small blobs. What could be the actual problem here?

I guess you want to store only those "big" blobs?
If so, change the following code:

blobs.push_back(blob);
label_count++;

to this:

if(blob.size() > 50){
    blobs.push_back(blob);
}
label_count++;

and you should get a picture like this:
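As a side note, a sketch of an alternative (assuming OpenCV 3.0 or later, which this question may predate): connectedComponentsWithStats performs the labelling and reports each component's area directly, so the size filter becomes a simple comparison.

// Label the binary image and keep only components larger than 50 pixels.
Mat labels, stats, centroids;
int n = connectedComponentsWithStats(binary, labels, stats, centroids, 4, CV_32S);
for (int i = 1; i < n; i++) {                      // label 0 is the background
    int area = stats.at<int>(i, CC_STAT_AREA);
    if (area > 50) {
        // colour or otherwise keep this component
    }
}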

Related

Coding dedicated function Average Filter to color Images C++ OpenCV

So basically, I have to write my own function in C++ with OpenCV that applies an average filter to both grayscale and colour images.
The function takes a Mat object and the size of the averaging filter (3 for a 3x3 filter, for example) and returns a Mat object.
This is what I have so far; it doesn't work, and I don't know how to extend it to colour.
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
Mat filtrageMoyen(Mat image, int tailleZonage) {
Mat imageRetour;
imageRetour = image.clone();
Scalar intensite = 0;
int cadrillage = tailleZonage / 2;
int valeurMoyenne = 0;
for (size_t x = 0; x < imageRetour.rows; x++)
{
for (size_t y = 0; y < imageRetour.cols; y++)
{
for (size_t xZonage = 0; xZonage < cadrillage; xZonage++)
{
for (size_t yZonage = 0; yZonage < cadrillage; yZonage++)
{
valeurMoyenne += (image.at<unsigned char>(x+xZonage, y + yZonage));
}
}
imageRetour.at<unsigned char>(x, y) = valeurMoyenne;
valeurMoyenne = 0;
}
}
return imageRetour;
}
int main() {
Mat img;
string filename = "imageRickRoll.png";
img = imread(filename, cv::IMREAD_GRAYSCALE);
imshow("Image filtree", filtrageMoyen(img, 5));
waitKey(0);
return 0;
}
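For reference, here is a minimal sketch of how the grayscale averaging loop could be written (an assumption about the intended behaviour, not the asker's solution): the window has to be centred on the current pixel, clamped at the image borders, and the accumulated sum divided by the number of pixels actually visited. For colour images the same idea applies per channel using Vec3b, or cv::blur(image, imageRetour, Size(tailleZonage, tailleZonage)) handles both cases directly.

Mat filtrageMoyenGris(const Mat& image, int tailleZonage) {
    Mat imageRetour = image.clone();
    int cadrillage = tailleZonage / 2;            // half window size
    for (int x = 0; x < image.rows; x++) {
        for (int y = 0; y < image.cols; y++) {
            int somme = 0, compte = 0;
            // visit the full window centred on (x, y)
            for (int dx = -cadrillage; dx <= cadrillage; dx++) {
                for (int dy = -cadrillage; dy <= cadrillage; dy++) {
                    int nx = x + dx, ny = y + dy;
                    if (nx < 0 || ny < 0 || nx >= image.rows || ny >= image.cols)
                        continue;                  // ignore pixels outside the image
                    somme += image.at<unsigned char>(nx, ny);
                    compte++;
                }
            }
            imageRetour.at<unsigned char>(x, y) = somme / compte; // store the average, not the raw sum
        }
    }
    return imageRetour;
}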

C++ OpenCV How can I get the values of Binary Histogram image 0 and 255?

My code shows me values that are not accurate and I am not sure what else to try. My goal is to get the y values (the counts per bin) so that I can read the image and put them in an array. I've looked at examples, and Stack Overflow is literally my last option.
#include<iostream>
#include<opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>
using namespace std;
using namespace cv;
int main()
{
Mat Rgb;
Mat Grey;
Mat image;
//Mat Histogram;
Rgb = imread("license.jpg", WINDOW_AUTOSIZE);
cvtColor(Rgb, Grey, cv::COLOR_BGR2GRAY);
threshold(Grey, image, 150, 250, THRESH_BINARY);
int histogram[255];
for (int i = 0; i < 255; i++)
{
histogram[i] = 0;
}
for (int y = 0; y < image.rows; y++)
//for (int x = 0; x < image.cols; x++)
histogram[(int)image.at<uchar>(y)]++;
//histogram[(int)image.at<uchar>(y, x)]++;
for (int i = 0; i < 255; i++)
cout << histogram[i] << " ";
// draw the histograms
int hist_w = 512; int hist_h = 400;
int bin_w = cvRound((double)hist_w / 255);
Mat histImage(hist_h, hist_w, CV_8UC1, Scalar(255, 255, 255));
int max = histogram[0];
for (int i = 1; i < 256; i++) {
if (max < histogram[i]) {
max = histogram[i];
}
}
for (int i = 0; i < 255; i++) {
histogram[i] = ((double)histogram[i] / max)*histImage.rows;
}
for (int i = 0; i < 255; i++)
{
line(histImage, Point(bin_w*(i), hist_h),
Point(bin_w*(i), hist_h - histogram[i]),
Scalar(0, 0, 0), 1, 8, 0);
}
imshow("Image", image);
waitKey(0);
cv::destroyAllWindows();
return 0;
}
The results contain numbers like 319 and other values, and I am only looking to get counts for 0 and 255.
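For what it's worth, a binary image only has two populated bins, so the two counts can be read off without building a 256-bin histogram at all; a minimal sketch (assuming the threshold call uses 255 as its max value rather than 250):

// Count the two values of the thresholded single-channel image directly.
int white = countNonZero(image);               // pixels set to 255 by threshold()
int black = image.rows * image.cols - white;   // the remaining pixels are 0
cout << "0: " << black << "  255: " << white << endl;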

How to improve otsu threshold output

I am using Otsu thresholding on an image.
Here is the input image :
Here is the output :
Here is the code I am using:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <string>
#include <math.h>
using namespace std;
using namespace cv;
int main(int argc, char const *argv[]) {
title("Text Extractor");
string win_name = "textextractor";
Mat img_a;
img_a = imread("../input/test_c.jpg");
Mat img_a_gray;
cvtColor(img_a, img_a_gray, CV_BGR2GRAY);
Mat img_a_blur;
GaussianBlur(img_a_gray, img_a_blur, Size(3, 3), 0, 0);
Mat img_a_thres;
// adaptiveThreshold(img_a_blur, img_a_thres, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 5, 4);
threshold(img_a_blur, img_a_thres, 0, 255, THRESH_OTSU);
namedWindow(win_name + "_a", CV_WINDOW_AUTOSIZE);
imshow(win_name + "_a", img_a_thres);
imwrite("../output/output_a.jpg", img_a_thres);
waitKey(0);
return 0;
}
The problem is that the output has a black region at the bottom and on the left. What can I do to minimize or remove this?
Edit:
I tried equalizeHist() and I am getting this:
I will try breaking the image into pieces and working on them separately.
Sorry, my bad. The previous one was using adaptive filtering. Using Otsu I get this:
There is no change in Otsu's output :/
Edit 2: I implemented the Feng Tan algorithm; it gives better results but the text loses clarity.
Code:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/photo/photo.hpp"
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <string>
#include <math.h>
using namespace std;
using namespace cv;
int main(int argc, char const *argv[]) {
string win_name = "textextractor";
Mat img_c;
img_c = imread("../input/sample.jpg");
Mat img_c_gray;
cvtColor(img_c, img_c_gray, CV_BGR2GRAY);
Mat img_c_bin = Mat::zeros(img_c_gray.rows, img_c_gray.cols, CV_8UC1);
int s_win = 17;
int l_win = 35;
double min_tau = 10;
Rect roi_s = Rect(-s_win/2, -s_win/2, s_win, s_win);
Rect roi_l = Rect(-l_win/2, -l_win/2, l_win, l_win);
Rect img_c_roi = Rect(0, 0, img_c_gray.cols, img_c_gray.rows);
for (size_t r = 0; r < img_c_gray.rows; r++) {
for (size_t c = 0; c < img_c_gray.cols; c++) {
double pthres = 255;
Rect sROI = roi_s + Point(c, r);
sROI = sROI & img_c_roi;
if(sROI.width == 0 || sROI.height == 0) {
continue;
}
Rect lROI = roi_l + Point(c, r);
lROI = lROI & img_c_roi;
if(lROI.width == 0 || lROI.height == 0) {
continue;
}
Mat sROI_gray = img_c_gray(sROI);
Mat lROI_gray = img_c_gray(lROI);
double s_stdDev = 0;
double l_stdDev = 0;
double s_mean = 0;
double l_mean = 0;
double l_min = DBL_MAX;
for (size_t r = 0; r < sROI_gray.rows; r++) {
for (size_t c = 0; c < sROI_gray.cols; c++) {
s_mean += sROI_gray.at<unsigned char>(r, c);
}
}
s_mean = s_mean / static_cast<double> (sROI_gray.cols * sROI_gray.rows);
for (size_t r = 0; r < sROI_gray.rows; r++) {
for (size_t c = 0; c < sROI_gray.cols; c++) {
double diff = sROI_gray.at<unsigned char> (r, c) - s_mean;
s_stdDev += diff * diff;
}
}
s_stdDev = sqrt(s_stdDev / static_cast<int> (sROI_gray.cols * sROI_gray.rows));
for (size_t r = 0; r < lROI_gray.rows; r++) {
for (size_t c = 0; c < lROI_gray.cols; c++) {
l_mean += lROI_gray.at<unsigned char> (c, r);
if(lROI_gray.at<unsigned char> (r, c) < l_min) {
l_min = lROI_gray.at<unsigned char> (r, c);
}
}
}
l_mean = l_mean / static_cast<double> (lROI_gray.cols * lROI_gray.rows);
for (size_t r = 0; r < lROI_gray.rows; r++) {
for (size_t c = 0; c < lROI_gray.cols; c++) {
double diff = lROI_gray.at<unsigned char> (r, c) - l_mean;
l_stdDev += diff * diff;
}
}
l_stdDev = sqrt(l_stdDev / static_cast<double> (lROI_gray.cols * lROI_gray.rows));
double tau = ((s_mean - l_min) * (1 - s_stdDev / l_stdDev)) / 2.0;
if(tau < min_tau) {
tau = min_tau;
}
double threshold = s_mean - tau;
unsigned char pixel_val = img_c_gray.at<unsigned char>(r, c);
if(pixel_val >= threshold) {
img_c_bin.at<unsigned char> (r, c) = 255;
} else {
img_c_bin.at<unsigned char> (r, c) = 0;
}
}
}
namedWindow(win_name + "_c", CV_WINDOW_AUTOSIZE);
imshow(win_name + "_c", img_c_bin);
imwrite("../output/output_c.jpg", img_c_bin);
waitKey(0);
return 0;
}
Output:
This is what I was able to obtain after some trial and error. First I applied a median blur to the original image, then I applied an adaptive threshold to the blurred image.
This is what I got:
1. Adaptive Threshold using Gaussian filter:
2. Adaptive Threshold using Mean filter:
From here on you can carry out a series of morphological operations that best suit your final image. :)
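A rough sketch of that pipeline, assuming the asker's img_a as input (the kernel and block sizes here are guesses, not the values used for the pictures above):

Mat gray, blurred, bin_gauss, bin_mean;
cvtColor(img_a, gray, CV_BGR2GRAY);
medianBlur(gray, blurred, 5);                                        // median blur first
adaptiveThreshold(blurred, bin_gauss, 255,
                  ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY, 11, 2); // Gaussian-weighted mean
adaptiveThreshold(blurred, bin_mean, 255,
                  ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 11, 2);     // plain mean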
You should try using CLAHE.
I tried it in MATLAB using:
Ia = imread('FHXTJ.jpg');
I = rgb2gray(Ia);
A = adapthisteq(I, 'clipLimit', 0.02, 'Distribution', 'rayleigh');
Result:
Note: You can apply thresholding on this image. Otsu should work fine now.
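For anyone working in C++ rather than MATLAB, cv::createCLAHE is the OpenCV counterpart; a rough sketch of the same idea (the clip limit and tile size below are not a one-to-one translation of the adapthisteq parameters):

Mat gray, enhanced, binary;
cvtColor(img_a, gray, CV_BGR2GRAY);
Ptr<CLAHE> clahe = createCLAHE(2.0, Size(8, 8));                  // clip limit, tile grid size
clahe->apply(gray, enhanced);
threshold(enhanced, binary, 0, 255, THRESH_BINARY | THRESH_OTSU); // Otsu on the equalised image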

Create a single image from images array

Hi, I'm trying to create a single image from multiple images in OpenCV.
The images I use are the same size.
What I do is reshape each of them to a single line and then try to merge them into my new image.
I create a new image with the size of the two images and pass the array, but I receive the error EXC_BAD_ACCESS(code=1, address = ..).
Note: the sizes of the images are correct:
size of single image : [170569 x 1]
size of new_image : [170569 x 2]
My code is below.
Thank you.
int main(){
Mat image[2];
image[0]= imread("image1.jpg",0);
image[1]= imread("image2.jpg",0);
image[0] = image[0].reshape(0, 1); //SINGLE LINE
image[1] = image[1].reshape(0, 1); //SINGLE LINE
int size = sizeof(image)/sizeof(Mat);
Mat new_image(image[0].cols,size,CV_32FC1,image);
}
Mat new_image;
vconcat(image[0],image[1],new_image);
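A minimal sketch of how the vconcat suggestion fits the original code (keeping the question's file names and grayscale loading):

#include <opencv2/opencv.hpp>
using namespace cv;

int main() {
    Mat image[2];
    image[0] = imread("image1.jpg", 0);
    image[1] = imread("image2.jpg", 0);
    image[0] = image[0].reshape(0, 1);       // 1 x N row vector
    image[1] = image[1].reshape(0, 1);       // 1 x N row vector (same N required)

    Mat new_image;
    vconcat(image[0], image[1], new_image);  // new_image is 2 x N, same type as the inputs
    return 0;
}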
If I understand well, you need to concatenate 2 images of the same size into one Mat. I wrote this very quick code to perform the task.
You can change the function arguments to pointers and add other handling to take care of images of different sizes.
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/highgui/highgui.hpp>
cv::Mat cvConcatenateMat(const cv::Mat &image1, const cv::Mat &image2, bool isCol CV_DEFAULT(true)){
if (isCol) {
cv::Mat mergeMat = cv::Mat(image1.rows, image1.cols + image2.cols, image1.type());
for (int j = 0; j < image1.rows; j++) {
for (int i = 0; i < image1.cols; i++) {
mergeMat.at<cv::Vec3b>(j,i) = image1.at<cv::Vec3b>(j,i);
}
for (int i = image1.cols; i < mergeMat.cols; i++) {
mergeMat.at<cv::Vec3b>(j,i) = image2.at<cv::Vec3b>(j, i - image1.cols); // shift the index back into image2's own column range
}
}
return mergeMat;
} else {
cv::Mat mergeMat = cv::Mat(image1.rows + image2.rows, image1.cols, image1.type());
for (int j = 0; j < image1.cols; j++) {
for (int i = 0; i < image1.rows; i++) {
mergeMat.at<cv::Vec3b>(i,j) = image1.at<cv::Vec3b>(i,j);
}
for (int i = image1.rows; i < mergeMat.rows; i++) {
mergeMat.at<cv::Vec3b>(i,j) = image2.at<cv::Vec3b>(i-image1.rows,j);
}
}
return mergeMat;
}
}
int main(int argc, const char * argv[]) {
cv::Mat image1 = cv::imread("img1.jpg");
cv::Mat image2 = cv::imread("img2.jpg");
cv::resize(image2, image2, image1.size());
cv::Mat outImage = cvConcatenateMat(image1, image2, false);
cv::imshow("out image", outImage);
cv::waitKey(0);
return 0;
}

Floodfill function in OpenCV

What is the meaning of the floating range and fixed range described in the documentation of the floodFill function?
I applied the floodFill function to the grayscale image shown below. The image has three regions of varying intensities:
outer rectangle = 170
inner ellipse = 175
inner rectangle = 180
I want to floodfill the regions with 170 and 175 together as a single connected component, and the region with 180 as a separate one.
I modified the code from here; the function is as follows:
#include <iostream>
#include <vector>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
void FindBlobs(const cv::Mat &binary, std::vector < std::vector<cv::Point2i> > &blobs);
int main(int argc, char **argv)
{
cv::Mat img = cv::imread("blob.png", 0); // force greyscale
if(!img.data) {
std::cout << "File not found" << std::endl;
return -1;
}
cv::namedWindow("binary");
cv::namedWindow("labelled");
cv::Mat output = cv::Mat::zeros(img.size(), CV_8UC3);
cv::Mat binary=img.clone();
std::vector < std::vector<cv::Point2i > > blobs;
FindBlobs(binary, blobs);
// Randomly color the blobs
for(size_t i=0; i < blobs.size(); i++) {
unsigned char r = 255 * (rand()/(1.0 + RAND_MAX));
unsigned char g = 255 * (rand()/(1.0 + RAND_MAX));
unsigned char b = 255 * (rand()/(1.0 + RAND_MAX));
for(size_t j=0; j < blobs[i].size(); j++) {
int x = blobs[i][j].x;
int y = blobs[i][j].y;
output.at<cv::Vec3b>(y,x)[0] = b;
output.at<cv::Vec3b>(y,x)[1] = g;
output.at<cv::Vec3b>(y,x)[2] = r;
}
}
cv::imshow("binary", img);
cv::imshow("labelled", output);
cv::waitKey(0);
return 0;
}
void FindBlobs(const cv::Mat &binary, std::vector < std::vector<cv::Point2i> > &blobs)
{
    blobs.clear();

    cv::Mat label_image;
    binary.convertTo(label_image, CV_32FC1);

    int label_count = 2;

    for(int y=0; y < binary.rows; y++) {
        for(int x=0; x < binary.cols; x++) {
            if((int)label_image.at<float>(y,x) < 150) { //start labelling only when pixel > 150
                continue;
            }

            cv::Rect rect;
            cv::floodFill(label_image, cv::Point(x,y), cv::Scalar(label_count), &rect, cv::Scalar(0), cv::Scalar(6), 4+CV_FLOODFILL_FIXED_RANGE);

            std::vector <cv::Point2i> blob;
            for(int i=rect.y; i < (rect.y+rect.height); i++) {
                for(int j=rect.x; j < (rect.x+rect.width); j++) {
                    if((int)label_image.at<float>(i,j) != label_count) {
                        continue;
                    }
                    blob.push_back(cv::Point2i(j,i));
                }
            }

            blobs.push_back(blob);
            label_count++;
        }
    }
}
I used the fixed range via the flag CV_FLOODFILL_FIXED_RANGE (is the way I used it correct?).
I specified loDiff = 0 and upDiff = 6.
I expected that when the seed is 170, all points in the range 170 - 0 to 170 + 6, i.e. 170 to 176 (outer rectangle and inner ellipse), would be floodfilled with the same label, and since the inner rectangle is 180, it would get a different label.
However, I get the output below:
The outer rectangle and the inner ellipse do not have the same label. What might be the mistake?
Expected output: the inner ellipse should also be orange (the same colour as the outer rectangle).
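Regarding the first part of the question (the meaning of the two modes), a minimal sketch on a hypothetical 8-bit image img: with FLOODFILL_FIXED_RANGE every candidate pixel is compared against the value at the seed point, whereas without the flag each pixel is compared against its already-filled neighbours, so the accepted band can drift along a smooth gradient.

cv::Rect rect;
// Fixed range: a pixel is filled if seed - loDiff <= pixel <= seed + upDiff.
cv::floodFill(img, cv::Point(10, 10), cv::Scalar(255), &rect,
              cv::Scalar(0), cv::Scalar(6), 4 | cv::FLOODFILL_FIXED_RANGE);
// Floating range: a pixel is filled if it lies within [loDiff, upDiff] of an
// already-filled neighbour, so the reference value changes as the fill grows.
cv::floodFill(img, cv::Point(10, 10), cv::Scalar(255), &rect,
              cv::Scalar(0), cv::Scalar(6), 4);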