I tried to follow this method of drawing an orientation map: http://answers.opencv.org/question/9493/fingerprint-orientation-map-through-gradient/
I used a block size of 5x5 on my 480x320 image. The gradients I got were from 0 to 270 degrees, and there are constant values that keep repeating, like 44.7623 and 224.762, so I wonder if my gradients are wrong.
After that, I add up all the gradients in each 5x5 block and divide by 25 (averaging), as the link describes. I divided the degrees into 8 sections of 45-degree intervals and plotted them out, but the result looks nothing like my original image. Can anyone tell me what's wrong? I just want to detect the core (circle-like) feature of the image.
My original image is this: [original image] But my orientation map is this: [orientation map]
This is what I'm doing.
What's wrong? =(
I got the gradients with this method:
/// Gradient X
cv::Sobel(original_Mat, grad_x, CV_32FC1, 1, 0, 3);
/// Gradient Y
cv::Sobel(original_Mat, grad_y, CV_32FC1, 0, 1, 3);

Mat orientation = Mat(grad_x.rows, grad_y.cols, CV_32F);

for(int i = 0; i < grad_x.rows; i++){
    for(int j = 0; j < grad_x.cols; j++){
        // Retrieve a single value
        float valueX = grad_x.at<float>(i,j);
        float valueY = grad_x.at<float>(i,j);
        // Calculate the corresponding single direction by applying the arctangent function
        float result = fastAtan2(valueX,valueY);
        // Store in orientation matrix element
        orientation.at<float>(i,j) = result;
    }
}
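Note that both values here are read from grad_x, and that cv::fastAtan2 takes its arguments in (y, x) order; with valueX == valueY, fastAtan2 returns an angle near 45 or 225 degrees, which would explain the repeating 44.7623 and 224.762 values. A corrected sketch of this loop, assuming the intent is the per-pixel gradient direction (my reading, not code from the original post):

for (int i = 0; i < grad_x.rows; i++) {
    for (int j = 0; j < grad_x.cols; j++) {
        float valueX = grad_x.at<float>(i, j);
        float valueY = grad_y.at<float>(i, j); // read the Y component from grad_y
        // fastAtan2(y, x) returns the angle of the vector (x, y) in degrees, 0..360
        orientation.at<float>(i, j) = fastAtan2(valueY, valueX);
    }
}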
Here's the full code.
int main()
{
    cv::Mat original_Mat = cv::imread("Source.bmp", 1);

    cv::Mat grad = cv::Mat::zeros(original_Mat.size(), CV_64F);

    /// Generate grad_x and grad_y
    cv::Mat grad_x = cv::Mat::zeros(original_Mat.size(), CV_64F);
    cv::Mat grad_y = cv::Mat::zeros(original_Mat.size(), CV_64F);
    cv::Mat grad_angle = cv::Mat::zeros(original_Mat.size(), CV_64F);

    /// Gradient X
    cv::Sobel(original_Mat, grad_x, CV_32FC1, 1, 0, 3);
    /// Gradient Y
    cv::Sobel(original_Mat, grad_y, CV_32FC1, 0, 1, 3);

    Mat orientation = Mat(grad_x.rows, grad_y.cols, CV_32F); // to store the gradients
    Mat img = Mat(grad_x.rows, grad_y.cols, CV_32F);         // to draw out the map
    img = cv::Scalar(255,255,255); // all white

    // Calculate orientations of gradients --> in degrees
    // Loop over all matrix values and calculate the accompanying orientation
    for(int i = 0; i < grad_x.rows; i++){
        for(int j = 0; j < grad_x.cols; j++){
            // Retrieve a single value
            float valueX = grad_x.at<float>(i,j);
            float valueY = grad_x.at<float>(i,j);
            // Calculate the corresponding single direction by applying the arctangent function
            float result = fastAtan2(valueX,valueY);
            // Store in orientation matrix element
            orientation.at<float>(i,j) = result;
        }
    }

    int i = 0, j = 0;
    int x1 = 0, x2 = 0;
    float results;

    for(int l = 0; l < 96; l++) // to loop over all the rows
    {
        int x1 = (5 + (l * 5)); // to get 5x5 block sizes
        for(int k = 0; k < 64; k++) // to loop over all the columns
        {
            int x2 = (5 + (k * 5)); // to get 5x5 block sizes
            results = 0;

            // to get the total of the 5x5 gradient values
            for(i = (x1 - 5); i < x1; i++){
                for(j = (x2 - 5); j < x2; j++){
                    results = results + orientation.at<float>(i,j);
                    orientation.at<float>(i,j) = 0;
                }
            }
            results = results / 25; // averaging the 5x5 block gradients
            orientation.at<float>((x1 - 3),(x2 - 3)) = results; // store the result in the center of the 5x5 block
        }
    }

    results = 0;

    // this loop draws out the orientation map
    for(int i = 0; i < 480; i++)
    {
        for(int j = 0; j < 320; j++)
        {
            results = orientation.at<float>(i,j);
            if ((results <= 22.5) && (results > 0)){
                results = 0;
                img.at<int>(i,j)   = 255;
                img.at<int>(i,j+1) = 255;
                img.at<int>(i,j+2) = 255;
            }
            else if((results > 22.5) && (results <= 67.5)){
                results = 45;
                img.at<int>(i,j)     = 255;
                img.at<int>(i-1,j+1) = 255;
                img.at<int>(i-2,j+2) = 255;
            }
            else if((results > 67.5) && (results <= 112.5)){
                results = 90;
                img.at<int>(i,j)   = 255;
                img.at<int>(i-1,j) = 255;
                img.at<int>(i-2,j) = 255;
            }
            else if((results > 112.5) && (results <= 157.5)){
                results = 135;
                img.at<int>(i,j)     = 255;
                img.at<int>(i-1,j-1) = 255;
                img.at<int>(i-2,j-2) = 255;
            }
            else if((results > 157.5) && (results <= 202.5)){
                results = 180;
                img.at<int>(i,j)   = 255;
                img.at<int>(i,j-1) = 255;
                img.at<int>(i,j-2) = 255;
            }
            else if((results > 202.5) && (results <= 247.5)){
                results = 225;
                img.at<int>(i,j)     = 255;
                img.at<int>(i+1,j-1) = 255;
                img.at<int>(i+2,j-2) = 255;
            }
            else if((results > 247.5) && (results <= 292.5)){
                results = 270;
                img.at<int>(i,j)   = 255;
                img.at<int>(i+1,j) = 255;
                img.at<int>(i+2,j) = 255;
            }
            else if((results > 292.5) && (results <= 337.5)){
                results = 315;
                img.at<int>(i,j)     = 255;
                img.at<int>(i+1,j+1) = 255;
                img.at<int>(i+2,j+2) = 255;
            }
            else
            {
                results = 0;
            }
            orientation.at<float>(i,j) = results;
        }
    }
    return 0;
}
Here is my result.
For this image: [source image]
I've got this result: [result image]
The code:
#include <stdio.h>
#include <stdarg.h>
#include "opencv2/opencv.hpp"

using namespace std;
using namespace cv;

int main(int argc, char* argv[])
{
    namedWindow("source");
    namedWindow("result");
    namedWindow("ang");

    Mat img = imread("D:\\ImagesForTest\\binarized_image.png", 0);
    cv::threshold(img, img, 128, 255, cv::THRESH_BINARY);

    Mat thinned;
    thinned = img.clone(); // Just clone the input
    //Thinning(img, thinned); // Not actually needed

    cv::GaussianBlur(thinned, thinned, Size(3,3), 1.0);

    Mat gx, gy, ang, mag;
    cv::Sobel(thinned, gx, CV_32FC1, 1, 0);
    cv::Sobel(thinned, gy, CV_32FC1, 0, 1);
    cv::phase(gx, gy, ang, false); // per-pixel gradient direction (radians)
    cv::magnitude(gx, gy, mag);    // per-pixel gradient magnitude
    cv::normalize(mag, mag, 0, 1, cv::NORM_MINMAX);

    Mat angRes = Mat::zeros(img.rows*3, img.cols*3, CV_8UC1);

    // Draw a short line segment per pixel, scaled by the gradient magnitude
    for (int i = 0; i < img.rows; i += 2)
    {
        for (int j = 0; j < img.cols; j += 2)
        {
            int x = j*3;
            int y = i*3;
            float r = 5;
            float m = r * (mag.at<float>(i,j));
            float dx = m * r * cos(ang.at<float>(i,j));
            float dy = m * r * sin(ang.at<float>(i,j));
            cv::line(angRes, cv::Point(x,y), cv::Point(x+dx, y+dy), Scalar::all(255), 1, CV_AA);
        }
    }

    imshow("ang", angRes);
    imshow("source", img);
    imshow("result", thinned);
    cv::waitKey(0);
}
Another variant (weighted block averages):
#include <stdio.h>
#include <stdarg.h>
#include "opencv2/opencv.hpp"

using namespace std;
using namespace cv;

// Magnitude-weighted average of the angles in a block
// (const refs, so the temporary ROI headers from mag(Rect(...)) can bind)
float GetWeightedAngle(const Mat& mag, const Mat& ang)
{
    float res = 0;
    float n = 0;
    for (int i = 0; i < mag.rows; ++i)
    {
        for (int j = 0; j < mag.cols; ++j)
        {
            res += ang.at<float>(i,j) * mag.at<float>(i,j);
            n += mag.at<float>(i,j);
        }
    }
    res /= n;
    return res;
}

int main(int argc, char* argv[])
{
    namedWindow("source");
    namedWindow("ang");

    Mat img = imread("D:\\ImagesForTest\\binarized_image.png", 0);
    cv::threshold(img, img, 128, 255, cv::THRESH_BINARY);

    Mat thinned;
    thinned = img.clone();
    //Thinning(img, thinned);
    //cv::GaussianBlur(thinned, thinned, Size(3,3), 1.0);

    Mat gx, gy, ang, mag;
    cv::Sobel(thinned, gx, CV_32FC1, 1, 0, 7);
    cv::Sobel(thinned, gy, CV_32FC1, 0, 1, 7);
    cv::phase(gx, gy, ang, false);
    cv::magnitude(gx, gy, mag);
    cv::normalize(mag, mag, 0, 1, cv::NORM_MINMAX);

    Mat angRes = Mat::zeros(img.rows, img.cols, CV_8UC1);

    int blockSize = img.cols/15 - 1;
    float r = blockSize;
    for (int i = 0; i < img.rows - blockSize; i += blockSize)
    {
        for (int j = 0; j < img.cols - blockSize; j += blockSize)
        {
            float a = GetWeightedAngle(mag(Rect(j,i,blockSize,blockSize)), ang(Rect(j,i,blockSize,blockSize)));
            float dx = r * cos(a);
            float dy = r * sin(a);
            int x = j;
            int y = i;
            cv::line(angRes, cv::Point(x,y), cv::Point(x+dx, y+dy), Scalar::all(255), 1, CV_AA);
        }
    }
    imshow("ang", angRes);
    imshow("source", img);
    cv::waitKey(0);
}
It gives the result image:
Related
I am trying to do a Delaunay triangulation for a set of points in OpenCV, but have encountered a problem.
The function takes a matrix of coordinates and returns an adjacency matrix. (If there is an edge connecting point i and point j, then adj(i,j) = 1, otherwise 0.)
I can't get it working; the code below gives strange results.
Could you please help?
An example of Delaunay triangulation is given here.
Thank you in advance.
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>

using namespace std;
using namespace cv;

Mat delaunay(const Mat& points, int imRows, int imCols)
/// Return the Delaunay triangulation, under the form of an adjacency matrix
/// points is a Nx2 mat containing the coordinates (x, y) of the points
{
    Mat adj(points.rows, points.rows, CV_32S, Scalar(0));

    /// Create subdiv and insert the points to it
    Subdiv2D subdiv(Rect(0, 0, imCols, imRows));
    for(int p = 0; p < points.rows; p++)
    {
        float xp = points.at<float>(p, 0);
        float yp = points.at<float>(p, 1);
        Point2f fp(xp, yp);
        subdiv.insert(fp);
    }

    /// Get the number of edges
    vector<Vec4f> edgeList;
    subdiv.getEdgeList(edgeList);
    int nE = edgeList.size();

    /// Check adjacency
    for(int e = 1; e <= nE; e++)
    {
        int p = subdiv.edgeOrg(e); // Edge's origin
        int q = subdiv.edgeDst(e); // Edge's destination
        if(p < points.rows && q < points.rows)
            adj.at<int>(p, q) = 1;
        // else
        // {
        //     cout << p << ", " << q << endl;
        //     assert(p < points.rows && q < points.rows);
        // }
    }
    return adj;
}

int main()
{
    Mat points = Mat(100, 2, CV_32F);
    randu(points, 0, 99);
    int rows = 100, cols = 100;
    Mat im(rows, cols, CV_8UC3, Scalar::all(0));
    Mat adj = delaunay(points, rows, cols);

    for(int i = 0; i < points.rows; i++)
    {
        int xi = points.at<float>(i, 0);
        int yi = points.at<float>(i, 1);
        /// Draw the edges
        for(int j = i + 1; j < points.rows; j++)
        {
            if(adj.at<int>(i, j) > 0)
            {
                int xj = points.at<float>(j, 0);
                int yj = points.at<float>(j, 1);
                line(im, Point(xi, yi), Point(xj, yj), Scalar(255, 0, 0), 1);
            }
            /// Draw the nodes
            circle(im, Point(xi, yi), 1, Scalar(0, 0, 255), -1);
        }
    }
    namedWindow("im", CV_WINDOW_NORMAL);
    imshow("im", im);
    waitKey();
    return 0;
}
You are inserting into the adjacency matrix the indices of the Subdiv2D edges, which don't correspond to the indices of the points.
You can fix this by, for example, storing the points and their indices in a std::map. When you retrieve edges from the Subdiv2D, you check that each edge is formed by your points, and not by the boundary points added by Subdiv2D. Having stored the point indices, you're then able to build the adjacency matrix correctly.
Have a look at the code:
#include <map>
#include <opencv2/opencv.hpp>

using namespace std;
using namespace cv;

struct lessPoint2f
{
    bool operator()(const Point2f& lhs, const Point2f& rhs) const
    {
        return (lhs.x == rhs.x) ? (lhs.y < rhs.y) : (lhs.x < rhs.x);
    }
};

Mat delaunay(const Mat1f& points, int imRows, int imCols)
/// Return the Delaunay triangulation, under the form of an adjacency matrix
/// points is a Nx2 mat containing the coordinates (x, y) of the points
{
    map<Point2f, int, lessPoint2f> mappts;

    Mat1b adj(points.rows, points.rows, uchar(0));

    /// Create subdiv and insert the points to it
    Subdiv2D subdiv(Rect(0, 0, imCols, imRows));
    for (int p = 0; p < points.rows; p++)
    {
        float xp = points(p, 0);
        float yp = points(p, 1);
        Point2f fp(xp, yp);

        // Don't add duplicates
        if (mappts.count(fp) == 0)
        {
            // Save point and index
            mappts[fp] = p;
            subdiv.insert(fp);
        }
    }

    /// Get the number of edges
    vector<Vec4f> edgeList;
    subdiv.getEdgeList(edgeList);
    int nE = edgeList.size();

    /// Check adjacency
    for (int i = 0; i < nE; i++)
    {
        Vec4f e = edgeList[i];
        Point2f pt0(e[0], e[1]);
        Point2f pt1(e[2], e[3]);

        if (mappts.count(pt0) == 0 || mappts.count(pt1) == 0) {
            // Not a valid point
            continue;
        }

        int idx0 = mappts[pt0];
        int idx1 = mappts[pt1];

        // Symmetric matrix
        adj(idx0, idx1) = 1;
        adj(idx1, idx0) = 1;
    }
    return adj;
}

int main()
{
    Mat1f points(10, 2);
    randu(points, 0, 99);

    int rows = 100, cols = 100;
    Mat3b im(rows, cols, Vec3b(0,0,0));

    Mat1b adj = delaunay(points, rows, cols);

    for (int i = 0; i < points.rows; i++)
    {
        int xi = points.at<float>(i, 0);
        int yi = points.at<float>(i, 1);

        /// Draw the edges
        for (int j = i + 1; j < points.rows; j++)
        {
            if (adj(i, j))
            {
                int xj = points(j, 0);
                int yj = points(j, 1);
                line(im, Point(xi, yi), Point(xj, yj), Scalar(255, 0, 0), 1);
            }
        }
    }

    for (int i = 0; i < points.rows; i++)
    {
        int xi = points(i, 0);
        int yi = points(i, 1);

        /// Draw the nodes
        circle(im, Point(xi, yi), 1, Scalar(0, 0, 255), -1);
    }

    imshow("im", im);
    waitKey();
    return 0;
}
I want to modify part of a multi-dimensional matrix using OpenCV. Basically, I want to achieve the same as this Matlab code:
A = zeros(5,5,25);
A(:,:,1) = some_matrix1;
A(:,:,2) = some_matrix2;
I am not sure if I should use a 5x5 matrix with 25 channels or a 5x5x25 matrix with a single channel. Here is what I tried:
int dim[3] = { 5,5,25 };
Mat A(3, dim, CV_32FC(1), Scalar::all(0));
A(Range::all(),Range::all(),0) = some_matrix;
But it seems like I can only use Range for two dimensions.
Or
Mat A(5, 5, CV_32FC(25), Scalar::all(0));
A(Range::all(),Range::all())[0] = some_matrix;
But in this case, I don't know how to access the channel.
Can you please help me with it?
OpenCV is optimized for 2D matrices. Multidimensional matrices will work, but they are rather inefficient and difficult to access.
This example code shows how to write and read values in a 3D matrix:
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    int sizes[] = { 5, 5, 25 };
    Mat data(3, sizes, CV_32F);

    Mat1f some_matrix(sizes[0], sizes[1]);
    randu(some_matrix, 0.f, 100.f); // some random values

    // Init data with each plane a constant increasing value
    for (int z = 0; z < data.size[2]; ++z)
    {
        // Set each z-plane to some scalar value
        Range ranges[] = { Range::all(), Range::all(), Range(z, z + 1) };
        data(ranges) = data.size[2] - z;
    }

    // Set the n-th z-plane to some_matrix
    int z = 0;
    for (int r = 0; r < sizes[0]; ++r)
    {
        for (int c = 0; c < sizes[1]; ++c)
        {
            data.at<float>(r, c, z) = some_matrix(r, c);
        }
    }

    // Access all slices along the z dimension
    for (int z = 0; z < data.size[2]; ++z)
    {
        Range ranges[] = { Range::all(), Range::all(), Range(z, z + 1) };
        Mat slice3d(data(ranges).clone());                      // with clone the slice is continuous, but still 3d
        Mat slice(2, &data.size[0], data.type(), slice3d.data); // 2d header over the same data
    }
    return 0;
}
However, it's far easier and more practical to store your 5x5x25 3D matrix as a std::vector<Mat>, where the vector has length 25 and each matrix is a 2D 5x5.
See the code:
#include <vector>
#include <opencv2/opencv.hpp>
using namespace std;
using namespace cv;

int main()
{
    int sizes[] = { 5, 5, 25 };

    vector<Mat> data(sizes[2]);
    // Init data with each plane a constant increasing value
    for (int z = 0; z < sizes[2]; ++z)
    {
        data[z] = Mat(sizes[0], sizes[1], CV_32F, float(sizes[2] - z));
    }

    Mat1f some_matrix(sizes[0], sizes[1]);
    randu(some_matrix, 0.f, 100.f); // some random values

    // Set the n-th z-plane to some_matrix
    int z = 0;
    data[z] = some_matrix;
    return 0;
}
Here is a piece of code to access the values along the third dimension; you can try it. Since A is a single-channel 3D Mat, each element is accessed with at<float> and three indices.

int dim[3] = { 5, 5, 25 };
Mat A(3, dim, CV_32FC1, Scalar::all(0));
for (int m = 0; m < 5; m++)
{
    for (int n = 0; n < 5; n++)
    {
        for (int a = 0; a < 25; a++) // loop along the third dimension
        {
            cout << A.at<float>(m, n, a) << endl;
        }
    }
}
Hello, I'm trying to find characters in this image.
This is the image I received after some preprocessing.
Now I'm trying to do connected component labeling to find blobs; however, I also get a lot of small blobs.
#include <iostream>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

using namespace cv;
using namespace std;

void FindBlobs(const Mat &binary, vector < vector<Point2i> > &blobs);

int main(int argc, char **argv)
{
    Mat img = imread("adaptive.png", 0);
    if(!img.data) {
        cout << "File not found" << endl;
        return -1;
    }

    namedWindow("binary");
    namedWindow("labelled");

    Mat output = Mat::zeros(img.size(), CV_8UC3);
    Mat binary;
    vector < vector<Point2i > > blobs;

    threshold(img, binary, 0, 1, THRESH_BINARY_INV);
    FindBlobs(binary, blobs);

    // Randomly color the blobs
    for(size_t i = 0; i < blobs.size(); i++) {
        unsigned char r = 255 * (rand()/(1.0 + RAND_MAX));
        unsigned char g = 255 * (rand()/(1.0 + RAND_MAX));
        unsigned char b = 255 * (rand()/(1.0 + RAND_MAX));

        for(size_t j = 0; j < blobs[i].size(); j++) {
            int x = blobs[i][j].x;
            int y = blobs[i][j].y;

            output.at<Vec3b>(y,x)[0] = b; // Vec3b is BGR color order
            output.at<Vec3b>(y,x)[1] = g;
            output.at<Vec3b>(y,x)[2] = r;
        }
    }

    imshow("binary", img);
    imshow("labelled", output);
    waitKey(0);
    return 0;
}

void FindBlobs(const Mat &binary, vector < vector<Point2i> > &blobs)
{
    blobs.clear();

    Mat label_image;
    binary.convertTo(label_image, CV_32SC1);

    int label_count = 2; // starts at 2 because 0,1 are used already

    for(int y = 0; y < label_image.rows; y++) {
        int *row = (int*)label_image.ptr(y);
        for(int x = 0; x < label_image.cols; x++) {
            if(row[x] != 1) {
                continue;
            }

            Rect rect;
            floodFill(label_image, Point(x,y), label_count, &rect, 0, 0, 4);

            vector <Point2i> blob;

            for(int i = rect.y; i < (rect.y + rect.height); i++) {
                int *row2 = (int*)label_image.ptr(i);
                for(int j = rect.x; j < (rect.x + rect.width); j++) {
                    if(row2[j] != label_count) {
                        continue;
                    }
                    blob.push_back(Point2i(j,i));
                }
            }

            blobs.push_back(blob);
            label_count++;
        }
    }
}
So with this algorithm I receive blobs, but when I do

if(blobs.size() > 50) {
    blob.push_back(Point2i(j,i));
}

I receive a black screen. However, when I try

if(blob.size() < 50){
    blob.push_back(Point2i(j,i));
}

I receive only the small blobs. What can be the actual problem here?
I guess you want to keep those "big" blobs?
If so, change the following code

blobs.push_back(blob);
label_count++;

to this:

if(blob.size() > 50){
    blobs.push_back(blob);
}
label_count++;

And you can receive a picture like this:
From an equalized image (2) I have to create an equalized-and-stretched image (3).
Original image: http://i.imgur.com/X5MKF6z.jpg
Equalized image: http://i.imgur.com/oFBVUJp.png
Equalized and stretched image: http://i.imgur.com/V7jeaRQ.png
With OpenCV I could have used equalizeHist(), which does both equalization and stretching.
So, without using OpenCV, how can I do stretching on an equalized image? The equalization part is done below.
#include <iostream>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv/highgui.h>
#include <cstring>

using std::cout;
using std::cin;
using std::endl;
using namespace cv;

void imhist(Mat image, int histogram[])
{
    // initialize all intensity values to 0
    for (int i = 0; i < 256; i++)
    {
        histogram[i] = 0;
    }

    // count the number of pixels for each intensity value
    for (int y = 0; y < image.rows; y++)
        for (int x = 0; x < image.cols; x++)
            histogram[(int)image.at<uchar>(y, x)]++;
}

void cumhist(int histogram[], int cumhistogram[])
{
    cumhistogram[0] = histogram[0];
    for (int i = 1; i < 256; i++)
    {
        cumhistogram[i] = histogram[i] + cumhistogram[i - 1];
    }
}

int main()
{
    // Load the image
    Mat image = imread("y1.jpg", CV_LOAD_IMAGE_GRAYSCALE);

    // Generate the histogram
    int histogram[256];
    imhist(image, histogram);

    // Calculate the size of the image
    int size = image.rows * image.cols;
    float alpha = 255.0 / size;

    // Calculate the probability of each intensity
    float PrRk[256];
    for (int i = 0; i < 256; i++)
    {
        PrRk[i] = (double)histogram[i] / size;
    }

    // Generate the cumulative frequency histogram
    int cumhistogram[256];
    cumhist(histogram, cumhistogram);

    // Scale the histogram
    int Sk[256];
    for (int i = 0; i < 256; i++)
    {
        Sk[i] = cvRound((double)cumhistogram[i] * alpha);
    }

    // Generate the equalized image
    Mat new_image = image.clone();
    for (int y = 0; y < image.rows; y++)
        for (int x = 0; x < image.cols; x++)
            new_image.at<uchar>(y, x) = saturate_cast<uchar>(Sk[image.at<uchar>(y, x)]);

    //////////////////////////////////////////
    // Generate the histogram-stretched image
    Mat str_image = new_image.clone();
    //for (int a = 0; a < str_image.rows; a++)
    //    for (int b = 0; b < str_image.cols; b++)

    // Display the original image
    namedWindow("Original Image");
    imshow("Original Image", image);

    // Display the equalized image
    namedWindow("Equalized Image");
    imshow("Equalized Image", new_image);

    waitKey();
    return 0;
}
The normal way to do this is to find your darkest pixel and your brightest. You can do this in a single loop iterating over all your pixels, with pseudocode like this:

darkest = pixel[0,0]   // assume the first pixel is darkest for now, and overwrite later
brightest = pixel[0,0] // assume the first pixel is brightest for now, and overwrite later
for all pixels
    if this pixel < darkest
        darkest = this pixel
    else if this pixel > brightest
        brightest = this pixel
    endif
end for

Simple enough. So, let's say the darkest and brightest are 80 and 220 respectively. Now you need to stretch the range 80..220 onto the full range 0..255.
You subtract 80 from every pixel in your image to shift down to zero at the left end of the histogram, so your range is now 0..140. Then you multiply every pixel by 255/140 to stretch the right end out to 255. Of course, you can do both pieces of arithmetic in a single pass over your pixel array.

for all pixels
    newvalue = int((current value - darkest) * 255 / (brightest - darkest))
end for
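A minimal sketch of that stretching pass in C++, written against the variables from the question's code (new_image as the equalized input, str_image as the output); a sketch under those assumptions, not a definitive implementation:

// Find the darkest and brightest pixels of the equalized image
uchar darkest = 255, brightest = 0;
for (int y = 0; y < new_image.rows; y++)
{
    for (int x = 0; x < new_image.cols; x++)
    {
        uchar v = new_image.at<uchar>(y, x);
        if (v < darkest)   darkest = v;
        if (v > brightest) brightest = v;
    }
}

// Map darkest..brightest onto 0..255 in a single pass
int range = brightest - darkest;
if (range == 0) range = 1; // guard against a flat image
for (int y = 0; y < str_image.rows; y++)
{
    for (int x = 0; x < str_image.cols; x++)
    {
        int v = new_image.at<uchar>(y, x);
        str_image.at<uchar>(y, x) = saturate_cast<uchar>((v - darkest) * 255 / range);
    }
}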
What is the meaning of the floating range and fixed range described in the documentation of the floodFill function?
I applied floodFill to the grayscale image shown below. The image has three regions of varying intensity:
outer rectangle = 170
inner ellipse = 175
inner rectangle = 180
I want to floodfill the regions with 170 and 175 together as a single connected component, and the region with 180 as a separate one.
I modified the code from here; the function is as follows:
#include <iostream>
#include <vector>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

void FindBlobs(const cv::Mat &binary, std::vector < std::vector<cv::Point2i> > &blobs);

int main(int argc, char **argv)
{
    cv::Mat img = cv::imread("blob.png", 0); // force greyscale
    if(!img.data) {
        std::cout << "File not found" << std::endl;
        return -1;
    }

    cv::namedWindow("binary");
    cv::namedWindow("labelled");

    cv::Mat output = cv::Mat::zeros(img.size(), CV_8UC3);
    cv::Mat binary = img.clone();
    std::vector < std::vector<cv::Point2i > > blobs;

    FindBlobs(binary, blobs);

    // Randomly color the blobs
    for(size_t i = 0; i < blobs.size(); i++) {
        unsigned char r = 255 * (rand()/(1.0 + RAND_MAX));
        unsigned char g = 255 * (rand()/(1.0 + RAND_MAX));
        unsigned char b = 255 * (rand()/(1.0 + RAND_MAX));

        for(size_t j = 0; j < blobs[i].size(); j++) {
            int x = blobs[i][j].x;
            int y = blobs[i][j].y;

            output.at<cv::Vec3b>(y,x)[0] = b;
            output.at<cv::Vec3b>(y,x)[1] = g;
            output.at<cv::Vec3b>(y,x)[2] = r;
        }
    }

    cv::imshow("binary", img);
    cv::imshow("labelled", output);
    cv::waitKey(0);
    return 0;
}
void FindBlobs(const cv::Mat &binary, std::vector < std::vector<cv::Point2i> > &blobs)
{
    blobs.clear();

    cv::Mat label_image;
    binary.convertTo(label_image, CV_32FC1);

    int label_count = 2;

    for(int y = 0; y < binary.rows; y++) {
        for(int x = 0; x < binary.cols; x++) {
            if((int)label_image.at<float>(y,x) < 150) { // start labelling only when pixel > 150
                continue;
            }

            cv::Rect rect;
            cv::floodFill(label_image, cv::Point(x,y), cv::Scalar(label_count), &rect, cv::Scalar(0), cv::Scalar(6), 4 + CV_FLOODFILL_FIXED_RANGE);

            std::vector <cv::Point2i> blob;

            for(int i = rect.y; i < (rect.y + rect.height); i++) {
                for(int j = rect.x; j < (rect.x + rect.width); j++) {
                    if((int)label_image.at<float>(i,j) != label_count) {
                        continue;
                    }
                    blob.push_back(cv::Point2i(j,i));
                }
            }

            blobs.push_back(blob);
            label_count++;
        }
    }
}
I used the fixed range via the flag CV_FLOODFILL_FIXED_RANGE (is the way I used it correct?), and I specified loDiff = 0 and upDiff = 6.
I expected that with a seed of 170, all points in the range 170-0 to 170+6, i.e. 170 to 176 (outer rectangle and inner ellipse), would be floodfilled with the same label, and that the inner rectangle, being 180, would get a different label.
However, I get the output below: the outer rectangle and the inner ellipse do not have the same label. What might be the mistake?
Expected output: the inner ellipse should also be orange (the same as the outer rectangle).