floodFill function in OpenCV - C++

What is the meaning of the floating range and fixed range described in the documentation of the floodFill function?
I applied the floodFill function to the grayscale image shown below. The image has three regions of varying intensities:
outer rectangle = 170
inner ellipse = 175
inner rectangle = 180
I want to flood-fill the regions with intensities 170 and 175 together as a single connected component, and the region with intensity 180 as a separate one.
I modified the code from here as follows:
#include <iostream>
#include <vector>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>

void FindBlobs(const cv::Mat &binary, std::vector < std::vector<cv::Point2i> > &blobs);

int main(int argc, char **argv)
{
    cv::Mat img = cv::imread("blob.png", 0); // force greyscale
    if(!img.data) {
        std::cout << "File not found" << std::endl;
        return -1;
    }
    cv::namedWindow("binary");
    cv::namedWindow("labelled");

    cv::Mat output = cv::Mat::zeros(img.size(), CV_8UC3);
    cv::Mat binary = img.clone();
    std::vector < std::vector<cv::Point2i > > blobs;
    FindBlobs(binary, blobs);

    // Randomly color the blobs
    for(size_t i = 0; i < blobs.size(); i++) {
        unsigned char r = 255 * (rand() / (1.0 + RAND_MAX));
        unsigned char g = 255 * (rand() / (1.0 + RAND_MAX));
        unsigned char b = 255 * (rand() / (1.0 + RAND_MAX));
        for(size_t j = 0; j < blobs[i].size(); j++) {
            int x = blobs[i][j].x;
            int y = blobs[i][j].y;
            output.at<cv::Vec3b>(y,x)[0] = b;
            output.at<cv::Vec3b>(y,x)[1] = g;
            output.at<cv::Vec3b>(y,x)[2] = r;
        }
    }

    cv::imshow("binary", img);
    cv::imshow("labelled", output);
    cv::waitKey(0);
    return 0;
}
void FindBlobs(const cv::Mat &binary, std::vector < std::vector<cv::Point2i> > &blobs)
{
    blobs.clear();
    cv::Mat label_image;
    binary.convertTo(label_image, CV_32FC1);
    int label_count = 2;
    for(int y = 0; y < binary.rows; y++) {
        for(int x = 0; x < binary.cols; x++) {
            if((int)label_image.at<float>(y,x) < 150) { // skip dark pixels; start labelling only from pixels >= 150
                continue;
            }
            cv::Rect rect;
            cv::floodFill(label_image, cv::Point(x,y), cv::Scalar(label_count), &rect,
                          cv::Scalar(0), cv::Scalar(6), 4 + CV_FLOODFILL_FIXED_RANGE);
            std::vector<cv::Point2i> blob;
            for(int i = rect.y; i < (rect.y + rect.height); i++) {
                for(int j = rect.x; j < (rect.x + rect.width); j++) {
                    if((int)label_image.at<float>(i,j) != label_count) {
                        continue;
                    }
                    blob.push_back(cv::Point2i(j,i));
                }
            }
            blobs.push_back(blob);
            label_count++;
        }
    }
}
I used the fixed range via the flag CV_FLOODFILL_FIXED_RANGE (is the way I used it correct?).
I specified loDiff = 0 and upDiff = 6.
I expected that with a seed value of 170, all points in the range 170 - 0 to 170 + 6, i.e. 170 to 176 (outer rectangle and inner ellipse), would be flood-filled with the same label, and that the inner rectangle, being 180, would get a different label.
However, I get the output below:
The outer rectangle and the inner ellipse do not get the same label. What might be the mistake?
Expected output: the inner ellipse should also be orange (the same color as the outer rectangle).
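For reference: with the fixed range, every candidate pixel is compared against the seed pixel's value, while with the floating range each pixel is compared against its already-filled neighbour, so the fill can creep through gradual gradients. A minimal toy sketch (my own, not from the original post) showing the difference on the 170/175/180 values used above:

#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>

int main()
{
    // A 1x3 strip with the three intensities from the question.
    cv::Mat strip = (cv::Mat_<uchar>(1, 3) << 170, 175, 180);

    // Fixed range: pixels are accepted if they lie within
    // [seed - loDiff, seed + upDiff] = [170, 176], so 180 is excluded.
    cv::Mat fixedRange = strip.clone();
    cv::floodFill(fixedRange, cv::Point(0, 0), cv::Scalar(255), 0,
                  cv::Scalar(0), cv::Scalar(6), 4 | cv::FLOODFILL_FIXED_RANGE);

    // Floating range (no flag): each pixel is compared to its neighbour,
    // so the fill creeps 170 -> 175 -> 180 and takes everything.
    cv::Mat floatRange = strip.clone();
    cv::floodFill(floatRange, cv::Point(0, 0), cv::Scalar(255), 0,
                  cv::Scalar(0), cv::Scalar(6), 4);

    std::cout << fixedRange << std::endl; // [255, 255, 180]
    std::cout << floatRange << std::endl; // [255, 255, 255]
    return 0;
}

Under the fixed range, the behaviour the question expects (170 and 175 merged, 180 separate) is exactly what this sketch produces, so if the full program still separates them, it is worth checking the actual pixel values in the image: anti-aliased or compressed edges between the regions can fall outside [170, 176] and break the connection.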

Related

Coding dedicated function Average Filter to color Images C++ OpenCV

So basically, I have to code my own function in C++ with OpenCV that applies an average filter to both grayscale and color images.
The function takes a Mat object and the size of the average filter (e.g. 3 for a 3x3 filter matrix), and returns a Mat object.
This is what I have so far; it doesn't work, and I don't know how to extend it to color.
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;

Mat filtrageMoyen(Mat image, int tailleZonage) {
    Mat imageRetour;
    imageRetour = image.clone();
    Scalar intensite = 0;
    int cadrillage = tailleZonage / 2;
    int valeurMoyenne = 0;
    for (size_t x = 0; x < imageRetour.rows; x++)
    {
        for (size_t y = 0; y < imageRetour.cols; y++)
        {
            for (size_t xZonage = 0; xZonage < cadrillage; xZonage++)
            {
                for (size_t yZonage = 0; yZonage < cadrillage; yZonage++)
                {
                    valeurMoyenne += (image.at<unsigned char>(x + xZonage, y + yZonage));
                }
            }
            imageRetour.at<unsigned char>(x, y) = valeurMoyenne;
            valeurMoyenne = 0;
        }
    }
    return imageRetour;
}

int main() {
    Mat img;
    string filename = "imageRickRoll.png";
    img = imread(filename, cv::IMREAD_GRAYSCALE);
    imshow("Image filtree", filtrageMoyen(img, 5));
    waitKey(0);
    return 0;
}
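For comparison, a minimal corrected sketch (mine, not the poster's; it assumes the goal is the mean over a tailleZonage x tailleZonage window centred on each pixel). The original loop sums only a quarter of the window, reads past the image borders, and never divides by the number of pixels, so it writes raw sums (wrapped into unsigned char) rather than means:

#include <opencv2/opencv.hpp>
using namespace cv;

// Mean filter over a tailleZonage x tailleZonage window centred on each pixel.
// Works on CV_8UC1; the same structure with Vec3b and three per-channel
// sums extends it to color.
Mat filtrageMoyen(const Mat& image, int tailleZonage) {
    Mat imageRetour = image.clone();
    int r = tailleZonage / 2; // window radius
    for (int x = 0; x < image.rows; x++) {
        for (int y = 0; y < image.cols; y++) {
            int somme = 0, n = 0;
            for (int dx = -r; dx <= r; dx++) {
                for (int dy = -r; dy <= r; dy++) {
                    int xx = x + dx, yy = y + dy;
                    if (xx < 0 || xx >= image.rows || yy < 0 || yy >= image.cols)
                        continue; // ignore pixels outside the image
                    somme += image.at<unsigned char>(xx, yy);
                    n++;
                }
            }
            imageRetour.at<unsigned char>(x, y) = somme / n; // divide by the window size
        }
    }
    return imageRetour;
}

The built-in cv::blur(src, dst, Size(tailleZonage, tailleZonage)) computes the same thing and handles both grayscale and color directly.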

C++ OpenCV How can I get the values of Binary Histogram image 0 and 255?

My code shows me values that are not accurate and I am not sure what else to try. My goal is to get the values along y, i.e. the rows, so that I can read the image and put it into an array. I've looked at examples, and Stack Overflow is literally my last option.
#include <iostream>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace std;
using namespace cv;

int main()
{
    Mat Rgb;
    Mat Grey;
    Mat image;
    //Mat Histogram;
    Rgb = imread("license.jpg", WINDOW_AUTOSIZE);
    cvtColor(Rgb, Grey, cv::COLOR_BGR2GRAY);
    threshold(Grey, image, 150, 250, THRESH_BINARY);

    int histogram[255];
    for (int i = 0; i < 255; i++)
    {
        histogram[i] = 0;
    }
    for (int y = 0; y < image.rows; y++)
        //for (int x = 0; x < image.cols; x++)
        histogram[(int)image.at<uchar>(y)]++;
        //histogram[(int)image.at<uchar>(y, x)]++;
    for (int i = 0; i < 255; i++)
        cout << histogram[i] << " ";

    // draw the histograms
    int hist_w = 512; int hist_h = 400;
    int bin_w = cvRound((double)hist_w / 255);
    Mat histImage(hist_h, hist_w, CV_8UC1, Scalar(255, 255, 255));
    int max = histogram[0];
    for (int i = 1; i < 256; i++) {
        if (max < histogram[i]) {
            max = histogram[i];
        }
    }
    for (int i = 0; i < 255; i++) {
        histogram[i] = ((double)histogram[i] / max) * histImage.rows;
    }
    for (int i = 0; i < 255; i++)
    {
        line(histImage, Point(bin_w*(i), hist_h),
             Point(bin_w*(i), hist_h - histogram[i]),
             Scalar(0, 0, 0), 1, 8, 0);
    }

    imshow("Image", image);
    waitKey(0);
    cv::destroyAllWindows();
    return 0;
}
The results have numbers like 319 and other values, while I am only expecting to get 0 or 255.
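For reference, a minimal counting sketch (my own, assuming the goal is just the number of black and white pixels). Three things in the original code work against it: threshold is called with maxval 250 rather than 255, the pixel loop visits only image.rows pixels instead of every (row, column) pair, and int histogram[255] has no bin for the value 255:

#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;

int main()
{
    Mat grey = imread("license.jpg", IMREAD_GRAYSCALE);
    Mat image;
    threshold(grey, image, 150, 255, THRESH_BINARY); // maxval 255, not 250

    int histogram[256] = {0}; // 256 bins so that value 255 fits
    for (int y = 0; y < image.rows; y++)
        for (int x = 0; x < image.cols; x++)
            histogram[image.at<uchar>(y, x)]++; // visit every pixel, not just column 0

    cout << "black: " << histogram[0] << ", white: " << histogram[255] << endl;
    // Sanity check: for a binary image the two counts add up to rows * cols.
    return 0;
}

For a binary image, cv::countNonZero(image) gives the white count directly, and rows*cols minus that gives the black count.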

From float array to Mat, concatenate blocks of image

I have an 800x800 image which is broken down into 16 blocks of 200x200.
(you can see the previous post here)
These blocks are: vector<Mat> subImages;
I want to use float pointers on them, so I am doing:
float *pdata = (float*)( subImages[ idxSubImage ].data );
1) Now I want to get back the same images/blocks, going from the float array to Mat data.
int Idx = 0;
pdata = (float*)( subImages[ Idx ].data );
namedWindow( "Display window", WINDOW_AUTOSIZE );
for( int i = 0; i < OriginalImgSize.height - 4; i += 200 )
{
    for( int j = 0; j < OriginalImgSize.width - 4; j += 200, Idx++ )
    {
        Mat mf( i, j, CV_32F, pdata + 200 );
        imshow( "Display window", mf );
        waitKey(0);
    }
}
So the problem is that I am receiving an
OpenCV Error: Assertion failed
in imshow.
2) How can I recombine all the blocks to obtain the original 800x800 image?
I tried something like:
int Idx = 0;
pdata = (float*)( subImages[ Idx ].data );
Mat big( 800, 800, CV_32F );
for( int i = 0; i < OriginalImgSize.height - 4; i += 200 )
{
    for( int j = 0; j < OriginalImgSize.width - 4; j += 200, Idx++ )
    {
        Mat mf( i, j, CV_32F, pdata + 200 );
        Rect roi( j, i, 200, 200 );
        mf.copyTo( big(roi) );
    }
}
imwrite( "testing" , big );
This gives me:
OpenCV Error: Assertion failed (!fixedSize()) in release
at the line mf.copyTo( big(roi) );
First, you need to know where your subimages go in the big image. To do this, you can save the rect of each subimage into a vector<Rect> smallImageRois;
Then you can use pointers (keep in mind that subimages are not continuous), or simply use copyTo at the correct place:
Have a look:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace std;
using namespace cv;

int main()
{
    Mat3b img = imread("path_to_image");
    resize(img, img, Size(800, 800));
    Mat grayImg;
    cvtColor(img, grayImg, COLOR_BGR2GRAY);
    grayImg.convertTo(grayImg, CV_32F);

    int N = 4;
    if (((grayImg.rows % N) != 0) || ((grayImg.cols % N) != 0))
    {
        // Error
        return -1;
    }

    Size graySize = grayImg.size();
    Size smallSize(grayImg.cols / N, grayImg.rows / N);

    vector<Mat> smallImages;
    vector<Rect> smallImageRois;
    for (int i = 0; i < graySize.height; i += smallSize.height)
    {
        for (int j = 0; j < graySize.width; j += smallSize.width)
        {
            Rect rect = Rect(j, i, smallSize.width, smallSize.height);
            smallImages.push_back(grayImg(rect));
            smallImageRois.push_back(rect);
        }
    }

    // Option 1. Using a pointer to the subimage data.
    Mat big1(800, 800, CV_32F);
    int big1step = big1.step1();
    float* pbig1 = big1.ptr<float>(0);
    for (int idx = 0; idx < smallImages.size(); ++idx)
    {
        float* pdata = (float*)smallImages[idx].data;
        int step = smallImages[idx].step1();
        Rect roi = smallImageRois[idx];
        for (int i = 0; i < smallSize.height; ++i)
        {
            for (int j = 0; j < smallSize.width; ++j)
            {
                pbig1[(roi.y + i) * big1step + (roi.x + j)] = pdata[i * step + j];
            }
        }
    }

    // Option 2. Using copyTo
    Mat big2(800, 800, CV_32F);
    for (int idx = 0; idx < smallImages.size(); ++idx)
    {
        smallImages[idx].copyTo(big2(smallImageRois[idx]));
    }

    return 0;
}
For concatenating the sub-images into a single square image, you can use the following function:
// Important: all patches should have exactly the same size
Mat concatPatches(vector<Mat> &patches) {
    assert(patches.size() > 0);

    // make it square
    const int patch_width  = patches[0].cols;
    const int patch_height = patches[0].rows;
    const int patch_stride = ceil(sqrt(patches.size()));

    Mat image = Mat::zeros(patch_stride * patch_height,
                           patch_stride * patch_width,
                           patches[0].type());

    for (size_t i = 0, iend = patches.size(); i < iend; i++) {
        Mat &patch = patches[i];
        const int offset_x = (i % patch_stride) * patch_width;
        const int offset_y = (i / patch_stride) * patch_height;
        // copy the patch to the output image
        patch.copyTo(image(Rect(offset_x, offset_y, patch_width, patch_height)));
    }
    return image;
}
It takes a vector of sub-images (or patches, as I refer to them) and concatenates them into a square image. Example usage:
vector<Mat> patches;
vector<Scalar> colours = {Scalar(255, 0, 0), Scalar(0, 255, 0), Scalar(0, 0, 255)};
// fill the vector with circles of different colours
for(int i = 0; i < 16; i++) {
    Mat patch = Mat::zeros(100, 100, CV_32FC3);
    circle(patch, Point(50, 50), 40, colours[i % 3], -1);
    patches.push_back(patch);
}
Mat img = concatPatches(patches);
imshow("img", img);
waitKey();
This will produce a 400x400 image: a 4x4 grid of the 16 coloured circles.
Print the values of i and j before creating Mat mf, and I believe you will soon find the error.
Hint 1: i and j will be 0 the first time
Hint 2: Use the copyTo() with a ROI like:
cv::Rect roi(0,0,200,200);
src.copyTo(dst(roi))
Edit:
Hint 3: Try to avoid that kind of pointer fiddling; you will get into trouble, especially if you ignore the step (as you seem to do).
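To make Hint 1 concrete (a sketch of my own, assuming the blocks really are 200x200 CV_32F): the failing constructor call passes i and j, which are both 0 on the first iteration, as the matrix dimensions, and pdata + 200 does not point at block Idx at all:

// Wrong: rows = i and cols = j are 0 on the first pass, and the data
// pointer is just 200 floats into the first block.
// Mat mf( i, j, CV_32F, pdata + 200 );

// Intended construction: wrap block Idx with its own dimensions,
// data pointer and row step (subimages of a larger Mat are not continuous).
Mat mf(200, 200, CV_32F,
       (float*)subImages[Idx].data,
       subImages[Idx].step); // step is in bytes, which is what Mat expects

Of course, since subImages[Idx] is already a Mat, passing it straight to imshow avoids the pointer round-trip entirely.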

Finding only big blobs on image

Hello, I'm trying to find characters in this image.
This is my image after some preprocessing.
Now I'm trying to do connected component labelling to find the blobs; however, I get a lot of small blobs too.
#include <iostream>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
using namespace std;

void FindBlobs(const Mat &binary, vector < vector<Point2i> > &blobs);

int main(int argc, char **argv)
{
    Mat img = imread("adaptive.png", 0);
    if(!img.data) {
        cout << "File not found" << endl;
        return -1;
    }
    namedWindow("binary");
    namedWindow("labelled");

    Mat output = Mat::zeros(img.size(), CV_8UC3);
    Mat binary;
    vector < vector<Point2i > > blobs;
    threshold(img, binary, 0, 1, THRESH_BINARY_INV);
    FindBlobs(binary, blobs);

    // Randomly color the blobs
    for(size_t i = 0; i < blobs.size(); i++) {
        unsigned char r = 255 * (rand() / (1.0 + RAND_MAX));
        unsigned char g = 255 * (rand() / (1.0 + RAND_MAX));
        unsigned char b = 255 * (rand() / (1.0 + RAND_MAX));
        for(size_t j = 0; j < blobs[i].size(); j++) {
            int x = blobs[i][j].x;
            int y = blobs[i][j].y;
            output.at<Vec3b>(y,x)[0] = b; // Vec3b uses BGR channel order
            output.at<Vec3b>(y,x)[1] = g;
            output.at<Vec3b>(y,x)[2] = r;
        }
    }

    imshow("binary", img);
    imshow("labelled", output);
    waitKey(0);
    return 0;
}
void FindBlobs(const Mat &binary, vector < vector<Point2i> > &blobs)
{
    blobs.clear();
    Mat label_image;
    binary.convertTo(label_image, CV_32SC1);
    int label_count = 2; // starts at 2 because 0,1 are used already
    for(int y = 0; y < label_image.rows; y++) {
        int *row = (int*)label_image.ptr(y);
        for(int x = 0; x < label_image.cols; x++) {
            if(row[x] != 1) {
                continue;
            }
            Rect rect;
            floodFill(label_image, Point(x,y), label_count, &rect, 0, 0, 4);
            vector<Point2i> blob;
            for(int i = rect.y; i < (rect.y + rect.height); i++) {
                int *row2 = (int*)label_image.ptr(i);
                for(int j = rect.x; j < (rect.x + rect.width); j++) {
                    if(row2[j] != label_count) {
                        continue;
                    }
                    blob.push_back(Point2i(j,i));
                }
            }
            blobs.push_back(blob);
            label_count++;
        }
    }
}
So with this algorithm I receive blobs, but when I do
if(blobs.size() > 50) {
    blob.push_back(Point2i(j,i));
}
I receive a black screen. However, when I try
if(blob.size() < 50){
    blob.push_back(Point2i(j,i));
}
I receive small blobs. What can be the actual problem here?
I guess you want to store those "big" blobs?
If so, change the following code
blobs.push_back(blob);
label_count++;
to this:
if(blob.size() > 50){
    blobs.push_back(blob);
}
label_count++;
And you will receive a labelled picture containing only the large blobs.
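As an aside, if a recent OpenCV (3.0 or later) is available, the hand-rolled floodFill labelling plus size test can be replaced with connectedComponentsWithStats and an area filter; a sketch of that alternative:

#include <opencv2/opencv.hpp>
using namespace cv;

// Keep only components with at least minArea pixels (input: 8-bit binary image).
Mat filterSmallBlobs(const Mat& binary, int minArea) {
    Mat labels, stats, centroids;
    int n = connectedComponentsWithStats(binary, labels, stats, centroids, 4, CV_32S);
    Mat out = Mat::zeros(binary.size(), CV_8U);
    for (int label = 1; label < n; ++label) { // label 0 is the background
        if (stats.at<int>(label, CC_STAT_AREA) >= minArea)
            out.setTo(255, labels == label); // copy this component into the output mask
    }
    return out;
}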

Weird result from the Kuwahara filter

I am implementing a Kuwahara filter in C++, with OpenCV to help with opening and displaying images. The idea is quite straightforward, but somehow I got a weird result from it. Here's the code:
#include "opencv2/opencv.hpp"
#include <iostream>
#include <iomanip>
#include <cmath>
using namespace std;
using namespace cv;
//This class is essentially a struct of 4 Kuwahara regions surrounding a pixel, along with each one's mean, sum and variance.
class Regions{
int* Area[4];
int Size[4];
unsigned long long Sum[4];
double Var[4];
int kernel;
public:
Regions(int _kernel) : kernel(_kernel) {
for (int i = 0; i<4; i++) {
Area[i] = new int[kernel*kernel];
Size[i] = 0;
Sum[i] = 0;
Var[i] = 0.0;
}
}
//Update data, increase the size of the area, update the sum
void sendData(int area, int data){
Area[area][Size[area]] = data;
Sum[area] += data;
Size[area]++;
}
//Calculate the variance of each area
double var(int area) {
int __mean = Sum[area]/Size[area];
double temp = 0;
for (int i = 0; i<Size[area]; i++) {
temp+= (Area[area][i] - __mean) * (Area[area][i] - __mean);
}
if (Size[area]==1) return 1.7e38; //If there is only one pixel inside the region then return the maximum of double
//So that with this big number, the region will never be considered in the below minVar()
return sqrt(temp/(Size[area]-1));
}
//Call the above function to calc the variances of all 4 areas
void calcVar() {
for (int i = 0; i<4; i++) {
Var[i] = var(i);
}
}
//Find out which regions has the least variance
int minVar() {
calcVar();
int i = 0;
double __var = Var[0];
if (__var > Var[1]) {__var = Var[1]; i = 1;}
if (__var > Var[2]) {__var = Var[2]; i = 2;}
if (__var > Var[3]) {__var = Var[3]; i = 3;}
return i;
}
//Return the mean of that regions
uchar result(){
int i = minVar();
return saturate_cast<uchar> ((double) (Sum[i] *1.0 / Size[i]));
}
};
class Kuwahara{
private:
int wid, hei, pad, kernel;
Mat image;
public:
Regions getRegions(int x, int y){
Regions regions(kernel);
uchar *data = image.data;
//Update data for each region, pixels that are outside the image's boundary will be ignored.
//Area 1 (upper left)
for (int j = (y-pad >=0)? y-pad : 0; j>= 0 && j<=y && j<hei; j++)
for (int i = ((x-pad >=0) ? x-pad : 0); i>= 0 && i<=x && i<wid; i++) {
regions.sendData(1,data[(j*wid)+i]);
}
//Area 2 (upper right)
for (int j = (y-pad >=0)? y-pad : 0; j<=y && j<hei; j++)
for (int i = x; i<=x+pad && i<wid; i++) {
regions.sendData(2,data[(j*wid)+i]);
}
//Area 3 (bottom left)
for (int j = y; j<=y+pad && j<hei; j++)
for (int i = ((x-pad >=0) ? x-pad : 0); i<=x && i<wid; i++) {
regions.sendData(3,data[(j*wid)+i]);
}
//Area 0 (bottom right)
for (int j = y; j<=y+pad && j<hei; j++)
for (int i = x; i<=x+pad && i<wid; i++) {
regions.sendData(0,data[(j*wid)+i]);
}
return regions;
}
//Constructor
Kuwahara(const Mat& _image, int _kernel) : kernel(_kernel) {
image = _image.clone();
wid = image.cols; hei = image.rows;
pad = kernel-1;
}
//Create new image and replace its pixels by the results of Kuwahara filter on the original pixels
Mat apply(){
Mat temp;
temp.create(image.size(), CV_8U);
uchar* data = temp.data;
for (int j= 0; j<hei; j++) {
for (int i = 0; i<wid; i++)
data[j*wid+i] = getRegions(i,j).result();
}
return temp;
}
};
int main() {
Mat img = imread("limes.tif", 1);
Mat gray, dest;
int kernel = 15;
gray.create(img.size(), CV_8U);
cvtColor(img, gray, CV_BGR2GRAY);
Kuwahara filter(gray, kernel);
dest = filter.apply();
imshow("Result", dest);
imwrite("result.jpg", dest);
waitKey();
}
And here's the result:
As you can see, it's different from the correct result: the borders of the limes seem to be duplicated and shifted upward. If I apply a 15x15 filter, it gives me a complete mess like this:
I've spent my whole day debugging, but so far I have found nothing. I even did the calculation on small images by hand, compared it with the result, and saw no differences.
Could anyone help me find out what I did wrong?
Many many thanks.
It turns out that there's nothing wrong with my code; the way I defined the kernel was the source of the problem. My "kernel" is actually one of the four small Kuwahara sections, while the correct definition of a kernel is the whole area over which data is calculated for each pixel, so the area containing all four sections is the real kernel. So when I talked about a 7x7 "kernel", I was actually applying a 15x15 one, and the horrible result came not from a 15x15 kernel as I thought, but from a 31x31 one. At that size, the Kuwahara filter simply doesn't make sense and bizarre results are inevitable.
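In code terms, the fix amounts to one line in the constructor (a sketch using the post's own member names): for a true kernel x kernel window, the reach of each quadrant from the centre pixel must be half the kernel size rather than the full size:

// Constructor (sketch): each of the four quadrants should span
// (kernel + 1) / 2 pixels per side, overlapping at the centre row/column,
// so the reach from the centre pixel is half the kernel size.
Kuwahara(const Mat& _image, int _kernel) : kernel(_kernel) {
    image = _image.clone();
    wid = image.cols; hei = image.rows;
    pad = (kernel - 1) / 2; // was: pad = kernel - 1, which roughly doubles the window
}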