I would like some help porting code from MATLAB to OpenCV C++. I am trying to do some operations with the RGB channels, but the value of thresh is not coming out the same even though I am passing the same image. Could someone please help me?
MATLAB
im = imread('1.png');
[m,n,p] = size(im);
R=im(:, :, 1);
G=im(:, :, 2);
B=im(:, :, 3);
thresh=0;
for j=1:n
for i=1:m
thresh = thresh + double((1.262*G(i,j))-(0.884*R(i,j))-(0.311*B(i,j)));
end
end
C++
#include <opencv2/opencv.hpp>
#include "opencv2/highgui.hpp"
#include <opencv2/core/mat.hpp>
#include <iostream>
using namespace std;
using namespace cv;
int main(){
Mat img = imread("1.png", IMREAD_COLOR);
int thresh = 0;
for(int j = 0; j <= img.cols; j++){
for(int i = 0; i <= img.rows; i++){
Vec3b color = img.at<Vec3b>(i,j);
uchar a = color.val[0], b = color.val[1], c = color.val[2];
thresh += double((1.262*b)-(0.884*c)-(0.311*a));
}
}
cout << thresh;
return 0;
}
The first mistake is in the upper bounds of the for loops: you are exceeding the range of the image borders.
j <= img.cols should be j < img.cols and
i <= img.rows should be i < img.rows
The second mistake is that you don't make an explicit type conversion for your uchar pixel values:
thresh += double((1.262*b)-(0.884*c)-(0.311*a));
should be
thresh += double((1.262*static_cast<double>(b))
-(0.884*static_cast<double>(c))
-(0.311*static_cast<double>(a)));
Here is the whole code I tried (note that thresh is also declared as double so the sum is not truncated to an integer on each addition):
#include <opencv2/opencv.hpp>
#include "opencv2/highgui.hpp"
#include <opencv2/core/mat.hpp>
#include <iostream>
using namespace std;
using namespace cv;
int main()
{
Mat img = imread("img.jpg", IMREAD_COLOR);
double thresh = 0.0;
resize(img,img,Size(100,100));
for(int j = 0; j < img.cols; j++){
for(int i = 0; i < img.rows; i++){
// 1ST WAY
Vec3b color = img.at<Vec3b>(i,j);
uchar a = color.val[0], b = color.val[1], c = color.val[2];
thresh += double((1.262*static_cast<double>(b))
-(0.884*static_cast<double>(c))
-(0.311*static_cast<double>(a)));
// 2ND WAY
// thresh += double((1.262 * (double)img.at<Vec3b>(Point(j,i))[1])   // Point takes (x = col, y = row)
//         - (0.884 * (double)img.at<Vec3b>(Point(j,i))[2])
//         - (0.311 * (double)img.at<Vec3b>(Point(j,i))[0]));
}
}
cout << thresh << endl;
return 0;
}
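As an aside, the same accumulation can also be done without explicit loops by letting OpenCV split the channels and sum a weighted combination. This is only a sketch (it assumes an 8-bit, 3-channel BGR image; note that MATLAB's uint8 arithmetic rounds and saturates, so the two sums may still differ slightly):
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>
using namespace std;
using namespace cv;
int main()
{
    Mat img = imread("1.png", IMREAD_COLOR);
    if (img.empty()) return -1;
    // Split into B, G, R planes (OpenCV stores channels in BGR order).
    vector<Mat> bgr;
    split(img, bgr);
    // Convert to double so the weighted combination is not clipped to [0,255].
    Mat B, G, R;
    bgr[0].convertTo(B, CV_64F);
    bgr[1].convertTo(G, CV_64F);
    bgr[2].convertTo(R, CV_64F);
    // Per-pixel weighted combination, then sum over all pixels.
    Mat combined = 1.262 * G - 0.884 * R - 0.311 * B;
    double thresh = sum(combined)[0];
    cout << thresh << endl;
    return 0;
}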
Related
I'm trying to sharpen an image in HLS format.
I've done the blurring correctly, but the sharpening doesn't work.
I know the sharpening steps are:
1) Blur the image: Image -> Blurred.
2) Make the unsharp mask: Unsharp_Mask = Image - Blurred.
3) Sharpen the image: Sharpened = Image + Unsharp_Mask
I also know that in HLS you don't have to do this on every channel, just on the "L" one.
I did it, but it doesn't work.
This is my code (I can't use the "code" button because it gives me an error saying that parts of the code are not properly formatted as code):
// UNSHARP MASK HLS
Mat* UnsharpHLS = new Mat(PaddedHLS->rows, PaddedHLS->cols, PaddedHLS->type());
Mat* SharpHLS = new Mat(PaddedHLS->rows, PaddedHLS->cols, PaddedHLS->type());
for (int i = 0; i < ImageHLS.rows; i++) {
for (int j = 0; j < ImageHLS.cols; j++)
{
UnsharpHLS->at<Vec3b>(i+1, j+1)[1] = ImageHLS.at<Vec3b>(i, j)[1] - PaddedHLS->at<Vec3b>(i + 1, j + 1)[1];
SharpHLS->at<Vec3b>(i+1, j+1)[1] = ImageHLS.at<Vec3b>(i, j)[1] + (UnsharpHLS->at<Vec3b>(i + 1, j + 1)[1]);
    }
}
cvtColor(*SharpHLS, Sharpened, COLOR_HLS2BGR);
Let's assume the previous part of the code works (I don't get any errors and I've already tried it); the only problem is in the code shown above.
This is the whole code:
#include <iostream>
#include <opencv2/opencv.hpp>
#include <cstdlib>
#include <math.h>
using namespace cv;
using namespace std;
int main()
{
// LOAD IMAGE
Mat Original = imread("Lena.png", IMREAD_COLOR);
// CHECK THAT THE IMAGE WAS READ CORRECTLY, OTHERWISE RETURN -1
if (Original.empty())
{
return -1;
}
// COLOR CONVERSION
Mat ImageHLS;
Mat ImageRGB;
Mat Blurred;
Mat Sharpened;
cvtColor(Original, ImageRGB, COLOR_BGR2RGB);
cvtColor(Original, ImageHLS, COLOR_BGR2HLS);
// CREATE THE PADDED HLS IMAGE
int FilterSize = 3;
int Padding = FilterSize - 1;
Mat* PaddedHLS = new Mat(ImageHLS.rows + Padding, ImageHLS.cols + Padding, ImageHLS.type());
copyMakeBorder(ImageHLS, *PaddedHLS, Padding / 2, Padding / 2, Padding / 2, Padding / 2, BORDER_DEFAULT);
// BLURRING ON THE PADDED HLS IMAGE
Mat* Filter = new Mat(FilterSize, FilterSize, ImageHLS.type());
for (int i = 1; i < PaddedHLS->rows - 1; i++)
{
for (int j = 1; j < PaddedHLS->cols - 1; j++)
{
for (int x = 0; x < FilterSize; x++)
{
for (int y = 0; y < FilterSize; y++)
{
Filter->at<Vec3b>(x, y)[1] = PaddedHLS->at<Vec3b>(i - 1 + x, j - 1 + y)[1];
}
}
PaddedHLS->at<Vec3b>(i, j)[1] = mean(*Filter).val[1];
}
}
cvtColor(*PaddedHLS, Blurred, COLOR_HLS2BGR);
// UNSHARP MASK HLS
Mat* UnsharpHLS = new Mat(PaddedHLS->rows, PaddedHLS->cols, PaddedHLS->type());
Mat* SharpHLS = new Mat(PaddedHLS->rows, PaddedHLS->cols, PaddedHLS->type());
for (int i = 0; i < ImageHLS.rows; i++)
{
for (int j = 0; j < ImageHLS.cols; j++)
{
UnsharpHLS->at<Vec3b>(i+1, j+1)[1] = ImageHLS.at<Vec3b>(i, j)[1] - PaddedHLS->at<Vec3b>(i + 1, j + 1)[1];
SharpHLS->at<Vec3b>(i+1, j+1)[1] = ImageHLS.at<Vec3b>(i, j)[1] + (UnsharpHLS->at<Vec3b>(i + 1, j + 1)[1]);
}
}
cvtColor(*SharpHLS, Sharpened, COLOR_HLS2BGR);
// DISPLAY IMAGES
imshow("Originale", Original);
imshow("Image RGB", ImageRGB);
imshow("Image HLS", ImageHLS);
imshow("Blurred HLS", *PaddedHLS);
imshow("Blurred BGR", Blurred);
imshow("Unsharp HLS", *UnsharpHLS);
imshow("Sharpened HLS", *SharpHLS);
imshow("Sharpened BGR", Sharpened);
// CLOSE EVERYTHING
waitKey(0);
destroyAllWindows();
}
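For reference, here is a minimal sketch of the same three steps (blur, build the unsharp mask, add it back) applied only to the L channel, but using OpenCV built-ins. The kernel size is an arbitrary choice, and addWeighted is used so the arithmetic saturates to [0,255] instead of wrapping around the way raw uchar subtraction does:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;
int main()
{
    Mat bgr = imread("Lena.png", IMREAD_COLOR);
    if (bgr.empty()) return -1;
    Mat hls;
    cvtColor(bgr, hls, COLOR_BGR2HLS);
    // Work on the L channel only.
    std::vector<Mat> ch;
    split(hls, ch);                    // ch[0] = H, ch[1] = L, ch[2] = S
    Mat blurredL;
    blur(ch[1], blurredL, Size(3, 3)); // 1) blur
    // 2) + 3) Sharpened = L + (L - Blurred) = 2*L - Blurred,
    // computed with addWeighted so the result saturates instead of wrapping.
    addWeighted(ch[1], 2.0, blurredL, -1.0, 0.0, ch[1]);
    merge(ch, hls);
    Mat sharpened;
    cvtColor(hls, sharpened, COLOR_HLS2BGR);
    imshow("Sharpened", sharpened);
    waitKey(0);
    return 0;
}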
I'm new to C++ and I need to compute the average of the red color in each column. Subsequently, I need to make a graph of the color density level per column.
This is the picture I am using; it is a sample of a bone densitometry:
This is my code so far:
#include <opencv2/opencv.hpp>
#include <string>
#include <vector>
#include <iostream>
#include <windows.h>
using namespace cv;
using namespace std;
int main(int argc, char** argv) {
FARPROC pGetPixel;
std::cout << "Medidas de muestra 1: \n";
Mat img = imread("C:\\Users\\Jimena G. Gordillo\\OneDrive\\Pictures\\den.bmp");
int matriz = img.cols * img.rows * 3;
const size_t chanels = 3; //RGB
// Read each pixel
for (int x = 0; x < img.cols; x++) {
for (int y = 0; y < img.rows; y++) {
size_t p = y * img.cols * chanels + x * chanels; //
uchar b = img.data[p + 0];
uchar g = img.data[p + 1];
uchar r = img.data[p + 2];
for (int i = 0; i <= img.cols; i++) { //here is where I want to obtain the sum of each column
int sum = sum + i;
//Calculate average
long long average = sum / img.rows; // average is divided by the total of rows
}
}
}
cout << "Filas: " << img.rows<< endl;
cout << "Columnas: " << img.cols << endl;
cout << "Area: " << matriz << endl;
namedWindow("imagen", WINDOW_AUTOSIZE);
imshow("imagen", img);
waitKey();
return 0;
}
Any help is appreciated.
You are trying to check row by row, not column by column; also, the third for loop is not needed.
Here is a solution that computes the column averages for each channel (red, green, blue) and draws a graph of the column averages for each channel.
Source:
Red Channel Graph for each column average:
Green Channel Graph for each column average:
Blue Channel Graph for each column average:
Code:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace cv;
using namespace std;
int main()
{
Mat img = imread("/ur/source/image/directory/img.png");
imshow("Source",img);
long int avgRedChannel = 0,avgGreenChannel = 0, avgBlueChannel = 0;
Mat graphRedChannel = Mat::zeros(Size(img.cols,300),CV_8UC1);
Mat graphGreenChannel = Mat::zeros(Size(img.cols,300),CV_8UC1);
Mat graphBlueChannel = Mat::zeros(Size(img.cols,300),CV_8UC1);
for(int i=0;i<img.cols;i++)
{
for(int j=0;j<img.rows;j++)
{
avgBlueChannel += (int)img.at<Vec3b>(Point(i,j))[0];
avgGreenChannel += (int)img.at<Vec3b>(Point(i,j))[1];
avgRedChannel += (int)img.at<Vec3b>(Point(i,j))[2];
}
graphBlueChannel.at<uchar>(Point(i,(avgBlueChannel/img.rows))) = 255;
graphGreenChannel.at<uchar>(Point(i,(avgGreenChannel/img.rows))) = 255;
graphRedChannel.at<uchar>(Point(i,(avgRedChannel/img.rows))) = 255;
avgBlueChannel = 0;
avgGreenChannel = 0;
avgRedChannel = 0;
}
imshow("RedChannelGraph",graphRedChannel);
imshow("GreenChannelGraph",graphGreenChannel);
imshow("BlueChannelGraph",graphBlueChannel);
waitKey(0);
return(0);
}
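As an aside, the per-column averages can also be obtained in a single call with cv::reduce (a sketch using the OpenCV 3.x constant names, assuming a 3-channel 8-bit BGR image; red is channel 2):
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;
int main()
{
    Mat img = imread("den.bmp", IMREAD_COLOR);
    if (img.empty()) return -1;
    // Collapse dimension 0 (the rows), leaving one row of per-column means.
    Mat colMeans;
    reduce(img, colMeans, 0, REDUCE_AVG, CV_32F);
    for (int x = 0; x < colMeans.cols; x++)
        cout << "column " << x << " red average: "
             << colMeans.at<Vec3f>(0, x)[2] << endl;
    return 0;
}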
I have a problem initializing a 3D Mat with OpenCV.
I would like to create a 3D matrix of size (rows x cols x 16), rows and cols being the dimensions of an image given earlier in the program. I have tried more methods than I can count, and they all give me more or less the same thing: the dimensions of my matrices come out as 0 or -858993460.
My code lines :
Mat image_Conv;
int rows = imageBicubic.rows;
int cols = imageBicubic.cols;
image_Conv = Mat::zeros(rows, cols, CV_32FC(16));
Can you tell me why I have this problem? Of course I have read all the posts that discuss this and the OpenCV documentation on the Mat class, but nothing works; I still have the same problem. Note that the data in my Mat will be float.
The code :
// Include standard headers
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <vector>
#include <ctime>
#include <iostream>
using namespace std;
//#include <opencv.hpp>
#include <opencv/cv.h>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv/highgui.h>
using namespace cv;
////////////////////////////////////////
// main file
int main()
{
string fileName = "myImage.jpg";
Mat imageSrc = cv::imread(fileName, CV_LOAD_IMAGE_UNCHANGED); // Read the file
if (!imageSrc.data) // Check for invalid input
{
cout << "Could not open or find the image\n";
return 1;
}
cout << "Loaded " << fileName << " (" << imageSrc.channels() << " channels)\n";
//int colorTransform = (imageSrc.channels() == 4) ? CV_BGRA2RGBA : (imageSrc.channels() == 3) ? CV_BGR2RGB : CV_GRAY2RGB;
//cv::cvtColor(imageSrc, imageSrc, colorTransform);
imageSrc.convertTo(imageSrc, CV_32F, 1 / 255.0, 0.0);
int SliceSizeWidth = imageSrc.cols / 2;
int sliceShiftWidth = imageSrc.cols / 4;
int sliceWidthNumber = (imageSrc.cols / sliceShiftWidth) - 1;
int SliceSizeHeight = imageSrc.rows / 2;
int sliceShiftHeight = imageSrc.rows / 4;
int sliceHeightNumber = (imageSrc.rows / sliceShiftHeight) - 1;
for (int sliceIndexHeight = 0; sliceIndexHeight < sliceHeightNumber; sliceIndexHeight++)
{
for (int sliceIndexWidth = 0; sliceIndexWidth < sliceWidthNumber; sliceIndexWidth++)
{
Mat patchImage = imageSrc(Rect(sliceIndexWidth*sliceShiftWidth, sliceIndexHeight*sliceShiftHeight, SliceSizeWidth, SliceSizeHeight));
Mat patchImageCopy;
patchImage.copyTo(patchImageCopy); // Deep copy => data are contiguous in patchImageCopy
Mat imageBicubic;
resize(patchImageCopy, imageBicubic, Size(2 * patchImage.cols, 2 * patchImage.rows), INTER_CUBIC);
Mat image_Padding;
int padding = 1;
copyMakeBorder(imageBicubic, image_Padding, padding, padding, padding, padding, BORDER_CONSTANT, Scalar(0));
Mat image_Conv;
int rows = imageBicubic.rows;
int cols = imageBicubic.cols;
image_Conv = Mat::zeros(rows, cols, CV_32FC(16));
/* rest of the code I have to write */
image_Conv.convertTo(image_Conv, CV_8U, 255.0, 0.0);
string nameBase = fileName.substr(0, fileName.find('.'));
string nameExt = fileName.substr(fileName.find('.'), fileName.length() - nameBase.length());
string strH = to_string(sliceIndexHeight);
string strW = to_string(sliceIndexWidth);
string outFileName = nameBase + "_H" + strH + "W" + strW + nameExt;
imwrite(outFileName, image_Conv);
}
}
return 0;
}
PS: Most of the code is not mine; I have to use it for my internship and can only edit between the lines:
resize(patchImageCopy, imageBicubic, Size(2 * patchImage.cols, 2 * patchImage.rows), INTER_CUBIC);
and
image_Conv.convertTo(image_Conv, CV_8U, 255.0, 0.0);
Thank you for your help !
EDIT: My first problem is solved, but it seems that it didn't work after all. I suppose that Mat::zeros sets all the Mat elements to 0, right? But if I write
cout << image_Conv.at<float>(0,0,0) << endl;
I get the error: "Unhandled exception at 0x000007FEFD4FA06D in xxxxxx.exe: Microsoft C++ exception: cv::Exception at memory location 0x000000000023E540."
I don't know what the problem is with the memory or how to fix it.
My goal is to fill my matrix element by element, using several for loops that perform various operations before the result is written into the corresponding element of my Mat. I did this with 3D and 4D arrays, and maybe that is the easiest solution, to do all the calculations with arrays, but I can't go from a 3D array to a 3D Mat or from a 3D Mat to a 3D array.
Just tested this on Visual Studio 2015 with OpenCV 3.4:
cv::Mat mat = cv::Mat::zeros(5, 5, CV_32FC(16));
This works fine.
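Note that a Mat created this way is a 2-D matrix with 16 channels, not a true 3-D matrix, which is most likely why at<float>(0,0,0) throws: the three-index form of at expects a 3-D Mat. Element access goes through a 16-element Vec instead; a small sketch:
#include <opencv2/core/core.hpp>
#include <iostream>
int main()
{
    cv::Mat mat = cv::Mat::zeros(5, 5, CV_32FC(16));
    // Indexed by (row, col); the channel is picked out of the returned Vec.
    typedef cv::Vec<float, 16> Vec16f;
    mat.at<Vec16f>(0, 0)[5] = 1.0f;
    std::cout << mat.at<Vec16f>(0, 0)[5] << std::endl;
    return 0;
}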
You should be able to create a multi-dimensional matrix filled with 0-values using:
int size[3] = { 5, 4, 3 };
cv::Mat M(3, size, CV_32F, cv::Scalar(0));
You can iterate over the matrix with M.at<float>(i,j,k) (only for a 3D matrix created as above):
for (int i = 0; i < size[0]; i++) {
for (int j = 0; j < size[1]; j++) {
for (int k = 0; k < size[2]; k++) {
M.at<float>(i,j,k) = i*12+j*3+k;
}
}
}
for (int i = 0; i < size[0]; i++) {
for (int j = 0; j < size[1]; j++) {
for (int k = 0; k < size[2]; k++) {
std::cout << "M(" << i << ", " << j << ", " << k << "): " << M.at<float>(i,j,k) << std::endl;
}
}
}
Alternatively, you should be able to create a 2D matrix with multiple channels with:
cv::Mat M(5, 4, CV_32FC(3), cv::Scalar(0));
To iterate over the 2D matrix and over the channels:
for (int i = 0; i < M.rows; i++) {
for (int j = 0; j < M.cols; j++) {
for (int k = 0; k < M.channels(); k++) {
M.at<cv::Vec<float, 3> >(i,j)[k] = i*M.cols*M.channels()+j*M.channels()+k;
}
}
}
I am using Otsu thresholding on an image.
Here is the input image:
Here is the output:
Here is the code I am using:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <string>
#include <math.h>
using namespace std;
using namespace cv;
int main(int argc, char const *argv[]) {
string win_name = "textextractor";
Mat img_a;
img_a = imread("../input/test_c.jpg");
Mat img_a_gray;
cvtColor(img_a, img_a_gray, CV_BGR2GRAY);
Mat img_a_blur;
GaussianBlur(img_a_gray, img_a_blur, Size(3, 3), 0, 0);
Mat img_a_thres;
// adaptiveThreshold(img_a_blur, img_a_thres, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY, 5, 4);
threshold(img_a_blur, img_a_thres, 0, 255, THRESH_OTSU);
namedWindow(win_name + "_a", CV_WINDOW_AUTOSIZE);
imshow(win_name + "_a", img_a_thres);
imwrite("../output/output_a.jpg", img_a_thres);
waitKey(0);
return 0;
}
The problem is that the output has a black region at the bottom and on the left. What can I do to minimize/remove this?
Edit:
I tried equalizeHist() and I am getting this:
Will try breaking the image into pieces and working on them separately.
Sorry, my bad. The previous one is using adaptive filtering. Using Otsu I get this:
There is no change in otsu's output :/
Edit 2: Completed the Feng Tan algorithm; it gives better results, but the text loses clarity.
Code:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/photo/photo.hpp"
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <string>
#include <math.h>
using namespace std;
using namespace cv;
int main(int argc, char const *argv[]) {
string win_name = "textextractor";
Mat img_c;
img_c = imread("../input/sample.jpg");
Mat img_c_gray;
cvtColor(img_c, img_c_gray, CV_BGR2GRAY);
Mat img_c_bin = Mat::zeros(img_c_gray.rows, img_c_gray.cols, CV_8UC1);
int s_win = 17;
int l_win = 35;
double min_tau = 10;
Rect roi_s = Rect(-s_win/2, -s_win/2, s_win, s_win);
Rect roi_l = Rect(-l_win/2, -l_win/2, l_win, l_win);
Rect img_c_roi = Rect(0, 0, img_c_gray.cols, img_c_gray.rows);
for (size_t r = 0; r < img_c_gray.rows; r++) {
for (size_t c = 0; c < img_c_gray.cols; c++) {
double pthres = 255;
Rect sROI = roi_s + Point(c, r);
sROI = sROI & img_c_roi;
if(sROI.width == 0 || sROI.height == 0) {
continue;
}
Rect lROI = roi_l + Point(c, r);
lROI = lROI & img_c_roi;
if(lROI.width == 0 || lROI.height == 0) {
continue;
}
Mat sROI_gray = img_c_gray(sROI);
Mat lROI_gray = img_c_gray(lROI);
double s_stdDev = 0;
double l_stdDev = 0;
double s_mean = 0;
double l_mean = 0;
double l_min = DBL_MAX;
for (size_t r = 0; r < sROI_gray.rows; r++) {
for (size_t c = 0; c < sROI_gray.cols; c++) {
s_mean += sROI_gray.at<unsigned char>(r, c);
}
}
s_mean = s_mean / static_cast<double> (sROI_gray.cols * sROI_gray.rows);
for (size_t r = 0; r < sROI_gray.rows; r++) {
for (size_t c = 0; c < sROI_gray.cols; c++) {
double diff = sROI_gray.at<unsigned char> (r, c) - s_mean;
s_stdDev += diff * diff;
}
}
s_stdDev = sqrt(s_stdDev / static_cast<int> (sROI_gray.cols * sROI_gray.rows));
for (size_t r = 0; r < lROI_gray.rows; r++) {
for (size_t c = 0; c < lROI_gray.cols; c++) {
l_mean += lROI_gray.at<unsigned char> (r, c);
if(lROI_gray.at<unsigned char> (r, c) < l_min) {
l_min = lROI_gray.at<unsigned char> (r, c);
}
}
}
l_mean = l_mean / static_cast<double> (lROI_gray.cols * lROI_gray.rows);
for (size_t r = 0; r < lROI_gray.rows; r++) {
for (size_t c = 0; c < lROI_gray.cols; c++) {
double diff = lROI_gray.at<unsigned char> (r, c) - l_mean;
l_stdDev += diff * diff;
}
}
l_stdDev = sqrt(l_stdDev / static_cast<double> (lROI_gray.cols * lROI_gray.rows));
double tau = ((s_mean - l_min) * (1 - s_stdDev / l_stdDev)) / 2.0;
if(tau < min_tau) {
tau = min_tau;
}
double threshold = s_mean - tau;
unsigned char pixel_val = img_c_gray.at<unsigned char>(r, c);
if(pixel_val >= threshold) {
img_c_bin.at<unsigned char> (r, c) = 255;
} else {
img_c_bin.at<unsigned char> (r, c) = 0;
}
}
}
namedWindow(win_name + "_c", CV_WINDOW_AUTOSIZE);
imshow(win_name + "_c", img_c_bin);
imwrite("../output/output_c.jpg", img_c_bin);
waitKey(0);
return 0;
}
Output:
This is what I was able to obtain after some trial and error. First I median-blurred the original image, then I applied an adaptive threshold to the blurred image.
This is what I got:
1. Adaptive Threshold using Gaussian filter:
2. Adaptive Threshold using Mean filter:
From here you can carry out the series of morphological operations that best suits your final image. :)
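For what it's worth, here is a minimal sketch of that pipeline (median blur, adaptive threshold, then a small morphological opening). The block size, offset constant and kernel size are guesses you would tune to your image:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace cv;
int main()
{
    Mat gray = imread("../input/test_c.jpg", IMREAD_GRAYSCALE);
    if (gray.empty()) return -1;
    Mat blurred, thres, cleaned;
    medianBlur(gray, blurred, 5);
    // A local threshold copes better with the uneven illumination
    // that makes a global Otsu threshold turn whole regions black.
    adaptiveThreshold(blurred, thres, 255, ADAPTIVE_THRESH_GAUSSIAN_C,
                      THRESH_BINARY, 15, 8);
    // Small opening to remove leftover speckle noise.
    Mat kernel = getStructuringElement(MORPH_ELLIPSE, Size(3, 3));
    morphologyEx(thres, cleaned, MORPH_OPEN, kernel);
    imshow("cleaned", cleaned);
    waitKey(0);
    return 0;
}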
You should try using CLAHE.
I tried it on MATLAB using:
Ia = imread('FHXTJ.jpg');
I = rgb2gray(Ia);
A = adapthisteq(I, 'clipLimit', 0.02, 'Distribution', 'rayleigh');
Result:
Note: You can apply thresholding on this image. Otsu should work fine now.
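On the OpenCV side, the counterpart of adapthisteq is cv::createCLAHE. A sketch (the clip limit and tile grid are rough choices, with Otsu applied to the equalized image afterwards):
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
using namespace cv;
int main()
{
    Mat gray = imread("FHXTJ.jpg", IMREAD_GRAYSCALE);
    if (gray.empty()) return -1;
    // Contrast-limited adaptive histogram equalization.
    Ptr<CLAHE> clahe = createCLAHE(2.0, Size(8, 8));
    Mat equalized;
    clahe->apply(gray, equalized);
    // Global Otsu threshold on the equalized image.
    Mat binary;
    threshold(equalized, binary, 0, 255, THRESH_BINARY | THRESH_OTSU);
    imshow("equalized", equalized);
    imshow("otsu", binary);
    waitKey(0);
    return 0;
}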
Hello, I'm trying to find characters in this image.
This is my image; after some preprocessing I received this image.
Now I'm trying to do connected component labeling to find the blobs; however, I get a lot of small blobs too.
#include <iostream>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace cv;
using namespace std;
void FindBlobs(const Mat &binary, vector < vector<Point2i> > &blobs);
int main(int argc, char **argv)
{
Mat img = imread("adaptive.png", 0);
if(!img.data) {
cout << "File not found" << endl;
return -1;
}
namedWindow("binary");
namedWindow("labelled");
Mat output = Mat::zeros(img.size(), CV_8UC3);
Mat binary;
vector < vector<Point2i > > blobs;
threshold(img, binary, 0, 1, THRESH_BINARY_INV);
FindBlobs(binary, blobs);
// Randomy color the blobs
for(size_t i=0; i < blobs.size(); i++) {
unsigned char r = 255 * (rand()/(1.0 + RAND_MAX));
unsigned char g = 255 * (rand()/(1.0 + RAND_MAX));
unsigned char b = 255 * (rand()/(1.0 + RAND_MAX));
for(size_t j=0; j < blobs[i].size(); j++) {
int x = blobs[i][j].x;
int y = blobs[i][j].y;
output.at<Vec3b>(y,x)[0] = b;//Vec3b RGB color order
output.at<Vec3b>(y,x)[1] = g;
output.at<Vec3b>(y,x)[2] = r;
}
}
imshow("binary", img);
imshow("labelled", output);
waitKey(0);
return 0;
}
void FindBlobs(const Mat &binary, vector < vector<Point2i> > &blobs)
{
blobs.clear();
Mat label_image;
binary.convertTo(label_image, CV_32SC1);
int label_count = 2; // starts at 2 because 0,1 are used already
for(int y=0; y < label_image.rows; y++) {
int *row = (int*)label_image.ptr(y);
for(int x=0; x < label_image.cols; x++) {
if(row[x] != 1) {
continue;
}
Rect rect;
floodFill(label_image, Point(x,y), label_count, &rect, 0, 0, 4);
vector <Point2i> blob;
for(int i=rect.y; i < (rect.y+rect.height); i++) {
int *row2 = (int*)label_image.ptr(i);
for(int j=rect.x; j < (rect.x+rect.width); j++) {
if(row2[j] != label_count) {
continue;
}
blob.push_back(Point2i(j,i));
}
}
blobs.push_back(blob);
label_count++;
}
}
}
So with this algorithm I receive the blobs,
but when I do
if(blobs.size()>50) {
blob.push_back(Point2i(j,i));
}
I receive a black screen. However, when I try
if(blob.size()<50){
blob.push_back(Point2i(j,i));
}
I receive small blobs. What could be the actual problem here?
I guess you want to keep only those "big" blobs?
If so, change the following code
blobs.push_back(blob);
label_count++;
to this:
if(blob.size() > 50){
blobs.push_back(blob);
}
label_count++;
And you can receive a picture like this:
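Alternatively, if your OpenCV version is 3.0 or newer, cv::connectedComponentsWithStats does the labelling for you and reports each blob's area, so the size filter becomes a simple check. A sketch, keeping the 50-pixel cut-off from above:
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <cstdlib>
using namespace cv;
int main()
{
    Mat img = imread("adaptive.png", 0);
    if (img.empty()) return -1;
    Mat binary;
    threshold(img, binary, 0, 255, THRESH_BINARY_INV);
    // labels: per-pixel label id; stats: one row per label with
    // [left, top, width, height, area]; centroids: centre of each label.
    Mat labels, stats, centroids;
    int nLabels = connectedComponentsWithStats(binary, labels, stats, centroids, 4);
    Mat output = Mat::zeros(img.size(), CV_8UC3);
    for (int label = 1; label < nLabels; label++)   // label 0 is the background
    {
        if (stats.at<int>(label, CC_STAT_AREA) <= 50)
            continue;                               // skip the small blobs
        Vec3b color(rand() & 255, rand() & 255, rand() & 255);
        for (int y = 0; y < labels.rows; y++)
            for (int x = 0; x < labels.cols; x++)
                if (labels.at<int>(y, x) == label)
                    output.at<Vec3b>(y, x) = color;
    }
    imshow("labelled", output);
    waitKey(0);
    return 0;
}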