CImg: Image binarization result fails - c++

The problem with my code below is that the result of the image binarization comes out too dark. (For one of my example images, the binary image even turns out completely black.)
I have been searching for a mistake in my code for a long time, but have found nothing that looks problematic to me.
Below is the image I want to binarize:
Image before binarization (named "hildebrantmed.bmp" in my code)
Below is the resulting binary image:
Image after binarization
Before I show you my source code, here are the 'rules' for the binarization (this is an assignment I was recently given):
I am not allowed to use any libraries other than CImg.
The programming language must be C/C++, nothing else.
Normally, Otsu's method is the expected choice, but I may use another algorithm if it works better.
Lastly, here is my source code:
#include <iostream>
#include <CImg.h>

using namespace std;
using namespace cimg_library;

/**
 * Generate histogram of the grayscale image
 */
int * generate_histogram(CImg<unsigned char> img)
{
    int histogram[256];

    // initialize default values for histogram
    for (int i = 0; i < 256; i++)
    {
        histogram[i] = 0;
    }

    // increment intensity for histogram
    for (int i = 0; i < img.height(); i++)
    {
        for (int j = 0; j < img.width(); j++)
        {
            int gray_value = img(j, i, 0, 0);
            histogram[gray_value]++;
        }
    }

    return histogram;
}

/**
 * Find threshold value from the grayscale image's histogram
 */
int otsu_threshold(CImg<unsigned char> img)
{
    int * histogram = generate_histogram(img); // image histogram
    int total = img.width() * img.height();    // total pixels
    double sum = 0;
    int i;
    for (i = 0; i < 256; i++)
    {
        sum += i * histogram[i];
    }

    double sumB = 0;
    int wB = 0;
    int wF = 0;

    double var_max = 0;
    int threshold = 0;

    for (i = 0; i < 256; i++)
    {
        wB += histogram[i];
        if (wB == 0) continue;

        wF = total - wB;
        if (wF == 0) continue;

        sumB += (double)(i * histogram[i]);
        double mB = sumB / wB;
        double mF = (sum - sumB) / wF;

        double var_between = (double)wB * (double)wF * (mB - mF) * (mB - mF);

        if (var_between > var_max)
        {
            var_max = var_between;
            threshold = i;
        }
    }

    return threshold;
}

/**
 * Main function
 */
int main(int argc, char * argv[])
{
    // retrieve image from its path
    CImg<unsigned char> img("hildebrantmed.bmp");
    const int width = img.width();
    const int height = img.height();

    // initialize a new image for img's grayscale
    CImg<unsigned char> gray_img(width, height, 1, 1, 0);

    // from RGB divided into three separate channels
    CImg<unsigned char> imgR(width, height, 1, 3, 0);
    CImg<unsigned char> imgG(width, height, 1, 3, 0);
    CImg<unsigned char> imgB(width, height, 1, 3, 0);

    // for all (x, y) pixels in image
    cimg_forXY(img, x, y)
    {
        imgR(x, y, 0, 0) = img(x, y, 0, 0),
        imgG(x, y, 0, 1) = img(x, y, 0, 1),
        imgB(x, y, 0, 2) = img(x, y, 0, 2);

        // separate the channels
        int R = (int)img(x, y, 0, 0);
        int G = (int)img(x, y, 0, 1);
        int B = (int)img(x, y, 0, 2);

        // obtain gray value from different weights of RGB channels
        int gray_value = (int)(0.299 * R + 0.587 * G + 0.114 * B);
        gray_img(x, y, 0, 0) = gray_value;
    }

    // find threshold of grayscale image
    int threshold = otsu_threshold(gray_img);

    // initialize a binary image version of img
    CImg<unsigned char> binary_img(width, height, 1, 1, 0);

    // for every (x, y) pixel in gray_img
    cimg_forXY(img, x, y)
    {
        int gray_value = gray_img(x, y, 0, 0);

        // COMPARE gray_value with threshold
        int binary_value;
        // gray_value > threshold: 255 (white)
        if (gray_value > threshold) binary_value = 255;
        // gray_value < threshold: 0 (black)
        else binary_value = 0;

        // assign binary_value to each of binary_img's pixels
        binary_img(x, y, 0, 0) = binary_value;
    }

    // display the images
    CImgDisplay src_disp(img, "Source image");
    CImgDisplay gray_disp(gray_img, "Grayscale image");
    CImgDisplay binary_disp(binary_img, "Binary image");

    while (!src_disp.is_closed() && !gray_disp.is_closed() && !binary_disp.is_closed())
    {
        src_disp.wait();
        gray_disp.wait();
    }

    return 0;
}
If you find that another algorithm would work better, please provide the algorithm and source code in your answer. Thanks for your attention.

First error: you're returning a pointer to a local array, which is destroyed as soon as the generate_histogram function returns.
To make it work properly, supply a pointer to an array owned by the calling function, something like:
{
    //[....]
    int histogram[256];
    generate_histogram(img, histogram);
    //[....]
}

int * generate_histogram(CImg<unsigned char> img, int* arHistogram)
{
    //[....]
}
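For completeness, here is a minimal sketch of how that fix could look with the asker's own function names (the Otsu computation is essentially the one from the question; only the histogram now lives in the caller's scope, so treat this as one possible layout rather than the only one):

#include <CImg.h>
using namespace cimg_library;

// The caller-owned buffer is filled in place, so nothing dangles after the call.
void generate_histogram(const CImg<unsigned char>& img, int histogram[256])
{
    for (int i = 0; i < 256; i++)
        histogram[i] = 0;
    cimg_forXY(img, x, y)
        histogram[img(x, y, 0, 0)]++;
}

int otsu_threshold(const CImg<unsigned char>& img)
{
    int histogram[256];
    generate_histogram(img, histogram); // histogram lives in this scope

    const int total = img.width() * img.height();
    double sum = 0;
    for (int i = 0; i < 256; i++)
        sum += i * histogram[i];

    double sumB = 0, var_max = 0;
    int wB = 0, threshold = 0;
    for (int i = 0; i < 256; i++)
    {
        wB += histogram[i];
        if (wB == 0) continue;
        const int wF = total - wB;
        if (wF == 0) break;

        sumB += (double)(i * histogram[i]);
        const double mB = sumB / wB;
        const double mF = (sum - sumB) / wF;
        const double var_between = (double)wB * wF * (mB - mF) * (mB - mF);
        if (var_between > var_max)
        {
            var_max = var_between;
            threshold = i;
        }
    }
    return threshold;
}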

Related

Why does my image get cropped in half when I apply the Sobel edge detector?

Let me start by saying I am very new to C++, so I don't really know the best practices or handle the syntax very well.
I'm trying to read a black-and-white image, use the Sobel algorithm to detect its edges, and output the result, but halfway through execution I get an error:
munmap_chunk(): invalid pointer
Aborted (core dumped)
Although an image is output, it's only half of it, and I can't seem to figure out what's causing this.
I wrote the following code:
#include <iostream>
#include <omp.h>
#include "CImg.h"

using namespace std;
using namespace cimg_library;

int main() {
    const int x_mask [9] = {
        -1, 0, 1,
        -2, 0, 2,
        -1, 0, 1
    };

    const int y_mask [9] = {
        -1, -2, -1,
         0,  0,  0,
         1,  2,  1
    };

    const char* fileName = "test.png";
    CImg<float> img = CImg<float>(fileName);
    int cols = img.width();
    int lines = img.height();
    CImg<float> output = CImg<float>(cols, lines, 1, 1, 0.0);

    printf("Loading %d x %d image...\n", cols, lines);

    const int mask_size = 3;
    int gradient_x;
    int gradient_y;

    // Loop through image ignoring borders
    for(int i = 1; i < cols-1; i++) {
        for(int j = 1; j < lines-1; j++) {
            output(j,i) = 0;
            gradient_x = 0;
            gradient_y = 0;

            // Find the x_gradient and y_gradient
            for(int m = 0; m < mask_size; m++) {
                for(int n = 0; n < mask_size; n++) {
                    // Neighbourgh pixels
                    int np_x = j + (m - 1);
                    int np_y = i + (n - 1);
                    float v = img(np_x, np_y);
                    int mask_index = (m*3) + n;
                    gradient_x = gradient_x + (x_mask[mask_index] * v);
                    gradient_y = gradient_y + (y_mask[mask_index] * v);
                }
            }

            float gradient_sum = sqrt((gradient_x * gradient_x) + (gradient_y * gradient_y));
            if(gradient_sum >= 255) {
                gradient_sum = 255;
            } else if(gradient_sum <= 0) {
                gradient_sum = 0;
            }

            output(j, i) = gradient_sum;
        }
    }

    printf("Outputed image of size %d x %d\n", output.width(), output.height());
    output.save("test_edges.png");
    return 0;
}
Applied to this image:
I get this output:

How to open two transparent images inside opencv in C++

I have 2 transparent images which I want to open in one window in OpenCV.
The code below opens the two images but without the alpha channel, because it uses (CV_8UC3). For transparent images we should use (CV_8UC4) to show the transparency, but when I change the code to (CV_8UC4) the program gives me an error.
I'll put a # on the line with (CV_8UC4) and a * on the line where the error occurs.
Is this code suitable for what I want to do?
This code opens multiple images, but I've used it for two.
The code is:
void ShowManyImages(string title, int nArgs, ...) {
    int size;
    int i;
    int m, n;
    int x, y;

    // w - Maximum number of images in a row
    // h - Maximum number of images in a column
    int w, h;

    // scale - How much we have to resize the image
    float scale;
    int max;

    // If the number of arguments is lesser than 0 or greater than 12
    // return without displaying
    if (nArgs <= 0) {
        printf("Number of arguments too small....\n");
        return;
    }
    else if (nArgs > 14) {
        printf("Number of arguments too large, can only handle maximally 12 images at a time ...\n");
        return;
    }
    // Determine the size of the image,
    // and the number of rows/cols
    // from number of arguments
    else if (nArgs == 1) {
        w = h = 1;
        size = 300;
    }
    else if (nArgs == 2) {
        w = 2; h = 1;
        size = 1000;
    }
    else if (nArgs == 3 || nArgs == 4) {
        w = 2; h = 2;
        size = 300;
    }
    else if (nArgs == 5 || nArgs == 6) {
        w = 3; h = 2;
        size = 200;
    }
    else if (nArgs == 7 || nArgs == 8) {
        w = 4; h = 2;
        size = 200;
    }
    else {
        w = 4; h = 3;
        size = 150;
    }

    // Create a new 3 channel image
    # Mat DispImage = Mat::zeros(Size(100 + size*w, 60 + size*h), CV_8UC4);

    // Used to get the arguments passed
    va_list args;
    va_start(args, nArgs);

    // Loop for nArgs number of arguments
    for (i = 0, m = 20, n = 20; i < nArgs; i++, m += (0)) {
        // Get the Pointer to the IplImage
        Mat img = va_arg(args, Mat);

        // Check whether it is NULL or not
        // If it is NULL, release the image, and return
        if (img.empty()) {
            printf("Invalid arguments");
            return;
        }

        // Find the width and height of the image
        x = img.cols;
        y = img.rows;

        // Find whether height or width is greater in order to resize the image
        max = (x > y) ? x : y;

        // Find the scaling factor to resize the image
        scale = (float)((float)max / size);

        // Used to Align the images
        if (i % w == 0 && m != 20) {
            m = 20;
            n += 20 + size;
        }

        // Set the image ROI to display the current image
        // Resize the input image and copy the it to the Single Big Image
        Rect ROI(m, n, (int)(x / scale), (int)(y / scale));
        Mat temp; resize(img, temp, Size(ROI.width, ROI.height));
        * temp.copyTo(DispImage(ROI));
    }

    // Create a new window, and show the Single Big Image
    namedWindow(title, 1);
    imshow(title, DispImage);
    waitKey();

    // End the number of arguments
    va_end(args);
}

int main(int argc, char** argv)
{
    Mat img1 = imread("c:\\1.png");
    Mat img2 = imread("c:\\2.png");

    ShowManyImages("Image", 2, img1, img2);
    return 0;
}
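For what it's worth, cv::imread loads a 3-channel BGR image by default, so copying such a Mat into a CV_8UC4 canvas fails with a type mismatch. A minimal sketch of loading the images with their alpha channel preserved, assuming OpenCV 3+ and reusing the file names and ShowManyImages from above (this illustrates the CV_8UC3 vs CV_8UC4 distinction, not necessarily the exact fix the poster needs):

#include <opencv2/opencv.hpp>
using namespace cv;

// ShowManyImages is the function defined above.
int main()
{
    // IMREAD_UNCHANGED keeps the alpha channel, so transparent PNGs come back
    // as CV_8UC4 instead of the default 3-channel BGR.
    Mat img1 = imread("c:\\1.png", IMREAD_UNCHANGED);
    Mat img2 = imread("c:\\2.png", IMREAD_UNCHANGED);
    if (img1.empty() || img2.empty()) return -1;

    // If a source still has only 3 channels, add an opaque alpha channel so it
    // matches a CV_8UC4 destination such as DispImage above.
    if (img1.channels() == 3) cvtColor(img1, img1, COLOR_BGR2BGRA);
    if (img2.channels() == 3) cvtColor(img2, img2, COLOR_BGR2BGRA);

    ShowManyImages("Image", 2, img1, img2);
    return 0;
}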

How to implement Filter2d Opencv in C++ without the builtin function? [duplicate]

I have been tasked with writing my own Sobel method rather than using the cv::Sobel found in OpenCV.
I tried implementing one I found at Programming techniques.
However, when I run the program, cv::Mat throws an error. Does anyone have any idea why?
Sobel method:
int sobelCorrelation(Mat InputArray, int x, int y, String xory)
{
    if (xory == "x") {
        return InputArray.at<uchar>(y - 1, x - 1) +
            2 * InputArray.at<uchar>(y, x - 1) +
            InputArray.at<uchar>(y + 1, x - 1) -
            InputArray.at<uchar>(y - 1, x + 1) -
            2 * InputArray.at<uchar>(y, x + 1) -
            InputArray.at<uchar>(y + 1, x + 1);
    }
    else if (xory == "y")
    {
        return InputArray.at<uchar>(y - 1, x - 1) +
            2 * InputArray.at<uchar>(y - 1, x) +
            InputArray.at<uchar>(y - 1, x + 1) -
            InputArray.at<uchar>(y + 1, x - 1) -
            2 * InputArray.at<uchar>(y + 1, x) -
            InputArray.at<uchar>(y + 1, x + 1);
    }
    else
    {
        return 0;
    }
}
Calling and processing it in another function:
void imageOutput(Mat image, String path) {
    image = imread(path, 0);
    Mat dst;
    dst = image.clone();
    int sum, gx, gy;
    if (image.data && !image.empty()) {
        for (int y = 0; y < image.rows; y++)
            for (int x = 0; x < image.cols; x++)
                dst.at<uchar>(y, x) = 0.0;

        for (int y = 1; y < image.rows - 1; ++y) {
            for (int x = 1; x < image.cols - 1; ++x) {
                gx = sobelCorrelation(image, x, y, "x");
                gy = sobelCorrelation(image, x, y, "y");
                sum = absVal(gx) + absVal(gy);
                if (sum > 255)
                    sum = 255;
                else if (sum < 0)
                    sum = 0;
                dst.at<uchar>(x, y) = sum;
            }
        }

        namedWindow("Original");
        imshow("Original", image);
        namedWindow("Diagonal Edges");
        imshow("Diagonal Edges", dst);
    }
    waitKey(0);
}
Main:
int main(int argc, char* argv[]) {
    Mat image;
    imageOutput(image, "C:/Dropbox/2-falling-toast-ted-kinsman.jpg");
    return 0;
}
The absVal method:
int absVal(int v)
{
    return v*((v < 0)*(-1) + (v > 0));
}
When run, it throws this error:
Unhandled exception at 0x00007FFC9365A1C8 in Miniproject01.exe: Microsoft C++ exception: cv::Exception at memory location 0x000000A780A4F110.
and points to here:
template<typename _Tp> inline
_Tp& Mat::at(int i0, int i1)
{
    CV_DbgAssert( dims <= 2 && data && (unsigned)i0 < (unsigned)size.p[0] &&
        (unsigned)(i1 * DataType<_Tp>::channels) < (unsigned)(size.p[1] * channels()) &&
        CV_ELEM_SIZE1(DataType<_Tp>::depth) == elemSize1());
    return ((_Tp*)(data + step.p[0] * i0))[i1];
}
If anyone has any advice or ideas about what I am doing wrong, it would be greatly appreciated!
This code snippet demonstrates how to compute Sobel 3x3 derivatives by convolving the image with the Sobel kernels. You can easily extend it to different kernel sizes by passing the kernel radius as an input to my_sobel and creating the appropriate kernel.
#include <opencv2\opencv.hpp>
#include <iostream>

using namespace std;
using namespace cv;

void my_sobel(const Mat1b& src, Mat1s& dst, int direction)
{
    Mat1s kernel;
    int radius = 0;

    // Create the kernel
    if (direction == 0)
    {
        // Sobel 3x3 X kernel
        kernel = (Mat1s(3,3) << -1, 0, +1, -2, 0, +2, -1, 0, +1);
        radius = 1;
    }
    else
    {
        // Sobel 3x3 Y kernel
        kernel = (Mat1s(3, 3) << -1, -2, -1, 0, 0, 0, +1, +2, +1);
        radius = 1;
    }

    // Handle border issues
    Mat1b _src;
    copyMakeBorder(src, _src, radius, radius, radius, radius, BORDER_REFLECT101);

    // Create output matrix
    dst.create(src.rows, src.cols);

    // Convolution loop
    // Iterate on image
    for (int r = radius; r < _src.rows - radius; ++r)
    {
        for (int c = radius; c < _src.cols - radius; ++c)
        {
            short s = 0;

            // Iterate on kernel
            for (int i = -radius; i <= radius; ++i)
            {
                for (int j = -radius; j <= radius; ++j)
                {
                    s += _src(r + i, c + j) * kernel(i + radius, j + radius);
                }
            }
            dst(r - radius, c - radius) = s;
        }
    }
}

int main(void)
{
    Mat1b img = imread("path_to_image", IMREAD_GRAYSCALE);

    // Compute custom Sobel 3x3 derivatives
    Mat1s sx, sy;
    my_sobel(img, sx, 0);
    my_sobel(img, sy, 1);

    // Edges L1 norm
    Mat1b edges_L1;
    absdiff(sx, sy, edges_L1);

    // Check results against OpenCV
    Mat1s cvsx, cvsy;
    Sobel(img, cvsx, CV_16S, 1, 0);
    Sobel(img, cvsy, CV_16S, 0, 1);
    Mat1b cvedges_L1;
    absdiff(cvsx, cvsy, cvedges_L1);

    Mat diff_L1;
    absdiff(edges_L1, cvedges_L1, diff_L1);

    cout << "Number of different pixels: " << countNonZero(diff_L1) << endl;

    return 0;
}
If I were you, I would almost always avoid writing for loops where possible; unnecessary loops tend to slow down execution. Instead, reuse existing functions wherever possible. For example, the code below uses filter2D to give the 2D correlation result:
Mat kern = (Mat_<float>(3,3)<<-1,0,1,-2,0,2,-1,0,1);
Mat dest;
cv::filter2D(src,dest,src.type(),kern);
If you would like to get convolution results, you would need to flip the kernel 'kern' before filtering.
cv::flip(kern,kern, -1);
If you would like to squeeze more performance, you can use separable filters 'sepFilter2D'.
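A minimal sketch of that separable route for the Sobel X kernel above, assuming a recent OpenCV (the kernel factors into a [-1 0 1] derivative applied along each row followed by a [1 2 1] smoothing applied along each column; the image path is a placeholder):

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat src = imread("path_to_image", IMREAD_GRAYSCALE);
    if (src.empty()) return -1;

    // Sobel X split into two 1-D passes instead of one 3x3 pass.
    Mat rowKernel = (Mat_<float>(1, 3) << -1, 0, 1); // derivative along x
    Mat colKernel = (Mat_<float>(3, 1) <<  1, 2, 1); // smoothing along y

    Mat dstSep;
    sepFilter2D(src, dstSep, CV_16S, rowKernel, colKernel);
    return 0;
}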
Thanks for the post. I was able to generate a gradient map using the above kernel, and I used the OpenCV filter2D code from
Using custom kernel in opencv 2DFilter - causing crash ... convolution how?
to convolve the image with the kernel. The code that I used is:
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
using namespace cv;
using namespace std;
int main(int argc, char** argv) {
//Loading the source image
Mat src;
//src = imread("1.png");
src = cv::imread("E:\\Gray_Image.bmp", 0);
//Output image of the same size and the same number of channels as src.
Mat dst1,dst2,grad;
//Mat dst = src.clone(); //didn't help...
//desired depth of the destination image
//negative so dst will be the same as src.depth()
int ddepth = -1;
//the convolution kernel, a single-channel floating point matrix:
//Mat kernel = imread("kernel.png");
Mat kernel_x = (Mat_<float>(3, 3) << -1, 0, 1, -2, 0, 2, -1, 0, 1);
Mat kernel_y = (Mat_<float>(3, 3) << -1, -2, -1, 0, 0, 0, 1, 2, 1);
kernel_x.convertTo(kernel_x, CV_32F); kernel_y.convertTo(kernel_y, CV_32F); //<<not working
//normalize(kernel, kernel, 1.0, 0.0, 4, -1, noArray()); //doesn't help
//cout << kernel.size() << endl; // ... gives 11, 11
//however, the example from tutorial that does work:
//kernel = Mat::ones( 11, 11, CV_32F )/ (float)(11*11);
//default value (-1,-1) here means that the anchor is at the kernel center.
Point anchor = Point(-1, -1);
//value added to the filtered pixels before storing them in dst.
double delta = 0;
//alright, let's do this...
filter2D(src, dst1, ddepth, kernel_x, anchor, delta, BORDER_DEFAULT);
filter2D(src, dst2, ddepth, kernel_y, anchor, delta, BORDER_DEFAULT);
imshow("Source", src); //<< unhandled exception here
//imshow("Kernel1", kernel_x); imshow("Kernel2", kernel_y);
imshow("Destination1", dst1);
imshow("Destination2", dst2);
addWeighted(dst1, 0.5, dst2, 0.5, 0, grad);
imshow("Destination3", grad);
waitKey(1000000);
return 0;
}

MedianFilter for large kernels and 16 bit unsigned images

Update: I have now used the new approach.
Mat MedianFilter(Mat img, ushort SizeY, ushort SizeX, ushort kernelSize){
    // Alloc memory for images
    Mat medianImg(SizeY, SizeX, CV_16U, Scalar(0)), padded;
    copyMakeBorder(img, padded, kernelSize/2, kernelSize/2, kernelSize/2, kernelSize/2, BORDER_CONSTANT, Scalar(0));
    for (unsigned y = 0; y < 50; y++){
        for (unsigned x = 0; x < 50; x++){
            // Set median at central pixel pos
            medianImg.at<ushort>(y, x) = getMedian(padded(Rect(x, y, kernelSize, kernelSize)));//sorter[n];
            //sorter.clear();
        }
    }
    return medianImg;
}
And the getMedian function, adapted to 16 bit from the OpenCV routine:
int getMedian(const Mat& img){
    int channels = 0;
    Mat hist;
    int hist_size = 65535;
    float range[] = {0, 65535};
    const float* ranges[] = {range};
    calcHist(&img, 1, &channels, Mat(), hist, 1, &hist_size, ranges);

    float *ptr = hist.ptr<float>();
    int median = 0, sum = 0;
    int thresh = (int)img.total() / 2;
    while(sum < thresh && median < 65535) {
        sum += static_cast<int>(ptr[median]);
        median++;
    }
    return median;
}
The problem now is that I'm not sure whether the borders are handled correctly.
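One quick way to sanity-check the borders is to compare a small kernel against cv::medianBlur, which supports CV_16U for 3x3 and 5x5 apertures. A minimal sketch with a synthetic image, assuming the two functions above are declared and in scope (differences should show up mainly at the borders, since medianBlur replicates the border while the code above pads with zeros):

#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;

// Assumes MedianFilter() and getMedian() from the post are declared above.
int main()
{
    // Synthetic 16-bit test image with random content (50x50 matches the loop bounds above).
    Mat img(50, 50, CV_16U);
    randu(img, Scalar(0), Scalar(65535));

    // Custom filter vs. OpenCV reference for a 3x3 aperture.
    Mat mine = MedianFilter(img, (ushort)img.rows, (ushort)img.cols, 3);
    Mat ref;
    medianBlur(img, ref, 3);

    // Count disagreeing pixels; interior pixels should match, border pixels may not.
    Mat diff;
    absdiff(mine, ref, diff);
    std::cout << "Differing pixels: " << countNonZero(diff) << std::endl;
    return 0;
}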

entropy for a gray image in opencv

I need code to find the entropy of an image.
for(int i = 0; i < grey_image.rows; i++)
{
    for(int j = 1; j < grey_image.cols; j++)
    {
        //cout<<i<<" "<<j<<" "<<(int)grey_image.at<uchar>(i,j)<<endl;
        int a = (int)grey_image.at<uchar>(i, j);
        int b = (int)grey_image.at<uchar>(i, j-1);
        int x = a - b;
        if(x < 0)
            x = 0 - x;
        probability_array[x]++;
        //grey_image.at<uchar>(i,j) = 255;
    }
}

//calculating probability
int n = rows * cols;
for(int i = 0; i < 256; i++)
{
    probability_array[i] /= n;
    //cout<<probability_array[i]<<endl;
}

// galeleo team formula
float entropy = 0;
for(int i = 0; i < 256; i++)
{
    if (probability_array[i] > 0)
    {
        float x = probability_array[i] * log(probability_array[i]);
        entropy += x;
    }
}
return 0 - entropy;
Actually, I am using this code on a programmable camera to measure entropy. Now I want to use it on a Windows system. I am getting the entropy of a gray image as zero. Please help me out: where did I go wrong?
Without knowing what image you are using, we cannot know whether a zero entropy result is actually wrong (as suggested by @Xocoatzin).
Besides, your code can benefit from some of the latest OpenCV features 😊. Here is a working implementation using OpenCV histograms and matrix expressions:
if (frame.channels() == 3) cvtColor(frame, frame, CV_BGR2GRAY);

/// Establish the number of bins
int histSize = 256;

/// Set the ranges ( for B,G,R) )
float range[] = { 0, 256 };
const float* histRange = { range };

bool uniform = true; bool accumulate = false;

/// Compute the histograms:
Mat hist;
calcHist( &frame, 1, 0, Mat(), hist, 1, &histSize, &histRange, uniform, accumulate );

hist /= frame.total();
hist += 1e-4; //prevent 0

Mat logP;
cv::log(hist, logP);

float entropy = -1 * sum(hist.mul(logP)).val[0];
cout << entropy << endl;
Here is what I'm using; hope it helps: https://github.com/samidalati/OpenCV-Entropy. There you can find a couple of methods to calculate the entropy of color and grayscale images using OpenCV.
float entropy(Mat seq, Size size, int index)
{
    int cnt = 0;
    float entr = 0;
    float total_size = size.height * size.width; //total size of all symbols in an image
    for(int i = 0; i < index; i++)
    {
        float sym_occur = seq.at<float>(0, i); //the number of times a symbol has occurred
        if(sym_occur > 0) //log of zero goes to infinity
        {
            cnt++;
            entr += (sym_occur/total_size)*(log2(total_size/sym_occur));
        }
    }
    cout << "cnt: " << cnt << endl;
    return entr;
}
// myEntropy calculates relative occurrence of different symbols within given input sequence using histogram
Mat myEntropy(Mat seq, int histSize)
{
    float range[] = { 0, 256 };
    const float* histRange = { range };
    bool uniform = true; bool accumulate = false;

    Mat hist;

    /// Compute the histograms:
    calcHist( &seq, 1, 0, Mat(), hist, 1, &histSize, &histRange, uniform, accumulate );

    return hist;
}
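A minimal usage sketch of how the two functions above fit together (the image path is a placeholder; the histogram is reshaped to a row vector because entropy() indexes it as seq.at<float>(0, i)):

#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;

// Assumes entropy() and myEntropy() from the snippet above are declared here.
int main()
{
    Mat gray = imread("path_to_image", IMREAD_GRAYSCALE);
    if (gray.empty()) return -1;

    // Build the 256-bin histogram, then turn it into Shannon entropy (in bits).
    Mat hist = myEntropy(gray, 256).reshape(1, 1);
    float H = entropy(hist, gray.size(), 256);
    cout << "Entropy: " << H << " bits" << endl;
    return 0;
}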
//Calculate Entropy of 2D histogram
double Sum_prob_1k = 0, Sum_prob_kl = 0, Sum_prob_ln_1k = 0, Sum_prob_ln_kl = 0;

for (int k = start; k < end; k++)
{
    Sum_prob_1k = 0; Sum_prob_kl = 0;
    Sum_prob_ln_1k = 0; Sum_prob_ln_kl = 0;

    //i=1 need to be start = 1
    for (int i = 1; i < k; i++)
    {
        Sum_prob_1k += HiGreyN[i];
        if (HiGreyN[i] != 0)
            Sum_prob_ln_1k += (HiGreyN[i] * System.Math.Log(HiGreyN[i]));
    }
    for (int i = k; i < end; i++)
    {
        Sum_prob_kl += HiGreyN[i];
        if (HiGreyN[i] != 0)
            Sum_prob_ln_kl += (HiGreyN[i] * System.Math.Log(HiGreyN[i]));
    }

    //Final equation of entropy for each K
    EiGrey[k] = System.Math.Log(Sum_prob_1k) + System.Math.Log(Sum_prob_kl) -
                (Sum_prob_ln_1k / Sum_prob_1k) - (Sum_prob_ln_kl / Sum_prob_kl);

    if (EiGrey[k] < 0)
        EiGrey[k] = 0;
}
//End calculating 2D Entropy