Histogram Equalization implementation using OpenCV C++

I am doing my own implementation of histogram equalization, but it produces some creepy-looking images.
I counted the frequency of each intensity value, then divided each count by the number of pixels in the picture to get a probability. Then I built a cumulative probability array, which I later multiplied by 255 and floored. This floored value became the new color. What am I missing?
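In symbols, the mapping described above is (with n_j the number of pixels having intensity j, and N the total number of pixels):

    p_j = n_j / N
    s_k = floor(255 * (p_0 + p_1 + ... + p_k))

Every pixel that originally has intensity k should then be written as s_k.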
Before equalization
After equalization
My code:
#include <opencv2/opencv.hpp>
#include <cmath>
using namespace cv;

void pixelFrequency(Mat img, int intensity[])
{
    for (int j = 0; j < img.rows; j++)
        for (int i = 0; i < img.cols; i++)
            intensity[int(img.at<uchar>(j, i))]++;
}

void pixelProbability(Mat img, double probability[], int intensity[])
{
    for (int i = 0; i < 256; i++)
        probability[i] = intensity[i] / double(img.rows * img.cols);
}

void cumuProbability(double probability[], double cumulativeProbability[])
{
    cumulativeProbability[0] = probability[0];
    for (int i = 1; i < 256; i++)
        cumulativeProbability[i] = probability[i] + cumulativeProbability[i - 1];
}

void histogramEqualization(Mat& img, int intensity[], double probability[], double cumulativeProbability[])
{
    pixelFrequency(img, intensity);
    pixelProbability(img, probability, intensity);
    cumuProbability(probability, cumulativeProbability);
    for (int i = 0; i < 256; i++)
        cumulativeProbability[i] = floor(cumulativeProbability[i] * 255);
    for (int j = 0; j < img.rows; j++)
    {
        for (int i = 0; i < img.cols; i++)
        {
            //int color = cumulativeProbability[int(img.at<uchar>(i, j))];
            img.at<uchar>(j, i) = cumulativeProbability[int(img.at<uchar>(i, j))];
        }
    }
}

int main()
{
    int intensity[256] = { 0 };
    double probability[256] = { 0 };
    double cumulativeProbability[256] = { 0 };
    Mat img = imread("ex.jpg", IMREAD_GRAYSCALE);
    histogramEqualization(img, intensity, probability, cumulativeProbability);
    namedWindow("image", WINDOW_AUTOSIZE);
    imshow("image", img);
    waitKey(0);
    return 0;
}
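For a sanity check, OpenCV's built-in equalizeHist implements this same mapping; a minimal comparison sketch, assuming the same ex.jpg input:

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    // Same grayscale input as above.
    Mat src = imread("ex.jpg", IMREAD_GRAYSCALE);
    if (src.empty())
        return -1;

    // Reference result from OpenCV's own implementation.
    Mat reference;
    equalizeHist(src, reference);

    imshow("built-in equalizeHist", reference);
    waitKey(0);
    return 0;
}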

Related

OpenCV Using a loop to sum a part of your image error?

I want to move through an image and take a 5x5 grid centered on each pixel, then sum that grid and compare the sum to a threshold.
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace cv;
using namespace std;

int main()
{
    Mat element = getStructuringElement(MORPH_RECT, Size(7, 7));
    Mat im = imread("blob.png", IMREAD_GRAYSCALE);
    bool fromCenter = false;
    namedWindow("Crop frame", WINDOW_NORMAL);
    Rect2d r = selectROI("Crop frame", im, fromCenter);
    im = im(r);
    erode(im, im, element);
    Mat clone = im;
    int sectionSize = 4;
    int width = im.cols - sectionSize / 2;
    int height = im.rows - sectionSize / 2;
    int sum = 0;
    int counter = 0;
    for (int i = sectionSize / 2; i < width; i++) {
        for (int j = sectionSize / 2; j < height; j++) {
            Rect rect = Rect(i, j, sectionSize, sectionSize);
            rect -= Point(rect.width / 2, rect.height / 2);
            Mat temp = im(rect);
            for (int x = 0; x < temp.cols; x++) {
                for (int y = 0; y < temp.rows; y++) {
                    int pixelValue = (int)temp.at<uchar>(y, x);
                    sum += pixelValue;
                }
            }
            cout << sum << endl;
            if (sum > 3800) {
                clone.at<uchar>(j, i) = 255;
            }
            else {
                clone.at<uchar>(j, i) = 0;
            }
            namedWindow("erode", WINDOW_NORMAL);
            imshow("erode", clone);
            waitKey(1);
            sum = 0;
        }
    }
}
I am getting fluctuations in the pixel sum depending on where I select my ROI in the image, even when both selections are over white space. Also, my pixel sum changes when I change the value of the clone pixel in this section of the code, which I do not understand at all:
if (sum > 3800) {
    clone.at<uchar>(j, i) = 255;
}
else {
    clone.at<uchar>(j, i) = 0;
}
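For reference, here is a sketch of the same windowed-sum threshold without the four nested loops, assuming the same blob.png input, 5x5 window, and 3800 threshold as above. filter2D with an all-ones kernel computes every window sum at once, and the result lands in a fresh matrix, so the thresholded output can never feed back into later sums:

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat im = imread("blob.png", IMREAD_GRAYSCALE);
    if (im.empty())
        return -1;

    // Sum of each 5x5 neighborhood: convolve with an all-ones kernel.
    // CV_32F output avoids uchar overflow (max sum is 25 * 255 = 6375).
    Mat ones = Mat::ones(5, 5, CV_32F);
    Mat sums;
    filter2D(im, sums, CV_32F, ones);

    // Comparing against the threshold yields a CV_8U image of 0s and 255s.
    Mat out = sums > 3800;

    namedWindow("thresholded sums", WINDOW_NORMAL);
    imshow("thresholded sums", out);
    waitKey(0);
    return 0;
}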

C++ OpenCV Mat pixel value and Opencv Errors

I am writing a simple C++ application using Visual Studio and OpenCV that reads the red, green, and blue values from an image (named src in the code) and stores each channel's pixel values individually in three different Mat objects (named RM, BM, GM in the code). I saw this Stack Overflow question
and did exactly as the first answer explained. I was able to read all the pixel values just fine, but I wasn't able to change the pixel values of the other images because abort() was called. This is the console window after I run the code.
Console Window
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <stdio.h>

using namespace cv;
using namespace std;

int main() {
    String file_name = "C:\\images\\haaand.jpg";
    Mat src;
    Mat RM, BM, GM;
    //RM.create(src.cols, src.rows, CV_8UC(2));
    //BM.create(src.cols, src.rows, CV_8UC(2));
    //GM.create(src.cols, src.rows, CV_8UC(2));
    Vec3b intensity;
    Vec3b To[3];
    src = imread(file_name);
    imshow("src", src);
    printf("cols:%d rows:%d \n", src.cols, src.rows);
    for (int i = 0; i < src.cols; i++) {
        for (int j = 0; j < src.rows; j++) {
            intensity = src.at<Vec3b>(j, i);
            printf("intensity:%d %d %d \n", intensity[0], intensity[1], intensity[2]);
            for (int k = 0; k < 3; k++) {
                //uchar bla;
                //bla = intensity[k];
                for (int p = 0; p < 3; p++) {
                    To[k][p] = intensity[k];
                }
                printf("(k:%d) %d %d %d\n", k, To[k][0], To[k][1], To[k][2]);
            }
            printf("all done\n");
            BM.at<Vec3b>(j, i) = To[0];
            GM.at<Vec3b>(j, i) = To[1];
            RM.at<Vec3b>(j, i) = To[2];
        }
    }
    imshow("RM", RM);
    imshow("BM", BM);
    imshow("GM", GM);
    return 0;
}
Could anyone tell me why this error might happen?
//RM.create(src.cols, src.rows, CV_8UC(2));
//BM.create(src.cols, src.rows, CV_8UC(2));
//GM.create(src.cols, src.rows, CV_8UC(2));
...
BM.at<Vec3b>(j, i) = To[0];
RM, BM, and GM are not set up. The debugger should show an error when you try to set BM.at<Vec3b>(j, i).
Try instead:
int main()
{
    String file_name = "C:\\images\\haaand.jpg";
    Mat src = imread(file_name);
    Mat RM = Mat(src.size(), CV_8UC3);
    Mat BM = Mat(src.size(), CV_8UC3);
    Mat GM = Mat(src.size(), CV_8UC3);
    Vec3b intensity;
    Vec3b To[3];
    for (int i = 0; i < src.cols; i++)
    {
        for (int j = 0; j < src.rows; j++)
        {
            intensity = src.at<Vec3b>(j, i);
            for (int k = 0; k < 3; k++)
                for (int p = 0; p < 3; p++)
                    To[k][p] = intensity[k];
            BM.at<Vec3b>(j, i) = To[0];
            GM.at<Vec3b>(j, i) = To[1];
            RM.at<Vec3b>(j, i) = To[2];
        }
    }
    imshow("src", src);
    imshow("RM", RM);
    imshow("BM", BM);
    imshow("GM", GM);
    waitKey(0);
    return 0;
}
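As a side note, if the goal is simply one image per channel, cv::split separates the channels directly; a short sketch (this produces single-channel Mats, rather than the three-channel per-channel visualizations built above):

#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;

int main()
{
    Mat src = imread("C:\\images\\haaand.jpg");
    if (src.empty())
        return -1;

    // channels[0] = blue, channels[1] = green, channels[2] = red (BGR order).
    std::vector<Mat> channels;
    split(src, channels);

    imshow("B", channels[0]);
    imshow("G", channels[1]);
    imshow("R", channels[2]);
    waitKey(0);
    return 0;
}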

Laplacian Sharpening result is kinda greyish C++

I am trying to implement a Laplacian filter for sharpening an image, but the result is kind of grey, and I don't know what went wrong with my code.
Here's my work so far:
img = imread("moon.png", 0);

Mat convoSharp() {
    //creating new image
    Mat res = img.clone();
    for (int y = 0; y < res.rows; y++) {
        for (int x = 0; x < res.cols; x++) {
            res.at<uchar>(y, x) = 0.0;
        }
    }
    //variable declaration
    //change -5 to -4 for original result.
    int filter[3][3] = { {0,1,0},{1,-4,1},{0,1,0} };
    //int filter[3][3] = { {-1,-2,-1},{0,0,0},{1,2,1} };
    int height = img.rows;
    int width = img.cols;
    int **temp = new int*[height];
    for (int i = 0; i < height; i++) {
        temp[i] = new int[width];
    }
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            temp[i][j] = 0;
        }
    }
    int filterHeight = 3;
    int filterWidth = 3;
    int newImageHeight = height - filterHeight + 1;
    int newImageWidth = width - filterWidth + 1;
    int i, j, h, w;
    //convolution
    for (i = 0; i < newImageHeight; i++) {
        for (j = 0; j < newImageWidth; j++) {
            for (h = i; h < i + filterHeight; h++) {
                for (w = j; w < j + filterWidth; w++) {
                    temp[i][j] += filter[h - i][w - j] * (int)img.at<uchar>(h, w);
                }
            }
        }
    }
    //find max and min
    int max = 0;
    int min = 100;
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            if (temp[i][j] > max) {
                max = temp[i][j];
            }
            if (temp[i][j] < min) {
                min = temp[i][j];
            }
        }
    }
    //clamp 0 - 255
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            res.at<uchar>(i, j) = 0 + (temp[i][j] - min)*(255 - 0) / (max - min);
        }
    }
    //empty the temp array
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            temp[i][j] = 0;
        }
    }
    //img - res and store it in temp array
    for (int y = 0; y < res.rows; y++) {
        for (int x = 0; x < res.cols; x++) {
            //int a = (int)img.at<uchar>(y, x) - (int)res.at<uchar>(y, x);
            //cout << a << endl;
            temp[y][x] = (int)img.at<uchar>(y, x) - (int)res.at<uchar>(y, x);
        }
    }
    //find the new max and min
    max = 0;
    min = 100;
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            if (temp[i][j] > max) {
                max = temp[i][j];
            }
            if (temp[i][j] < min) {
                min = temp[i][j];
            }
        }
    }
    //clamp it back to 0-255
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            res.at<uchar>(i, j) = 0 + (temp[i][j] - min)*(255 - 0) / (max - min);
            temp[i][j] = (int)res.at<uchar>(i, j);
        }
    }
    return res;
}
And here's the result.
As you can see in my code above, I already normalize the pixel values to 0-255, and I still don't know what went wrong here. Can anyone explain why that is?
The greyness is because, as Max suggested in his answer, you are scaling to the 0-255 range, not clamping (as your comments in the code suggest).
However, that is not the only issue in your code. The output of the Laplace operator contains negative values. You nicely store these in an int, but then you scale and copy them over to a char. Don't do that!
You need to add the result of the Laplace unchanged to your image. This way, some pixels in your image will become darker, and some lighter. This is what causes the edges to appear sharper.
Simply skip some of the loops in your code, and keep one that does temp = img - temp. That result you can freely scale or clamp to the output range and cast to char.
To clamp, simply set any pixel values below 0 to 0, and any above 255 to 255. Don't compute min/max and scale as you do, because there you reduce contrast and create the greyish wash over your image.
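A sketch of that clamp, reusing the temp and res names from the question; cv::saturate_cast does exactly this truncation:

// Values below 0 become 0, values above 255 become 255; no rescaling.
res.at<uchar>(i, j) = saturate_cast<uchar>(temp[i][j]);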
Your recent question is quite similar (though the problem in the code was different); read my answer there again, as it suggests a way to further simplify your code so that img - Laplace becomes a single convolution.
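For reference, a minimal sketch of that simplification, assuming the same moon.png input as in the question: since the sharpened image is img - Laplace(img) and the Laplace kernel above is {0,1,0; 1,-4,1; 0,1,0}, the two steps fold into a single 3x3 convolution with {0,-1,0; -1,5,-1; 0,-1,0}. Letting filter2D write to an 8-bit destination also saturates to 0-255 instead of rescaling:

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat img = imread("moon.png", IMREAD_GRAYSCALE);
    if (img.empty())
        return -1;

    // Identity minus the {0,1,0; 1,-4,1; 0,1,0} Laplace kernel.
    Mat kernel = (Mat_<float>(3, 3) <<  0, -1,  0,
                                       -1,  5, -1,
                                        0, -1,  0);

    // ddepth = -1 keeps CV_8U output; out-of-range values are
    // saturated (clamped), not rescaled, so no gray wash.
    Mat sharpened;
    filter2D(img, sharpened, -1, kernel);

    imshow("sharpened", sharpened);
    waitKey(0);
    return 0;
}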
The problem is that you are clamping and rescaling the image. Look at the bottom-left border of the moon: there are very bright pixels next to very dark pixels, and then some gray pixels right beside the bright ones. Your sharpening filter will really spike on that bright border and increase the maximum. Similarly, the black pixels will be reduced even further.
You then determine the minimum and maximum and rescale the entire image. This necessarily means the entire image will lose contrast when displayed in the previous gray scale, because your filter produced pixel values above 255 and below 0.
Look closely at the border of the moon in the output image:
There is a black halo (the new 0) and a bright, sharp edge (the new 255). (The browser image scaling made it less crisp in this screenshot; look at your original output.) Everything else was squashed by the rescaling, so what was previously black (0) is now dark gray.

variance of sliding window in image

I work on traffic sign detection. First, I applied a segmentation on the RGB image to obtain the red channel image, as illustrated in image 1.
Secondly, I try to find homogeneous regions in order to eliminate uninteresting regions (not a traffic sign) by calculating the variance of a sliding window over the image.
I use this code, but I always get an exception:
int main(int argc, char** argv)
{
    IplImage *image1;
    if ((image1 = cvLoadImage("segmenter1/00051.jpg", 0)) == 0)
        return NULL;
    int rows = image1->width;
    int cols = image1->height;
    Mat image = Mat::zeros(cols, rows, CV_32FC1);
    double x = 0;
    double temp = 0;
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            temp = cvGet2D(image1, j, i).val[0];
            x = temp / 255;
            image.at<float>(j, i) = x;
            x = image.at<float>(j, i);
        }
    }
    int k = 16;
    double seuil = 0.0013;
    CvScalar blanc; //pixel blanc
    blanc.val[0] = 255;
    cv::Scalar mean, stddev; //0:1st channel, 1:2nd channel and 2:3rd channel
    for (int j = 0; j < rows - k; j++)
    {
        for (int i = 0; i < cols - k; i++)
        {
            double som = 0;
            double var = 0;
            double t = 0;
            for (int jj = j; jj < k + j; jj++)
            {
                for (int ii = i; ii < k + i; ii++)
                {
                    t = image.at<float>(jj, ii);
                    som = som + t;
                    t = t * t;
                    var = var + t;
                }
            }
            som = som / (k * k);
            if (som > 0.18) {
                var = (var / (k * k)) - (som * som);
                if (var < seuil)
                    cvSet2D(image1, j, i, blanc);
            }
        }
    }
    char stsave[80];
    cvSaveImage("variance/00051.jpg", image1);
    cv::waitKey(0);
    return 0;
}
Without the specific exception, I can only guess that it is out_of_range. According to the OpenCV docs, the cvGet2D and cvSet2D parameters are image, y, x, which effectively translates to image, row, col. You have flipped the definition of rows and cols and have conflicting usage between the two loops. Maybe fix these and try again.
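As an aside, the sliding-window variance can be computed without explicit loops using Var(X) = E[X^2] - E[X]^2. A sketch with the modern Mat API, taking the window size k = 16 and the 0.18 / 0.0013 (seuil) thresholds from the question; note that blur centers the window on each pixel, whereas the loops above anchor it at the top-left corner:

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    // Grayscale input scaled to [0, 1], as in the question.
    Mat img = imread("segmenter1/00051.jpg", IMREAD_GRAYSCALE);
    if (img.empty())
        return -1;
    Mat f;
    img.convertTo(f, CV_32F, 1.0 / 255.0);

    // Per-window mean and mean-of-squares via normalized box filters.
    int k = 16;
    Mat mean, meanSq;
    blur(f, mean, Size(k, k));
    blur(f.mul(f), meanSq, Size(k, k));

    // Var = E[X^2] - E[X]^2, computed per pixel.
    Mat variance = meanSq - mean.mul(mean);

    // Paint bright, homogeneous windows white, as the original loop does.
    Mat bright = mean > 0.18;
    Mat homogeneous = variance < 0.0013;
    img.setTo(255, bright & homogeneous);

    imwrite("variance/00051.jpg", img);
    return 0;
}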

Weird result from the Kuwahara filter

I am implementing a Kuwahara filter in C++, with OpenCV to help open and display images. The idea is quite straightforward, but somehow I got a weird result from it. Here's the code:
#include "opencv2/opencv.hpp"
#include <iostream>
#include <iomanip>
#include <cmath>
using namespace std;
using namespace cv;
//This class is essentially a struct of 4 Kuwahara regions surrounding a pixel, along with each one's mean, sum and variance.
class Regions {
    int* Area[4];
    int Size[4];
    unsigned long long Sum[4];
    double Var[4];
    int kernel;
public:
    Regions(int _kernel) : kernel(_kernel) {
        for (int i = 0; i < 4; i++) {
            Area[i] = new int[kernel*kernel];
            Size[i] = 0;
            Sum[i] = 0;
            Var[i] = 0.0;
        }
    }
    //Update data: increase the size of the area, update the sum
    void sendData(int area, int data) {
        Area[area][Size[area]] = data;
        Sum[area] += data;
        Size[area]++;
    }
    //Calculate the variance of each area
    double var(int area) {
        int __mean = Sum[area]/Size[area];
        double temp = 0;
        for (int i = 0; i < Size[area]; i++) {
            temp += (Area[area][i] - __mean) * (Area[area][i] - __mean);
        }
        if (Size[area] == 1) return 1.7e38; //If there is only one pixel inside the region, return a huge value
        //so that the region will never be chosen by minVar() below
        return sqrt(temp/(Size[area]-1));
    }
    //Call the above function to calculate the variances of all 4 areas
    void calcVar() {
        for (int i = 0; i < 4; i++) {
            Var[i] = var(i);
        }
    }
    //Find out which region has the least variance
    int minVar() {
        calcVar();
        int i = 0;
        double __var = Var[0];
        if (__var > Var[1]) { __var = Var[1]; i = 1; }
        if (__var > Var[2]) { __var = Var[2]; i = 2; }
        if (__var > Var[3]) { __var = Var[3]; i = 3; }
        return i;
    }
    //Return the mean of that region
    uchar result() {
        int i = minVar();
        return saturate_cast<uchar>((double)(Sum[i] * 1.0 / Size[i]));
    }
};
class Kuwahara {
private:
    int wid, hei, pad, kernel;
    Mat image;
public:
    Regions getRegions(int x, int y) {
        Regions regions(kernel);
        uchar *data = image.data;
        //Update data for each region; pixels outside the image's boundary are ignored.
        //Area 1 (upper left)
        for (int j = (y-pad >= 0) ? y-pad : 0; j >= 0 && j <= y && j < hei; j++)
            for (int i = (x-pad >= 0) ? x-pad : 0; i >= 0 && i <= x && i < wid; i++) {
                regions.sendData(1, data[(j*wid)+i]);
            }
        //Area 2 (upper right)
        for (int j = (y-pad >= 0) ? y-pad : 0; j <= y && j < hei; j++)
            for (int i = x; i <= x+pad && i < wid; i++) {
                regions.sendData(2, data[(j*wid)+i]);
            }
        //Area 3 (bottom left)
        for (int j = y; j <= y+pad && j < hei; j++)
            for (int i = (x-pad >= 0) ? x-pad : 0; i <= x && i < wid; i++) {
                regions.sendData(3, data[(j*wid)+i]);
            }
        //Area 0 (bottom right)
        for (int j = y; j <= y+pad && j < hei; j++)
            for (int i = x; i <= x+pad && i < wid; i++) {
                regions.sendData(0, data[(j*wid)+i]);
            }
        return regions;
    }
    //Constructor
    Kuwahara(const Mat& _image, int _kernel) : kernel(_kernel) {
        image = _image.clone();
        wid = image.cols; hei = image.rows;
        pad = kernel - 1;
    }
    //Create a new image and replace its pixels with the results of the Kuwahara filter on the original pixels
    Mat apply() {
        Mat temp;
        temp.create(image.size(), CV_8U);
        uchar* data = temp.data;
        for (int j = 0; j < hei; j++) {
            for (int i = 0; i < wid; i++)
                data[j*wid+i] = getRegions(i, j).result();
        }
        return temp;
    }
};
int main() {
    Mat img = imread("limes.tif", 1);
    Mat gray, dest;
    int kernel = 15;
    gray.create(img.size(), CV_8U);
    cvtColor(img, gray, COLOR_BGR2GRAY);
    Kuwahara filter(gray, kernel);
    dest = filter.apply();
    imshow("Result", dest);
    imwrite("result.jpg", dest);
    waitKey();
}
And here's the result:
As you can see, it's different from the correct result; the borders of those limes seem to be duplicated and shifted upward. If I apply a 15x15 filter, it gives me a complete mess like this:
I've spent my whole day debugging, but so far I have found nothing. I even did the calculation on small images by hand and compared it with the result, and saw no differences.
Could anyone help me find out what I did wrong?
Many, many thanks.
It turns out that there's nothing wrong with my code; the way I defined a kernel was the source of the problem. My kernel is actually one of the four small Kuwahara sections, while the correct definition of a kernel is the whole area over which data is calculated for each pixel; therefore, the area that contains all four sections is actually the kernel. So when I talked about a 7x7 "kernel", I actually applied a 15x15 one, and the horrible result came not from a 15x15 kernel as I thought, but from a 31x31 one. At that size, the Kuwahara filter simply doesn't make sense and bizarre results are inevitable.
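In code, a probable fix under that definition (assuming the constructor shown above) is to halve the reach of each quadrant, so that the four overlapping sections together span exactly kernel x kernel pixels:

//Constructor: pad is how far a quadrant reaches from the center pixel.
//For a kernel x kernel Kuwahara kernel this is (kernel-1)/2, not kernel-1.
Kuwahara(const Mat& _image, int _kernel) : kernel(_kernel) {
    image = _image.clone();
    wid = image.cols; hei = image.rows;
    pad = (kernel - 1) / 2;   // was: pad = kernel - 1;
}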