Floyd–Steinberg dithering: why is the result not ideal? (C++)

Why does the image generated by my dithering algorithm have so many black spots?
Here is a picture:
http://yanxuan.nosdn.127.net/00a07a53ab2083685da1a90d09452f69.png
Below is my code for Floyd–Steinberg dithering.
QImage * SaveThread::imageFloydSteinberg(QImage * origin)
{
    int old_pix, new_pix, quant_err;
    int width = origin->width();
    int height = origin->height();
    QImage * img_dither = new QImage(width, height, QImage::Format_ARGB32);
    img_dither = origin;
    for (int j = 0; j < height; j++)
    {
        for (int i = 0; i < width; i++)
        {
            old_pix = img_dither->pixel(i, j);
            if (img_dither->pixel(i, j) > qRgb(128, 128, 128))
                new_pix = qRgb(255, 255, 255);
            else
                new_pix = qRgb(0, 0, 0);
            img_dither->setPixel(i, j, qRgb(new_pix, new_pix, new_pix));
            quant_err = old_pix - new_pix;
            img_dither->setPixel(i+1, j  , img_dither->pixel(i+1, j  ) + quant_err * 7 / 16);
            img_dither->setPixel(i-1, j+1, img_dither->pixel(i-1, j+1) + quant_err * 3 / 16);
            img_dither->setPixel(i  , j+1, img_dither->pixel(i  , j+1) + quant_err * 5 / 16);
            img_dither->setPixel(i+1, j+1, img_dither->pixel(i+1, j+1) + quant_err * 1 / 16);
        }
    }
    return img_dither;
}
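Note that pixel() returns a packed ARGB QRgb value, so the threshold comparison and the quant_err arithmetic above operate on whole packed integers rather than on per-channel brightness; the error diffusion also touches pixels outside the image on the right and bottom borders, and `img_dither = origin` discards the freshly allocated image and modifies the input in place. A minimal sketch of a grayscale-only version (an illustration of the usual algorithm, assuming Qt's qGray() is an acceptable luminance conversion; it is not a drop-in replacement for the asker's class) might look like this:

#include <QImage>
#include <QVector>

// Hypothetical helper, not part of the original class: dithers the luminance
// of `origin` to pure black/white using Floyd-Steinberg error diffusion.
QImage floydSteinbergGray(const QImage &origin)
{
    const int width  = origin.width();
    const int height = origin.height();

    // Work in a signed integer buffer so the diffused error can go below 0 or above 255.
    QVector<int> gray(width * height);
    for (int y = 0; y < height; ++y)
        for (int x = 0; x < width; ++x)
            gray[y * width + x] = qGray(origin.pixel(x, y));

    QImage result(width, height, QImage::Format_ARGB32);
    for (int y = 0; y < height; ++y) {
        for (int x = 0; x < width; ++x) {
            const int oldPix = gray[y * width + x];
            const int newPix = (oldPix > 128) ? 255 : 0;
            const int err    = oldPix - newPix;
            result.setPixel(x, y, qRgb(newPix, newPix, newPix));

            // Diffuse the error to unprocessed neighbours, skipping positions
            // outside the image instead of writing past the borders.
            if (x + 1 < width)                   gray[y * width + (x + 1)]       += err * 7 / 16;
            if (x - 1 >= 0 && y + 1 < height)    gray[(y + 1) * width + (x - 1)] += err * 3 / 16;
            if (y + 1 < height)                  gray[(y + 1) * width + x]       += err * 5 / 16;
            if (x + 1 < width && y + 1 < height) gray[(y + 1) * width + (x + 1)] += err * 1 / 16;
        }
    }
    return result;
}

The same structure extends to colour by keeping one error buffer per channel and diffusing each channel's error separately.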

Related

Region Growing for Cr and Cb (YCbCr) components

I want to implement a region-growing algorithm for the Cr and Cb components of YCbCr (separately and combined), with a manually chosen seed point (mouse click).
At the moment I have two functions that implement region growing for the H component in the HSV color space.
bool isOk(int new_x, int new_y, int width, int height)
{
    if (new_x < 0 || new_y < 0 || new_x >= width || new_y >= height)
        return false;
    return true;
}
void lab04_MouseCallback(int event, int x, int y, int flags, void* param)
{
    Mat* src = (Mat*)param;
    int height = (*src).rows;
    int width = (*src).cols;
    if (event == CV_EVENT_LBUTTONDOWN)
    {
        printf("Seed point(x,y): %d,%d\n", x, y);
        Mat labels = Mat::zeros((*src).size(), CV_16UC1);
        int w = 3,
            hue_avg = 0,
            inf_x, sup_x,
            inf_y, sup_y,
            cnt = 0;
        inf_x = (x - w < 0) ? 0 : x - w;
        inf_y = (y - w < 0) ? 0 : y - w;
        sup_x = (x + w >= width) ? (width - 1) : x + w;
        sup_y = (y + w >= height) ? (height - 1) : y + w;
        printf("inf x: %d sup x: %d --- inf y: %d sup y: %d\n", inf_x, sup_x, inf_y, sup_y);
        for (int i = inf_y; i <= sup_y; ++i)
        {
            for (int j = inf_x; j <= sup_x; ++j)
            {
                hue_avg += (*src).data[i * width + j];
                //printf("H at <%d, %d> is %d\n", i, j, (*src).data[i * width + j]);
            }
        }
        hue_avg /= (sup_x - inf_x + 1) * (sup_y - inf_y + 1);
        printf("Hue average: %d\n\n", hue_avg);
        int k = 1, N = 1, hue_std = 10;
        int konst = 3;
        int T = konst * (float)hue_std;
        queue<Point> Q;
        Q.push(Point(x, y));
        while (!Q.empty())
        {
            int dx[8] = { -1, 0, 1, 1, 1, 0, -1, -1 };
            int dy[8] = { -1, -1, -1, 0, 1, 1, 1, 0 };
            Point temp = Q.front();
            Q.pop();
            for (int dir = 0; dir < 8; ++dir)
            {
                int new_x = temp.x + dx[dir];
                int new_y = temp.y + dy[dir];
                if (isOk(new_x, new_y, width, height))
                {
                    //printf("(%d, %d)\n", new_x, new_y);
                    if (labels.at<ushort>(new_y, new_x) == 0)
                    {
                        //printf("labels(%d, %d) = %hu\n", new_x, new_y, labels.at<ushort>(new_y, new_x));
                        if (abs((*src).at<uchar>(new_y, new_x) - hue_avg) < T)
                        {
                            //printf("this one\n");
                            Q.push(Point(new_x, new_y));
                            labels.at<ushort>(new_y, new_x) = k;
                            hue_avg = ((N * hue_avg) + (*src).at<uchar>(new_y, new_x)) / (N + 1);
                            ++N;
                        }
                    }
                }
            }
        }
        Mat dst = (*src).clone();
        for (int i = 0; i < height; i++)
        {
            for (int j = 0; j < width; j++)
            {
                if (labels.at<ushort>(i, j) == 1)
                {
                    dst.at<uchar>(i, j) = 255;
                }
                else
                {
                    dst.at<uchar>(i, j) = 0;
                }
            }
        }
        imshow("dst", dst);
    }
}
void lab04_MouseClick()
{
    Mat src;
    Mat hsv;
    // Read image from file
    char fname[MAX_PATH];
    while (openFileDlg(fname))
    {
        src = imread(fname);
        int height = src.rows;
        int width = src.cols;
        // Create a window
        namedWindow("My Window", 1);
        // Apply a Gaussian low-pass filter to remove noise: it is essential to apply it
        GaussianBlur(src, src, Size(5, 5), 0, 0);
        // Hue colour component of the HSV model
        Mat H = Mat(height, width, CV_8UC1);
        // define a pointer to the matrix (8 bits/pixel) used to store
        // the individual H component
        uchar* lpH = H.data;
        cvtColor(src, hsv, CV_BGR2HSV); // BGR -> HSV conversion
        // define a pointer to the matrix (24 bits/pixel) of the HSV image
        uchar* hsvDataPtr = hsv.data;
        for (int i = 0; i < height; i++)
        {
            for (int j = 0; j < width; j++)
            {
                // index into the hsv matrix (24 bits/pixel)
                int hi = i * width * 3 + j * 3;
                int gi = i * width + j; // index into the H matrix (8 bits/pixel)
                lpH[gi] = hsvDataPtr[hi] * 510 / 360; // lpH = 0 .. 255
            }
        }
        // set the callback function for any mouse event
        setMouseCallback("My Window", lab04_MouseCallback, &H);
        // show the image
        imshow("My Window", src);
        // Wait until the user presses some key
        waitKey(0);
    }
}
How can I change this code so it works on the Cr and Cb components?
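A minimal sketch of how the Cr and Cb planes could be obtained (assuming OpenCV's CV_BGR2YCrCb conversion), so that either plane could be handed to the same mouse callback in place of H:

// Sketch only: extract the Cr and Cb planes from the BGR image `src` so the
// existing region-growing callback can run on them instead of on the hue.
Mat ycrcb;
cvtColor(src, ycrcb, CV_BGR2YCrCb);   // BGR -> YCrCb; channel 0 = Y, 1 = Cr, 2 = Cb

vector<Mat> planes;
split(ycrcb, planes);                 // planes[1] = Cr, planes[2] = Cb, each CV_8UC1

Mat Cr = planes[1].clone();
Mat Cb = planes[2].clone();

// Separate components: pass either plane to the callback.
setMouseCallback("My Window", lab04_MouseCallback, &Cr);   // or &Cb

// Combined: one simple option is to grow a region on each plane separately and
// intersect the resulting label masks, or to test a joint distance such as
// |Cr - Cr_avg| + |Cb - Cb_avg| < T inside the callback.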

Problem with operations on border pixels of an image

I am trying to implement a demosaicing (interpolation) algorithm for a raw image with the Bayer pattern GRBG. The program logic is to use the neighbouring pixels to assign values to the R, G and B channels (I have attached the code). I am having a problem with this logic at the border pixels. For example, let i be the pixel at (0, 0): I need the value at i - 1, which is not present in the image. My question is whether there is a way to work around this, such as masking i - 1 and the others as 0, without adding a new border of zeros to my existing image.
Any suggestions would be helpful.
Thanks.
int rows = 256;
int cols = 512;
Mat raw_img(rows, cols, CV_8U);                        //////////////////////
Mat image(rows, cols, CV_8UC3);                        //  BAYER PATTERN   //
cvtColor(image, image, COLOR_BGR2RGB);                 //       G R        //
for (int i = 0; i < raw_img.rows; i++) {               //       B G        //
    for (int j = 0; j < raw_img.cols; j++) {           //////////////////////
        if ((i % 2 == 0) && (j % 2 == 0)) // top green
        {
            image.at<Vec3b>(i, j)[0] = (raw_img.at<uchar>(i - 1, j) +
                                        raw_img.at<uchar>(i + 1, j)) / 2;    // red
            image.at<Vec3b>(i, j)[1] = (raw_img.at<uchar>(i, j) * 2);        // blue
            image.at<Vec3b>(i, j)[2] = (raw_img.at<uchar>(i, j - 1) +
                                        raw_img.at<uchar>(i, j + 1)) / 2;    // green
        }
        else if ((i % 2 == 0) && (j % 2 == 1)) // red
        {
            image.at<Vec3b>(i, j)[0] = (raw_img.at<uchar>(i, j));            // red
            image.at<Vec3b>(i, j)[1] = (raw_img.at<uchar>(i - 1, j) +
                                        raw_img.at<uchar>(i + 1, j) +
                                        raw_img.at<uchar>(i, j - 1) +
                                        raw_img.at<uchar>(i, j + 1)) / 2;    // green
            image.at<Vec3b>(i, j)[2] = (raw_img.at<uchar>(i + 1, j - 1) +
                                        raw_img.at<uchar>(i - 1, j + 1) +
                                        raw_img.at<uchar>(i + 1, j + 1) +
                                        raw_img.at<uchar>(i - 1, j - 1)) / 4; // blue
        }
        else if ((i % 2 == 1) && (j % 2 == 0)) // blue
        {
            image.at<Vec3b>(i, j)[0] = (raw_img.at<uchar>(i + 1, j - 1) +
                                        raw_img.at<uchar>(i - 1, j + 1) +
                                        raw_img.at<uchar>(i + 1, j + 1) +
                                        raw_img.at<uchar>(i - 1, j - 1)) / 4; // red
            image.at<Vec3b>(i, j)[1] = (raw_img.at<uchar>(i + 1, j) +
                                        raw_img.at<uchar>(i, j + 1) +
                                        raw_img.at<uchar>(i - 1, j) +
                                        raw_img.at<uchar>(i, j + 1)) / 2;    // green
            image.at<Vec3b>(i, j)[0] = (raw_img.at<uchar>(i, j));            // blue
        }
        else // bottom green
        {
            image.at<Vec3b>(i, j)[0] = (raw_img.at<uchar>(i, j - 1) +
                                        raw_img.at<uchar>(i, j + 1)) / 2;    // red
            image.at<Vec3b>(i, j)[1] = (raw_img.at<uchar>(i, j) * 2);        // blue
            image.at<Vec3b>(i, j)[2] = (raw_img.at<uchar>(i - 1, j) +
                                        raw_img.at<uchar>(i + 1, j)) / 2;    // green
        }
    }
}
You could do something like:
image.at<Vec3b>(i, j)[0] = (raw_img.at<uchar>(max(0, i - 1), j) +
                            raw_img.at<uchar>(min(i + 1, raw_img.rows - 1), j)) / 2; // red
For all your i ± 1, j ± 1 accesses: this way you "replicate" border values by simply clamping to the last valid value in the X or Y dimension.
As a side note, OpenCV includes several demosaicing algorithms that will be hard to beat, both in quality and in execution speed.
The above answer works, but to avoid the hassle of using min/max on every pixel access, it can also be done with an OpenCV function, as shown below:
#include <opencv2/opencv.hpp>
using namespace cv;

int main(int argc, char** argv)
{
    Mat img_rev = imread("C:/Users/20181217/Desktop/images/imgs/den_check.png");
    // number of additional rows and columns
    int top, bottom, left, right;
    top = 1;
    bottom = 1;
    left = 1;
    right = 1;
    // destination image with the additional border
    Mat img_clamp(img_rev.rows + 2, img_rev.cols + 2, CV_8UC3);
    // if you want to pad the image with zeros
    // (note the argument order: top, bottom, left, right)
    copyMakeBorder(img_rev, img_clamp, top, bottom, left, right, BORDER_CONSTANT);
    // if you want to replicate the border of the image instead
    copyMakeBorder(img_rev, img_clamp, top, bottom, left, right, BORDER_REPLICATE);
    // Now you can access the image without having to worry about the borders, as shown below
    for (int i = 1; i < img_clamp.rows - 1; i++)
    {
        for (int j = 1; j < img_clamp.cols - 1; j++)
        {
            ...
        }
    }
    waitKey(100000);
    return 0;
}
More operations can be found here:
https://docs.opencv.org/2.4/modules/imgproc/doc/filtering.html?highlight=copymakeborder#copymakeborder
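To illustrate the side note in the first answer, a minimal sketch of using OpenCV's built-in Bayer demosaicing instead of the hand-written loops (assuming the single-channel raw_img from the question, and that the GRBG layout maps to the COLOR_BayerGB2BGR code; the exact Bayer constant is worth double-checking against your sensor layout):

// Sketch only: let OpenCV do the Bayer interpolation. raw_img is the
// single-channel CV_8U mosaic from the question; the Bayer constant is an
// assumption for the GRBG layout and should be verified.
Mat demosaiced;
cvtColor(raw_img, demosaiced, COLOR_BayerGB2BGR);   // 1-channel mosaic -> 3-channel BGR
imshow("demosaiced", demosaiced);
waitKey(0);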

Edge detection for color images (Canny algorithm)

This is how I managed to use a Sobel kernel on a grayscale image. However, I don't actually get how to modify it for a color image.
void Soble()
{
    Mat img;
    int w = 3;
    int k = w / 2;
    char fname[MAX_PATH];
    openFileDlg(fname);
    img = imread(fname, CV_LOAD_IMAGE_GRAYSCALE);
    gaussianFiltering(img);
    Mat destinationImg = img.clone();
    float sobelY[3][3] = { 1, 2, 1, 0, 0, 0, -1, -2, -1 };
    float sobelX[3][3] = { -1, 0, 1, -2, 0, 2, -1, 0, 1 };
    for (int i = k; i < img.rows - k; i++)
    {
        for (int j = k; j < img.cols - k; j++)
        {
            float Gx = 0, Gy = 0;
            for (int l = 0; l < w; l++)
            {
                for (int p = 0; p < w; p++)
                {
                    Gx += img.at<uchar>(i + l - k, j + p - k) * sobelX[l][p];
                    Gy += img.at<uchar>(i + l - k, j + p - k) * sobelY[l][p];
                }
            }
            destinationImg.at<uchar>(i, j) = sqrt(Gx*Gx + Gy * Gy) / (4 * sqrt(2));
        }
    }
    imshow("Intermediar", destinationImg);
    imshow("Initial", img);
    waitKey(0);
}
I thought of using each RGB channel, but it does not work and even gives some errors.
float GxR = 0, GyR = 0;
float GxG = 0, GyG = 0;
float GxB = 0, GyB = 0;
for (int l = 0; l < w; l++)
{
    for (int p = 0; p < w; p++)
    {
        GxR += img.at<Vec3b>[0](i + l - k, j + p - k) * sobelX[l][p];
        GxG += img.at<Vec3b>[1](i + l - k, j + p - k) * sobelX[l][p];
        GxB += img.at<Vec3b>[2](i + l - k, j + p - k) * sobelX[l][p];
        GyR += img.at<Vec3b>[0](i + l - k, j + p - k) * sobelY[l][p];
        GyG += img.at<Vec3b>[1](i + l - k, j + p - k) * sobelY[l][p];
        GyB += img.at<Vec3b>[2](i + l - k, j + p - k) * sobelY[l][p];
    }
}
destinationImg.at<Vec3b>[0](i, j) = sqrt(GxR*GxR + GyR * GyR) / (4 * sqrt(2));
destinationImg.at<Vec3b>[1](i, j) = sqrt(GxG*GxG + GyB * GyB) / (4 * sqrt(2));
destinationImg.at<Vec3b>[2](i, j) = sqrt(GxG*GxG + GyG * GyG) / (4 * sqrt(2));
Can you please explain how this code must be rewritten?
You are accessing the image data the wrong way.
destinationImg.at<Vec3b>[0](i, j)
destinationImg is a Mat of type Vec3b. That means it is a 2D array of three-dimensional vectors.
Your [] operator is in the wrong place.
The subscript error message tells you that you are using that operator on something that is neither a pointer nor an array, which is not possible.
You get the other error message because you have that operator where the (i, j) is expected.
First you have to get one of these vectors, then you can get its elements.
destinationImg.at<Vec3b>(i,j) will give you the vector at i,j.
destinationImg.at<Vec3b>(i,j)[0] will give you the first element of that vector.
Example from the OpenCV documentation:
Vec3b intensity = img.at<Vec3b>(y, x);
uchar blue = intensity.val[0];
uchar green = intensity.val[1];
uchar red = intensity.val[2];
http://docs.opencv.org/2.4.13.2/doc/user_guide/ug_mat.html
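Applied to the loop in the question, a minimal sketch of the corrected access pattern might look like the following (assuming img was loaded as a 3-channel BGR image, i.e. without CV_LOAD_IMAGE_GRAYSCALE, and that destinationImg is also CV_8UC3):

// Sketch only: per-channel Sobel with the Vec3b access fixed.
// In OpenCV's BGR layout, channel 0 is blue, 1 is green, 2 is red.
float Gx[3] = { 0, 0, 0 };
float Gy[3] = { 0, 0, 0 };
for (int l = 0; l < w; l++)
{
    for (int p = 0; p < w; p++)
    {
        Vec3b pix = img.at<Vec3b>(i + l - k, j + p - k);  // first get the vector...
        for (int c = 0; c < 3; c++)                       // ...then index its channels
        {
            Gx[c] += pix[c] * sobelX[l][p];
            Gy[c] += pix[c] * sobelY[l][p];
        }
    }
}
for (int c = 0; c < 3; c++)
{
    destinationImg.at<Vec3b>(i, j)[c] =
        saturate_cast<uchar>(sqrt(Gx[c]*Gx[c] + Gy[c]*Gy[c]) / (4 * sqrt(2.0f)));
}

For an edge map rather than a colour gradient image, the three per-channel magnitudes are usually combined afterwards, for example by taking their maximum, into a single-channel result.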

Gaussian blur image processing (C++)

After trying to implement a Gaussian blur for an image, I have run into a problem where the output image looks like multiple blurred versions of the original (input) image.
I have too low a reputation to post images, so I cannot show directly what is happening; however, I can post Gyazo links to the images:
https://gyazo.com/38fbe1abd442a3167747760866584655 - original,
https://gyazo.com/471693c49917d3d3e243ee4156f4fe12 - output
Here is some code:
int kernel[3][3] = { 1, 2, 1,
                     2, 4, 2,
                     1, 2, 1 };

void guassian_blur2D(unsigned char * arr, unsigned char * result, int width, int height)
{
    for (int row = 0; row < height; row++)
    {
        for (int col = 0; col < width; col++)
        {
            for (int k = 0; k < 3; k++)
            {
                result[3 * row * width + 3 * col + k] = accessPixel(arr, col, row, k, width, height);
            }
        }
    }
}

int accessPixel(unsigned char * arr, int col, int row, int k, int width, int height)
{
    int sum = 0;
    int sumKernel = 0;
    for (int j = -1; j <= 1; j++)
    {
        for (int i = -1; i <= 1; i++)
        {
            if ((row + j) >= 0 && (row + j) < height && (col + i) >= 0 && (col + i) < width)
            {
                int color = arr[(row + j) * 3 * width + (col + i) * 3 + k];
                sum += color * kernel[i + 1][j + 1];
                sumKernel += kernel[i + 1][j + 1];
            }
        }
    }
    return sum / sumKernel;
}
Image is saved:
guassian_blur2D(inputBuffer, outputBuffer, width, height);
//Save the processed image
outputImage.convertToType(FREE_IMAGE_TYPE::FIT_BITMAP);
outputImage.convertTo24Bits();
outputImage.save("appleBlur.png");
cout << "Blur Complete" << endl;
Any help would be great. If it also helps, I am trying to store the image as grayscale so that no colour is saved.
It looks like the problem is not within your blurring code, but is related to saving or accessing the image data.
I have used OpenCV to read/save the images and got the expected result. Here's a snippet:
cv::Mat3b img = cv::imread("path_to_img.png");
cv::Mat3b out = img.clone();
guassian_blur2D(img.data, out.data, img.cols, img.rows);
cv::imshow("img", img);
cv::imshow("out", out);
cv::waitKey(0);
And here are input and output images:
The blur is not very noticeable (due to high image resolution and small kernel), but if you look carefully - it looks correct.
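Since the asker mentions wanting a grayscale result, a minimal single-channel variant of the same blur (a sketch, assuming an 8-bit grayscale buffer with one byte per pixel rather than three, and reusing the kernel above) might look like:

// Sketch only: the same 3x3 blur on a single-channel (grayscale) buffer.
// With one byte per pixel the stride is simply `width`, not 3 * width.
int accessPixelGray(const unsigned char *arr, int col, int row, int width, int height)
{
    int sum = 0;
    int sumKernel = 0;
    for (int j = -1; j <= 1; j++)
    {
        for (int i = -1; i <= 1; i++)
        {
            if ((row + j) >= 0 && (row + j) < height && (col + i) >= 0 && (col + i) < width)
            {
                sum += arr[(row + j) * width + (col + i)] * kernel[j + 1][i + 1];
                sumKernel += kernel[j + 1][i + 1];
            }
        }
    }
    return sum / sumKernel;
}

void gaussian_blur2D_gray(const unsigned char *arr, unsigned char *result, int width, int height)
{
    for (int row = 0; row < height; row++)
        for (int col = 0; col < width; col++)
            result[row * width + col] =
                (unsigned char)accessPixelGray(arr, col, row, width, height);
}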

Histogram of oriented gradients

For a project I'm writing some code to compute the HoG of some images, but I'm stuck on the fact that my orientations are only between 0 and 90 degrees, even though I'm using the atan2 function.
I'm guessing that this problem occurs due to the filter2D function of OpenCV, but I'm not sure if this is the reason or whether I'm doing something else wrong:
Vector<Vector<Mat_<float>>> HoG(Mat image) {
    Mat img_x;
    Mat img_y;
    IplImage img = image;
    Mat kern_x = (Mat_<char>(1, 3) << -1, 0, 1);
    Mat kern_y = (Mat_<char>(3, 1) << -1, 0, 1);
    filter2D(image, img_x, image.depth(), kern_x);
    filter2D(image, img_y, image.depth(), kern_y);
    Vector<Vector<Mat_<float>>> histograms;
    for (int y = 0; y < image.rows - size; y += size) {
        Vector<Mat_<float>> temp_hist;
        for (int x = 0; x < image.cols - size; x += size) {
            float total_mag = 0;
            Mat hist = Mat::zeros(1, 8, CV_32FC1);
            for (int i = y; i < y + size; ++i) {
                for (int j = x; j < x + size; ++j) {
                    float grad_x = (float)img_x.at<uchar>(i, j);
                    float grad_y = (float)img_y.at<uchar>(i, j);
                    double ori = myatan2(grad_x, grad_y);
                    float mag = sqrt(pow(grad_x, 2) + pow(grad_y, 2));
                    int bin = round(ori / 45);
                    hist.at<float>(0, (bin - 1 < 0 ? 7 : bin - 1)) += -(float)(ori - ((round(ori / 45) - 1) * 45.0 + 22.5)) / 45.0f;
                    hist.at<float>(0, bin) += -(float)(ori - ((round(ori / 45) - 1) * 45.0 + 22.5)) / 45.0f;
                    total_mag += mag;
                }
            }
            // Normalize the histogram
            for (int i = 0; i < 8; ++i) {
                hist.at<float>(0, i) = hist.at<float>(0, i) / total_mag;
            }
            temp_hist.push_back(hist);
        }
        histograms.push_back(temp_hist);
    }
    return histograms;
}
If you have any other tips to speed up my code, or anything else, that is also welcome of course.
I notice this:
float grad_x = (float)img_x.at<uchar>(i, j);
float grad_y = (float)img_y.at<uchar>(i, j);
You seem to be using uchar. Should this not be char?
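Expanding on that point: if image is an 8-bit Mat, filter2D with ddepth = image.depth() saturates negative filter responses to 0, so both grad_x and grad_y end up non-negative and atan2 is confined to the first quadrant (0-90 degrees). A minimal sketch of the other common fix, asking filter2D for a signed floating-point output instead (an assumption about the intended behaviour, not the asker's exact code), might be:

// Sketch only: keep the gradient sign by filtering into CV_32F images.
Mat img_x, img_y;
filter2D(image, img_x, CV_32F, kern_x);   // horizontal gradient, can be negative
filter2D(image, img_y, CV_32F, kern_y);   // vertical gradient, can be negative

// ...inside the per-pixel loop:
float grad_x = img_x.at<float>(i, j);
float grad_y = img_y.at<float>(i, j);
// atan2 now returns the full -180..180 degree range
double ori = atan2(grad_y, grad_x) * 180.0 / CV_PI;
if (ori < 0)
    ori += 360.0;                         // map to 0..360 before binning into 8 bins of 45 degrees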