Edge detection - bad detection - C++

Hello everyone, and thank you in advance.
I am a student writing C++ code for my final project. My problem is with an edge detection algorithm (image processing): when I run the edge detection in MATLAB I get good results, but when I run the same algorithm written in C++, the resulting image shows poor detection.
In MATLAB I used a threshold of 0.03 and the detection was great (the changes in my images are very subtle: small variations on a white surface).
Thank you so much,
Idan.
Maybe someone can help me; this is my code:
void ApplySobelFilter(unsigned char src[][NUMBER_OF_COLUMNS], float Threshold)
{
    unsigned char dst[NUMBER_OF_ROWS][NUMBER_OF_COLUMNS];
    unsigned char * ptrToImage;
    ptrToImage = dst[0];
    // Kernels for sobel operator
    int Kernel_X[3][3] = { { -1, 0, 1 }, { -2, 0, 2 }, { -1, 0, 1 } };
    int Kernel_Y[3][3] = { { 1, 2, 1 }, { 0, 0, 0 }, { -1, -2, -1 } };
    // clears destination image
    for (int pixel = 0; pixel < NUMBER_OF_ROWS * NUMBER_OF_COLUMNS; pixel++)
        *ptrToImage++ = 0;
    for (int row = 1; row < NUMBER_OF_ROWS - 1; row++)
        for (int column = 1; column < NUMBER_OF_COLUMNS - 1; column++)
        {
            double Gtot = 0;
            int Gx = 0;
            int Gy = 0;
            for (int x = -1; x <= 1; x++)
                for (int y = -1; y <= 1; y++)
                {
                    Gx += src[row + y][column + x] * Kernel_X[y + 1][x + 1];
                    Gy += src[row + y][column + x] * Kernel_Y[y + 1][x + 1];
                }
            Gtot = sqrt(double(Gx ^ 2 + Gy ^ 2));
            if (Gtot >= Threshold)
                dst[row][column] = 255;
            else
                dst[row][column] = 0;
        }
    for (int row = 0; row < NUMBER_OF_ROWS; row++)
    {
        for (int col = 0; col < NUMBER_OF_COLUMNS; col++)
        {
            src[row][col] = dst[row][col];
        }
    }
}

Gtot = sqrt(double(Gx ^ 2 + Gy ^ 2));
That's probably not doing what you expect. The operator ^ computes the bitwise XOR, not the power. In your case it simply flips the second bit of Gx and Gy. Squaring the variables can be done e.g. like this:
Gtot = sqrt(double(Gx * Gx + Gy * Gy));
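As a quick standalone demonstration of the difference (a sketch for illustration, not part of the original post):

#include <cmath>
#include <iostream>

int main() {
    int Gx = 100;
    std::cout << (Gx ^ 2)  << '\n';  // 102: XOR merely flips bit 1 of 100
    std::cout << (Gx * Gx) << '\n';  // 10000: the intended square
}

One further hedged observation: MATLAB normally works on images scaled to [0, 1], so if the 0.03 threshold came from there, the comparable threshold against 8-bit gradient magnitudes would be on the order of 0.03 * 255 ≈ 7.65 rather than 0.03.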

Related

Issue with convolution kernels that add up to zero

I'm making an image editing program in C++ using SFML and tried to add image filters using:
int clamp(int value, int min, int max)
{
    if (value < min)
        return min;
    if (value > max)
        return max;
    return value;
}

void MyImage::applyKernel(std::vector<std::vector<int>> kernel)
{
    int index(0), tempx(0), tempy(0);
    int wr(0), wg(0), wb(0), wa(0), sum(0);
    auto newPixels = new sf::Uint8[this->size_y * this->size_x * 4];
    // Calculate the sum of the kernel
    for (int i = 0; i < kernel.size(); i++) {
        for (int j = 0; j < kernel[i].size(); j++) {
            sum += kernel[i][j];
        }
    }
    for (int y = 0; y < this->size_y; y++) {
        for (int x = 0; x < this->size_x; x++) {
            /*
            Calculate weighted sum from kernel
            */
            wr = wg = wb = wa = 0;
            for (int i = 0; i < kernel.size(); i++) {
                for (int j = 0; j < kernel[i].size(); j++) {
                    /*
                    Calculates the coordinates of the kernel relative to the pixel we are changing
                    */
                    tempx = x + (j - floor(kernel[i].size() / 2));
                    tempy = y + (i - floor(kernel.size() / 2));
                    //std::cout << "kernel=(" << j << ", " << i << "), pixel=(" << x << ", " << y << ") tempPos=(" << tempx << ", " << tempy << ")\n";
                    /*
                    This code below should have the effect of mirroring the image in the case the kernel coordinate is out of bounds (along the edge of the image)
                    */
                    tempx = (tempx < 0) ? -1 * tempx : tempx;
                    tempy = (tempy < 0) ? -1 * tempy : tempy;
                    tempx = (tempx > this->size_x) ? x - (j - floor(kernel[i].size() / 2)) : tempx;
                    tempy = (tempy > this->size_y) ? y - (i - floor(kernel.size() / 2)) : tempy;
                    if (tempx >= 0 && tempx < this->size_x && tempy >= 0 && tempy < this->size_y) {
                        index = (((tempy * this->size_x) - tempy) + (tempx)) * 4;
                        wr += kernel[i][j] * this->pixels[index];
                        wg += kernel[i][j] * this->pixels[index + 1];
                        wb += kernel[i][j] * this->pixels[index + 2];
                        wa += kernel[i][j] * this->pixels[index + 3];
                    }
                }
            }
            if (sum) {
                wr /= sum;
                wg /= sum;
                wb /= sum;
                wa /= sum;
            }
            index = (((y * this->size_x) - y) + (x)) * 4;
            newPixels[index] = clamp(wr, 0, 255);     // Red
            newPixels[index + 1] = clamp(wg, 0, 255); // Green
            newPixels[index + 2] = clamp(wb, 0, 255); // Blue
            newPixels[index + 3] = clamp(wa, 0, 255); // Alpha
        }
    }
    this->pixels = newPixels;
    // Copies the data from our sf::Uint8 array to the image object to be displayed => Removes the overhead of calling setPixel(x,y,color) for every pixel {As a side note setPixel() should always be avoided}
    this->im->create(this->size_x, this->size_y, this->pixels);
}
I was trying to use [-1,-1,-1], [-1,8,-1], [-1,-1,-1] for edge detection but just ended up with a white image except for some pixels near the bottom. I've tried out different images and kernels, but any that sum to 0 don't work. For example, if I take the edge detection kernel above and change the 8 to a 9, it gives the expected result. Is there something wrong with my idea of how convolution kernels work, or is it just a bug in my code?
Thank you.
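One detail worth double-checking (an observation, not a confirmed fix): for a tightly packed RGBA buffer, the conventional row-major offset of pixel (x, y) is (y * size_x + x) * 4, whereas the code computes ((tempy * size_x) - tempy + tempx) * 4, i.e. an effective row stride of size_x - 1 pixels. A minimal sketch of the conventional indexing (the helper name offsetOf is made up for illustration):

// Offset of pixel (x, y) in a tightly packed, row-major RGBA buffer:
// each row holds `width` pixels of 4 bytes each.
int offsetOf(int x, int y, int width)
{
    return (y * width + x) * 4;  // +0 red, +1 green, +2 blue, +3 alpha
}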

Laplacian filter OpenCV C++

I was learning filters in OpenCV, but I'm a little confused about the Laplacian filter. My result is very different from the output of OpenCV's Laplacian filter.
First, I apply a Gaussian filter to the image:
Mat filtroGauss(Mat src){
    Mat gauss = src.clone();
    Mat temp(src.rows+2, src.cols+2, DataType<uchar>::type);
    int y, x;
    for (y=0; y<src.rows; y++){
        for (x=0; x<src.cols; x++) temp.at<uchar>(y+1, x+1) = src.at<uchar>(y, x);
    }
    int mask[lenMask*lenMask];  // lenMask is presumably a constant equal to 3
    mask[0] = mask[2] = mask[6] = mask[8] = 1;
    mask[1] = mask[3] = mask[5] = mask[7] = 2;
    mask[4] = 4;
    int denominatore = 0;
    for (int i=0; i<lenMask*lenMask; i++) denominatore += mask[i];
    int value[lenMask*lenMask];
    for (y=0; y<src.rows; y++){
        for (x=0; x<src.cols; x++){
            value[0] = temp.at<uchar>(y-1,x-1)*mask[0];
            value[1] = temp.at<uchar>(y-1,x)*mask[1];
            value[2] = temp.at<uchar>(y-1,x+1)*mask[2];
            value[3] = temp.at<uchar>(y,x-1)*mask[3];
            value[4] = temp.at<uchar>(y,x)*mask[4];
            value[5] = temp.at<uchar>(y,x+1)*mask[5];
            value[6] = temp.at<uchar>(y+1,x-1)*mask[6];
            value[7] = temp.at<uchar>(y+1,x)*mask[7];
            value[8] = temp.at<uchar>(y+1,x+1)*mask[8];
            int avg = 0;
            for (int i=0; i<lenMask*lenMask; i++) avg += value[i];
            avg = avg/denominatore;
            gauss.at<uchar>(y,x) = avg;
        }
    }
    return gauss;
}
Then I apply the Laplacian operator:
L(y,x) = f(y-1,x) + f(y+1,x) + f(y,x-1) + f(y,x+1) - 4*f(y,x)
Mat filtroLaplace(Mat src){
    Mat output = src.clone();
    Mat temp = src.clone();
    int y, x;
    for (y = 1; y < src.rows-1; y++){
        for (x = 1; x < src.cols-1; x++){
            output.at<uchar>(y,x) = temp.at<uchar>(y-1,x) + temp.at<uchar>(y+1,x) + temp.at<uchar>(y,x-1) + temp.at<uchar>(y,x+1) - 4*temp.at<uchar>(y,x);
        }
    }
    return output;
}
And here is the final result from my code:
OpenCV result:
Let's rewrite the function a little, so it's easier to discuss:
cv::Mat filtroLaplace(cv::Mat src)
{
    cv::Mat output = src.clone();
    for (int y = 1; y < src.rows - 1; y++) {
        for (int x = 1; x < src.cols - 1; x++) {
            int sum = src.at<uchar>(y - 1, x)
                    + src.at<uchar>(y + 1, x)
                    + src.at<uchar>(y, x - 1)
                    + src.at<uchar>(y, x + 1)
                    - 4 * src.at<uchar>(y, x);
            output.at<uchar>(y, x) = sum;
        }
    }
    return output;
}
The source of your problem is sum. Let's examine its range within this algorithm by taking the two extremes:
Black pixel, surrounded by 4 white. That means 255 + 255 + 255 + 255 - 4 * 0 = 1020.
White pixel, surrounded by 4 black. That means 0 + 0 + 0 + 0 - 4 * 255 = -1020.
When you perform output.at<uchar>(y, x) = sum; there's an implicit cast of the int back to unsigned char -- the high order bits simply get chopped off and the value overflows.
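To see the wrap-around concretely (a small standalone demo, not from the original answer):

#include <iostream>

int main() {
    int sum = -1020;
    unsigned char pixel = static_cast<unsigned char>(sum);
    std::cout << int(pixel) << '\n';  // prints 4: -1020 wraps modulo 256, not to 0
    sum = 1020;
    pixel = static_cast<unsigned char>(sum);
    std::cout << int(pixel) << '\n';  // prints 252, not the 255 you might want
}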
The correct approach to handle this situation (which OpenCV takes) is to saturate before the actual cast. Essentially:
if (sum < 0) {
    sum = 0;
} else if (sum > 255) {
    sum = 255;
}
OpenCV provides the function cv::saturate_cast<T> to do just this.
There's an additional problem: you're not handling the edge rows/columns of the input image; you just leave them at the original value. Since you're not asking about that, I'll leave solving it as an exercise for the reader.
Code:
cv::Mat filtroLaplace(cv::Mat src)
{
    cv::Mat output = src.clone();
    for (int y = 1; y < src.rows - 1; y++) {
        for (int x = 1; x < src.cols - 1; x++) {
            int sum = src.at<uchar>(y - 1, x)
                    + src.at<uchar>(y + 1, x)
                    + src.at<uchar>(y, x - 1)
                    + src.at<uchar>(y, x + 1)
                    - 4 * src.at<uchar>(y, x);
            output.at<uchar>(y, x) = cv::saturate_cast<uchar>(sum);
        }
    }
    return output;
}
Sample input:
Output of corrected filtroLaplace:
Output of cv::Laplacian:
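For reference (not part of the original answer), the cv::Laplacian output above can be reproduced with a single call; ksize = 1 selects the same 4-neighbour stencil used in filtroLaplace:

cv::Mat lap;
cv::Laplacian(src, lap, CV_8U, 1);  // saturates to [0, 255] internally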

Gaussian blur image processing C++

After trying to implement a Gaussian blur for an image, I have run into a problem where the output image looks like multiple blurred versions of the original image (input image).
My reputation is too low to post images, so here are gyazo links instead:
https://gyazo.com/38fbe1abd442a3167747760866584655 - Original,
https://gyazo.com/471693c49917d3d3e243ee4156f4fe12 - Output
Here is some code:
int kernel[3][3] = { 1, 2, 1,
                     2, 4, 2,
                     1, 2, 1 };
int accessPixel(unsigned char * arr, int col, int row, int k, int width, int height); // forward declaration

void guassian_blur2D(unsigned char * arr, unsigned char * result, int width, int height)
{
    for (int row = 0; row < height; row++)
    {
        for (int col = 0; col < width; col++)
        {
            for (int k = 0; k < 3; k++)
            {
                result[3 * row * width + 3 * col + k] = accessPixel(arr, col, row, k, width, height);
            }
        }
    }
}

int accessPixel(unsigned char * arr, int col, int row, int k, int width, int height)
{
    int sum = 0;
    int sumKernel = 0;
    for (int j = -1; j <= 1; j++)
    {
        for (int i = -1; i <= 1; i++)
        {
            if ((row + j) >= 0 && (row + j) < height && (col + i) >= 0 && (col + i) < width)
            {
                int color = arr[(row + j) * 3 * width + (col + i) * 3 + k];
                sum += color * kernel[i + 1][j + 1];
                sumKernel += kernel[i + 1][j + 1];
            }
        }
    }
    return sum / sumKernel;
}
The image is saved like this:
guassian_blur2D(inputBuffer, outputBuffer, width, height);
//Save the processed image
outputImage.convertToType(FREE_IMAGE_TYPE::FIT_BITMAP);
outputImage.convertTo24Bits();
outputImage.save("appleBlur.png");
cout << "Blur Complete" << endl;
Any help would be great. If it also helps, I am trying to store the image as grey-scale so that no colour is saved.
Looks like the problem is not within your blurring code, but rather in how the image data is saved or accessed.
I have used OpenCV to read/save images and got the expected result. Here's a snippet:
cv::Mat3b img = cv::imread("path_to_img.png");
cv::Mat3b out = img.clone();
guassian_blur2D(img.data, out.data, img.cols, img.rows);
cv::imshow("img", img);
cv::imshow("out", out);
cv::waitKey(0);
And here are input and output images:
The blur is not very noticeable (due to the high image resolution and small kernel), but if you look carefully, it looks correct.
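If the problem is in the buffer layout, one hedged pointer: the repeated-copies artifact is typical of a byte-stride mismatch between what the blur assumes (3 tightly packed bytes per pixel) and what the library actually hands back (e.g. 8-bit grey-scale, or rows padded for alignment). A quick sanity check in the spirit of the OpenCV snippet above:

#include <opencv2/opencv.hpp>

int main() {
    cv::Mat img = cv::imread("path_to_img.png", cv::IMREAD_COLOR);
    // The blur indexes the buffer as arr[(row * 3 * width) + (col * 3) + k],
    // so the data must be continuous 3-channel bytes with no row padding.
    CV_Assert(img.type() == CV_8UC3 && img.isContinuous());
    CV_Assert(img.step == static_cast<size_t>(3 * img.cols));
    return 0;
}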

Sobel Edge Detection without a buffer

For an embedded design, I am attempting to implement Sobel edge detection on a board without the use of a frame buffer, i.e. I am reading from and writing directly to the screen. I can, however, store about one or two image widths' worth of data to be referenced later; this is due to limitations set forth by the board. I have run into some issues, though: all that I receive is noise, regardless of whether I attempt Sobel or another edge detection algorithm. The code is below; does anyone have any suggestions?
Version 1
void sobelEdgeDetection2() {
    int GX[3][3];
    int GY[3][3];
    int sumX[3];
    int sumY[3];
    int SUM[3];
    int piX = 0;
    int piY = 0;
    //uint8_t R, G, B = 0;
    int I, J = 0;
    //UnpackedColour pixVal;
    uint16_t *buffer;   // buffer for the current scan line
    buffer = new uint16_t[_gl->getWidth()];
    uint16_t *bufT;     // buffer for the previous line
    bufT = new uint16_t[_gl->getWidth()];
    // Masks //////////////////////////////////////
    //X//
    GX[0][0] = -1; GX[0][1] = 0; GX[0][2] = 1;
    GX[1][0] = -2; GX[1][1] = 0; GX[1][2] = 2;
    GX[2][0] = -1; GX[2][1] = 0; GX[2][2] = 1;
    //Y//
    GY[0][0] = 1;  GY[0][1] = 2;  GY[0][2] = 1;
    GY[1][0] = 0;  GY[1][1] = 0;  GY[1][2] = 0;
    GY[2][0] = -1; GY[2][1] = -2; GY[2][2] = -1;
    for (int Y = 0; Y < _gl->getHeight(); Y++) {
        for (int X = 0; X < _gl->getWidth(); X++) {
            sumX[0] = sumX[1] = sumX[2] = 0;
            sumY[0] = sumY[1] = sumY[2] = 0;
            if (Y == 0 || Y == _gl->getHeight() - 1) {
                SUM[0] = SUM[1] = SUM[2] = 0;
            } else if (X == 0 || X == _gl->getWidth() - 1) {
                SUM[0] = SUM[1] = SUM[2] = 0;
            } else {
                for (I = -1; I <= 1; I++) {
                    for (J = -1; J <= 1; J++) {
                        piX = J + X;
                        piY = I + Y;
                        pixel16 pix = getPixel(piX, piY);
                        uint8_t Red = pix.Red;
                        uint8_t Green = pix.Green;
                        uint8_t Blue = pix.Blue;
                        sumX[0] += (Red) * GX[J + 1][I + 1];
                        sumX[1] += (Green) * GX[J + 1][I + 1];
                        sumX[2] += (Blue) * GX[J + 1][I + 1];
                        sumY[0] += (Red) * GY[J + 1][I + 1];
                        sumY[1] += (Green) * GY[J + 1][I + 1];
                        sumY[2] += (Blue) * GY[J + 1][I + 1];
                    }
                }
                SUM[0] = abs(sumX[0]) + abs(sumY[0]);
                SUM[1] = abs(sumX[1]) + abs(sumY[1]);
                SUM[2] = abs(sumX[2]) + abs(sumY[2]);
            }
            if (SUM[0] > 255) SUM[0] = 255;
            if (SUM[0] < 0)   SUM[0] = 0;
            if (SUM[1] > 255) SUM[1] = 255;
            if (SUM[1] < 0)   SUM[1] = 0;
            if (SUM[2] > 255) SUM[2] = 255;
            if (SUM[2] < 0)   SUM[2] = 0;
            int newPixel[3];
            newPixel[0] = (255 - ((unsigned char) (SUM[0])));
            newPixel[1] = (255 - ((unsigned char) (SUM[1])));
            newPixel[2] = (255 - ((unsigned char) (SUM[2])));
            pixel16 pix(newPixel[0], newPixel[1], newPixel[2]);
            buffer[X] = packColour(pix).packed565;
        }
        //Need to move cursor back
        // draw it
        this->paintRow(Point(0, Y), buffer, _gl->getWidth());
    }
    delete[] buffer;
    delete[] bufT;
}
Version 2
/**
 * https://www.cl.cam.ac.uk/projects/raspberrypi/tutorials/image-processing/edge_detection.html
 * 1 Iterate over every pixel in the image
 * 2 Apply the x gradient kernel
 * 3 Apply the y gradient kernel
 * 4 Find the length of the gradient using Pythagoras' theorem
 * 5 Normalise the gradient length to the range 0-255
 * 6 Set the pixels to the new values
 */
void sobelEdgeDetection4() {
    UnpackedColour colour;
    for (int x = 1; x < _gl->getWidth() - 1; x++) {
        for (int y = 1; y < _gl->getHeight() - 1; y++) {
            // initialise Gx and Gy to 0
            int Gx = 0;
            int Gy = 0;
            unsigned int intensity = 0;
            // Left column
            pixel16 pixel = this->getPixel(x - 1, y - 1);
            intensity = pixel.Red + pixel.Green + pixel.Blue;
            Gx += -intensity;
            Gy += -intensity;
            pixel = this->getPixel(x - 1, y);
            intensity = pixel.Red + pixel.Green + pixel.Blue;
            Gx += -2 * intensity;
            pixel = this->getPixel(x - 1, y + 1);
            intensity = pixel.Red + pixel.Green + pixel.Blue;
            Gx += -intensity;
            Gy += +intensity;
            // middle column
            pixel = this->getPixel(x, y - 1);
            intensity = pixel.Red + pixel.Green + pixel.Blue;
            Gy += -2 * intensity;
            pixel = this->getPixel(x, y + 1);
            intensity = pixel.Red + pixel.Green + pixel.Blue;
            Gy += +2 * intensity;
            // right column
            pixel = this->getPixel(x + 1, y - 1);
            intensity = pixel.Red + pixel.Green + pixel.Blue;
            Gx += +intensity;
            Gy += -intensity;
            pixel = this->getPixel(x + 1, y);
            intensity = pixel.Red + pixel.Green + pixel.Blue;
            Gx += +2 * intensity;
            pixel = this->getPixel(x + 1, y + 1);
            intensity = pixel.Red + pixel.Green + pixel.Blue;
            Gx += +intensity;
            Gy += +intensity;
            // calculate the gradient length
            unsigned int length = (unsigned int) sqrt(
                (float) (Gx * Gx) + (float) (Gy * Gy));
            // normalise the length to 0 to 255
            length = length / 17;
            // draw the pixel on the edge image
            pixel16 pixel2(length, length, length);
            this->setPixel(x, y, pixel2);
        }
    }
}
Version 3
// sobel map for the x axis
const double _SOBEL_Gx[3][3] = { { -1.0, +0.0, +1.0 },
                                 { -2.0, +0.0, +2.0 },
                                 { -1.0, +0.0, +1.0 } };
// sobel map for the y axis
const double _SOBEL_Gy[3][3] = { { +1.0, +2.0, +1.0 },
                                 { +0.0, +0.0, +0.0 },
                                 { -1.0, -2.0, -1.0 } };

double get_sobel_gradient(int width, int height, int x, int y) {
    double sobel_gradient_x = 0, sobel_gradient_y = 0;
    int mx = 0, my = 0, sx = 0, sy = 0;
    for (mx = x; mx < x + 3; mx++) {
        sy = 0;
        for (my = y; my < y + 3; my++) {
            if (mx < width && my < height) {
                //int r, g, b, idx;
                int idx = (mx + width * my) * 3;
                pixel16 pixVal = this->getPixel(idx);
                //r = pixVal.Red;
                //g = pixVal.Green;
                //b = pixVal.Blue;
                UnpackedColour col = this->packColour(pixVal);
                sobel_gradient_x += col.packed565 * _SOBEL_Gx[sx][sy];
                sobel_gradient_y += col.packed565 * _SOBEL_Gy[sx][sy];
            }
            sy++;
        }
        sx++;
    }
    return abs(sobel_gradient_x) + abs(sobel_gradient_y);
}

void sobelEdgeDetection3() {
    double threshold = 50000.0;
    UnpackedColour colour;
    for (int y = 0; y < _gl->getHeight(); y++) {
        for (int x = 0; x < _gl->getWidth(); x++) {
            if (get_sobel_gradient(_gl->getWidth(), _gl->getHeight(), x, y) >= threshold) {
                colour.packed565 = 0x0000; // edge -> black in RGB565
            } else {
                colour.packed565 = 0xFFFF; // non-edge -> white
            }
            this->setPixel(x, y, colour);
        }
    }
}
For Version 1, after you allocate 2 buffers (just use buffer and bufT), create 2 pointers to point to the current and previous rows, like this:
uint16_t *currentRow = buffer;
uint16_t *prevRow = bufT;
Inside the row loop, write to currentRow instead of buffer:
pixel16 pix(newPixel[0], newPixel[1], newPixel[2]);
currentRow[X] = packColour(pix).packed565;
Because the Sobel filter reads from the previous row, you can't overwrite a row until after you have finished calculating the filtered values for the row after it. So at the end of the loop, where you are currently calling paintRow(), draw the previous row (if one exists), then swap the buffers so that the current row becomes the previous one, and the previous buffer becomes the new current row (to be overwritten on the next pass through the loop). On the last row, the current row is drawn as well, because the outer loop is about to terminate and there would otherwise be no further pass to draw it.
if (Y > 0) // draw the previous row if this is not the first row:
    this->paintRow(Point(0, Y - 1), prevRow, _gl->getWidth());
if (Y == _gl->getHeight() - 1) // draw the current row if it is the last:
    this->paintRow(Point(0, Y), currentRow, _gl->getWidth());
// swap row pointers:
uint16_t *temp = prevRow;
prevRow = currentRow;
currentRow = temp;
The same strategy should work for the other versions.
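To make the flow concrete, here is a self-contained sketch of the same two-row strategy on a simulated grayscale screen (the 8x8 test image and the std::vector buffers are illustrative assumptions; the real code would keep using getPixel and paintRow):

#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <utility>
#include <vector>

int main() {
    const int W = 8, H = 8;
    std::vector<uint8_t> screen(W * H, 0);
    screen[3 * W + 3] = 255;  // one bright pixel so the filter has an edge to find

    const int GX[3][3] = { { -1, 0, 1 }, { -2, 0, 2 }, { -1, 0, 1 } };
    const int GY[3][3] = { { 1, 2, 1 }, { 0, 0, 0 }, { -1, -2, -1 } };
    std::vector<uint8_t> currentRow(W, 0), prevRow(W, 0);

    for (int y = 0; y < H; y++) {
        for (int x = 0; x < W; x++) {
            int gx = 0, gy = 0;
            if (y > 0 && y < H - 1 && x > 0 && x < W - 1) {
                for (int i = -1; i <= 1; i++)
                    for (int j = -1; j <= 1; j++) {
                        gx += screen[(y + i) * W + (x + j)] * GX[i + 1][j + 1];
                        gy += screen[(y + i) * W + (x + j)] * GY[i + 1][j + 1];
                    }
            }
            currentRow[x] = (uint8_t)std::min(std::abs(gx) + std::abs(gy), 255);
        }
        // Row y-1 is no longer read by the filter, so it is safe to overwrite now.
        if (y > 0)
            std::copy(prevRow.begin(), prevRow.end(), screen.begin() + (y - 1) * W);
        if (y == H - 1)
            std::copy(currentRow.begin(), currentRow.end(), screen.begin() + y * W);
        std::swap(prevRow, currentRow);
    }
    return 0;
}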

OpenCV filter on multi-dimensional Mat

I want to port the following MATLAB code to C++:
gaussFilter = fspecial('gaussian', 2*neighSize+1, 0.5*neighSize);
pointFeature = imfilter(pointFeature, gaussFilter, 'symmetric');
where pointFeature is a [height, width, 24] array.
I tried to use filter2D, but it only supports 2D arrays.
So I want to know: are there functions in OpenCV that can filter a multi-dimensional array?
You can use separable kernel filters to build a filter of any dimensionality.
If you are using OpenCV, you could try this for a 3-dimensional MatND:
void Smooth3DHist(cv::MatND &hist, const int& kernDimension)
{
    assert(hist.dims == 3);
    int x_size = hist.size[0];
    int y_size = hist.size[1];
    int z_size = hist.size[2];
    int xy_size = x_size * y_size;
    cv::Mat kernal = cv::getGaussianKernel(kernDimension, -1, CV_32F);
    // Filter XY dimensions for every Z
    for (int z = 0; z < z_size; z++)
    {
        float *ind = (float*)hist.data + z * xy_size; // sub-matrix pointer
        cv::Mat subMatrix(2, hist.size, CV_32F, ind);
        cv::sepFilter2D(subMatrix, subMatrix, CV_32F, kernal.t(), kernal, cv::Point(-1,-1), 0.0, cv::BORDER_REPLICATE);
    }
    // Filter Z dimension
    float* kernGauss = (float *)kernal.data;
    unsigned kernSize = kernal.total();
    int kernMargin = (kernSize - 1) / 2;
    float* lineBuffer = new float[z_size + 2 * kernMargin];
    for (int y = 0; y < y_size; y++)
    {
        for (int x = 0; x < x_size; x++)
        {
            // Copy along Z dimension into a line buffer
            float* z_ptr = (float*)hist.data + y * x_size + x; // same as hist.ptr<float>(0, y, x)
            for (int z = 0; z < z_size; z++, z_ptr += xy_size)
            {
                lineBuffer[z + kernMargin] = *z_ptr;
            }
            // Replicate borders
            for (int m = 0; m < kernMargin; m++)
            {
                lineBuffer[m] = lineBuffer[kernMargin]; // replicate left side
                lineBuffer[z_size + 2 * kernMargin - 1 - m] = lineBuffer[kernMargin + z_size - 1]; // replicate right side
            }
            // Filter line buffer 1D - convolution
            z_ptr = (float*)hist.data + y * x_size + x;
            for (int z = 0; z < z_size; z++, z_ptr += xy_size)
            {
                *z_ptr = 0.0f;
                for (unsigned k = 0; k < kernSize; k++)
                {
                    *z_ptr += lineBuffer[z + k] * kernGauss[k];
                }
            }
        }
    }
    delete[] lineBuffer;
}
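A hypothetical usage example (the histogram size and the 5-tap kernel width are made-up values for illustration; the function above additionally needs #include <opencv2/opencv.hpp> and <cassert>):

int main() {
    int sizes[3] = { 32, 32, 32 };
    cv::MatND hist(3, sizes, CV_32F, cv::Scalar(0));
    hist.at<float>(16, 16, 16) = 1.0f;  // an impulse, to observe the smoothing
    Smooth3DHist(hist, 5);              // Gaussian with 5 taps per dimension
    return 0;
}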