Separable Sobel filter implementation in OpenCV C++

I am working on my own implementation of a separable Sobel filter. My function takes as input the kernelSize, the horizontal filter of gradient Y as pixelsY1, the vertical filter of gradient Y as pixelsY2, the horizontal filter of gradient X as pixelsX1, and the vertical filter of gradient X as pixelsX2.
The input of X1 is [1, 0, -1] (horizontal)
The input of X2 is [1, 2, 1] (vertical)
The input of Y1 is [1, 2, 1] (horizontal)
The input of Y2 is [1, 0, -1] (vertical)
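(For reference, each pair of 1-D filters is the separable decomposition of the corresponding 3x3 Sobel kernel: the outer product of the vertical and horizontal vectors reconstructs it.

[1]                [ 1  0 -1]
[2] * [1 0 -1]  =  [ 2  0 -2]
[1]                [ 1  0 -1]

Likewise, [1 0 -1]^T * [1 2 1] gives the Y kernel.)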
void gradientFilter1D(Mat& img, int kernelSize, vector<double> pixelsY1, vector<double> pixelsY2, vector<double> pixelsX1, vector<double> pixelsX2)
{
    int sumMin = INT_MAX, sumMax = INT_MIN;
    //gradient X
    vector<vector<int>> pixelsX(img.rows, vector<int>(img.cols, 0));
    //gradient Y
    vector<vector<int>> pixelsY(img.rows, vector<int>(img.cols, 0));
    vector<vector<int>> sumArray(img.rows, vector<int>(img.cols, 0));

    for (int j = kernelSize / 2; j < img.rows - kernelSize / 2; j++)
    {
        for (int i = kernelSize / 2; i < img.cols - kernelSize / 2; i++)
        {
            double totalX = 0;
            double totalY = 0;
            //this is the horizontal multiplication
            for (int x = -kernelSize / 2; x <= kernelSize / 2; x++)
            {
                totalY += img.at<uchar>(j, i + x) * pixelsY1[x + (kernelSize / 2)];
                totalX += img.at<uchar>(j, i + x) * pixelsX1[x + (kernelSize / 2)];
                //cout << int(img.at<uchar>(j, i + x)) << " " << pixelsY1[x + (kernelSize / 2)] << endl;
            }
            pixelsX[j][i] = totalX;
            pixelsY[j][i] = totalY;
        }
    }
    for (int j = kernelSize / 2; j < img.rows - kernelSize / 2; j++)
    {
        for (int i = kernelSize / 2; i < img.cols - kernelSize / 2; i++)
        {
            double totalX = 0;
            double totalY = 0;
            //this is the vertical multiplication
            for (int x = -kernelSize / 2; x <= kernelSize / 2; x++)
            {
                totalY += pixelsY[j + x][i] * pixelsY2[x + (kernelSize / 2)];
                totalX += pixelsX[j + x][i] * pixelsX2[x + (kernelSize / 2)];
            }
            pixelsX[j][i] = totalX;
            pixelsY[j][i] = totalY;
        }
    }
    for (int j = 0; j < img.rows; j++)
    {
        for (int i = 0; i < img.cols; i++)
        {
            int sum;
            sum = sqrt(pow(pixelsX[j][i], 2) + pow(pixelsY[j][i], 2));
            sumArray[j][i] = sum;
            sumMin = sumMin < sum ? sumMin : sum;
            sumMax = sumMax > sum ? sumMax : sum;
        }
    }
    //normalization
    for (int j = 0; j < img.rows; j++)
        for (int i = 0; i < img.cols; i++)
        {
            sumArray[j][i] = (sumArray[j][i] - sumMin) * ((255.0 - 0) / (sumMax - sumMin)) + 0;
            img.at<uchar>(j, i) = sumArray[j][i];
        }
}
Input Image:
Output Image:
What am I doing wrong?

The separable filter is computed in what are effectively two passes. (The passes can be interleaved, but all the values used by the vertical filter have to have already been computed by the horizontal filter if doing it in that order.) Right below the comment //this is the vertical multiplication, the accesses to pixelsX and pixelsY form the second pass of the separable filter, but the results are written back into those same arrays. So for negative values of x the values being read have already been overwritten by the vertical pass, while for positive values of x they still hold the horizontal-pass results; the pass ends up mixing old and new values.
Check out Halide. It makes this sort of code a lot easier to write and more performant. (A doubly nested std::vector is not a good way to go.)

Okay, so my mistake was actually in this:
for (int j = kernelSize / 2; j < img.rows - kernelSize / 2; j++)
{
    for (int i = kernelSize / 2; i < img.cols - kernelSize / 2; i++)
    {
        double totalX = 0;
        double totalY = 0;
        //this is the vertical multiplication
        for (int x = -kernelSize / 2; x <= kernelSize / 2; x++)
        {
            totalY += pixelsY[j + x][i] * pixelsY2[x + (kernelSize / 2)];
            totalX += pixelsX[j + x][i] * pixelsX2[x + (kernelSize / 2)];
        }
        pixelsX[j][i] = totalX;  // <---- I overwrite the old values
        pixelsY[j][i] = totalY;  // <---- I overwrite the old values
    }
}
So pixelsX[j][i] = totalX (and likewise for Y) is wrong, because the remaining iterations of the j and i loops still need the old horizontal-pass values to finish the computation. I created another pair of vectors of vectors, wrote the totalX and totalY values into those instead, and this solved my issue; a minimal sketch of that fix follows.
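For illustration, a sketch of the corrected vertical pass with separate output buffers (gradX and gradY are hypothetical names, not from the original post):

// Second (vertical) pass writes into fresh buffers instead of in place,
// so the horizontal-pass results in pixelsX/pixelsY stay intact.
vector<vector<int>> gradX(img.rows, vector<int>(img.cols, 0));
vector<vector<int>> gradY(img.rows, vector<int>(img.cols, 0));
for (int j = kernelSize / 2; j < img.rows - kernelSize / 2; j++)
{
    for (int i = kernelSize / 2; i < img.cols - kernelSize / 2; i++)
    {
        double totalX = 0, totalY = 0;
        for (int x = -kernelSize / 2; x <= kernelSize / 2; x++)
        {
            totalY += pixelsY[j + x][i] * pixelsY2[x + (kernelSize / 2)];
            totalX += pixelsX[j + x][i] * pixelsX2[x + (kernelSize / 2)];
        }
        gradX[j][i] = totalX;
        gradY[j][i] = totalY;
    }
}
// The magnitude and normalization loops then read gradX/gradY instead of pixelsX/pixelsY.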

Related

Issue with convolution kernels that add up to zero

I'm making an image editing program in C++ using SFML and tried to add image filters using:
int clamp(int value, int min, int max)
{
    if (value < min)
        return min;
    if (value > max)
        return max;
    return value;
}

void MyImage::applyKernel(std::vector<std::vector<int>> kernel)
{
    int index(0), tempx(0), tempy(0);
    int wr(0), wg(0), wb(0), wa(0), sum(0);
    auto newPixels = new sf::Uint8[this->size_y * this->size_x * 4];
    // Calculate the sum of the kernel
    for (int i = 0; i < kernel.size(); i++) {
        for (int j = 0; j < kernel[i].size(); j++) {
            sum += kernel[i][j];
        }
    }
    for (int y = 0; y < this->size_y; y++) {
        for (int x = 0; x < this->size_x; x++) {
            // Calculate weighted sum from kernel
            wr = wg = wb = wa = 0;
            for (int i = 0; i < kernel.size(); i++) {
                for (int j = 0; j < kernel[i].size(); j++) {
                    // Calculates the coordinates of the kernel relative to the pixel we are changing
                    tempx = x + (j - floor(kernel[i].size() / 2));
                    tempy = y + (i - floor(kernel.size() / 2));
                    //std::cout << "kernel=(" << j << ", " << i << "), pixel=(" << x << ", " << y << ") tempPos=(" << tempx << ", " << tempy << ")\n";
                    // This code below should have the effect of mirroring the image
                    // in the case the kernel coordinate is out of bounds (along the edge of the image)
                    tempx = (tempx < 0) ? -1 * tempx : tempx;
                    tempy = (tempy < 0) ? -1 * tempy : tempy;
                    tempx = (tempx > this->size_x) ? x - (j - floor(kernel[i].size() / 2)) : tempx;
                    tempy = (tempy > this->size_y) ? y - (i - floor(kernel.size() / 2)) : tempy;
                    if (tempx >= 0 && tempx < this->size_x && tempy >= 0 && tempy < this->size_y) {
                        index = (((tempy * this->size_x) - tempy) + (tempx)) * 4;
                        wr += kernel[i][j] * this->pixels[index];
                        wg += kernel[i][j] * this->pixels[index + 1];
                        wb += kernel[i][j] * this->pixels[index + 2];
                        wa += kernel[i][j] * this->pixels[index + 3];
                    }
                }
            }
            if (sum) {
                wr /= sum;
                wg /= sum;
                wb /= sum;
                wa /= sum;
            }
            index = (((y * this->size_x) - y) + (x)) * 4;
            newPixels[index] = clamp(wr, 0, 255);     // Red
            newPixels[index + 1] = clamp(wg, 0, 255); // Green
            newPixels[index + 2] = clamp(wb, 0, 255); // Blue
            newPixels[index + 3] = clamp(wa, 0, 255); // Alpha
        }
    }
    this->pixels = newPixels;
    // Copies the data from our sf::Uint8 array to the image object to be displayed
    // => Removes the overhead of calling setPixel(x,y,color) for every pixel
    // (As a side note setPixel() should always be avoided)
    this->im->create(this->size_x, this->size_y, this->pixels);
}
I was trying to use [-1,-1,-1], [-1,8,-1], [-1,-1,-1] for edge detection but just ended up with a white image, except for some pixels near the bottom. I've tried different images and kernels, but any kernel that adds up to 0 doesn't work. For example, if I take the edge detection kernel above and change the 8 to a 9, it gives the expected result. Is there something wrong with my idea of how convolution kernels work, or is it just a bug in my code?
Thank you.
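As an aside (not part of the original question), the conventional index of pixel (x, y) in a row-major RGBA buffer is (y * width + x) * 4; a minimal reference sketch to check the index computation above against (pixelIndex is a hypothetical helper name):

// Row-major RGBA indexing: each pixel occupies 4 consecutive bytes.
inline int pixelIndex(int x, int y, int width)
{
    return (y * width + x) * 4;  // y * width + x, with nothing subtracted
}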

How do I handle edge pixels from an image without any libraries but the standard ones from C++?

I have developed code that can read and handle the bits from a 24-bit BMP image, mostly applying filters, but now I want to make my blur filter blur the edge pixels too. Right now I have a 1-pixel unblurred edge; I'm using a 3x3 box blur, and this is the image I get after the blur is applied:
https://i.stack.imgur.com/0Px6Z.jpg
I'm able to keep the original bits from the image if I use an if statement in my inner loop, but that doesn't really help, given that I want those pixels blurred rather than left as the original unblurred bits.
Here is the code:
for (int count = 0; count < times; ++count) {
    for (int x = 1; x < H - 1; ++x) {
        for (int y = 1; y < W - 1; ++y) {
            double sum1 = 0;
            double sum2 = 0;
            double sum3 = 0;
            for (int k = -1; k <= 1; ++k) {
                for (int j = -1; j <= 1; ++j) {
                    sum1 += bits[((x - j) * W + (y - k)) * 3] * kernel[j + 1][k + 1];
                    sum2 += bits[((x - j) * W + (y - k)) * 3 + 1] * kernel[j + 1][k + 1];
                    sum3 += bits[((x - j) * W + (y - k)) * 3 + 2] * kernel[j + 1][k + 1];
                }
            }
            if (sum1 <= 0) sum1 = 0;
            if (sum1 >= 255) sum1 = 255;
            if (sum2 <= 0) sum2 = 0;
            if (sum2 >= 255) sum2 = 255;
            if (sum3 <= 0) sum3 = 0;
            if (sum3 >= 255) sum3 = 255;
            temp[(x * W + y) * 3] = sum1;
            temp[(x * W + y) * 3 + 1] = sum2;
            temp[(x * W + y) * 3 + 2] = sum3;
        }
    }
    bits = temp;
}
I know that five nested for loops are really slow, but I would like to make it work properly first; if there are any tips on how to improve it, I'm all ears.
As for the loops: the outermost one applies the filter as many times as you want, the next two walk the vector as a 2-D image, and the inner two implement the box blur.
Important things to know: I store a 1-D vector of channel values (RGB) rather than whole pixels, which is why I treat them one by one. A sketch of one common way to handle the border pixels follows.
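One common approach (an assumption, not from the original post) is to mirror out-of-range coordinates back into the image before indexing, so the same kernel loop also covers the border pixels; a minimal sketch using the same bits/W/H layout:

// Reflect an index back into [0, limit - 1] (mirror border handling).
inline int reflect(int idx, int limit)
{
    if (idx < 0)      return -idx;                 // mirror below 0
    if (idx >= limit) return 2 * limit - idx - 2;  // mirror past the end
    return idx;
}

// Then run x over [0, H) and y over [0, W) and reflect each kernel tap:
// int xi = reflect(x - j, H);
// int yi = reflect(y - k, W);
// sum1 += bits[(xi * W + yi) * 3] * kernel[j + 1][k + 1];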

Drawing the top and bottom of a cylinder

I'm trying to create a class that can procedurally create prisms (or cylinders, if the precision is high enough), but only the sides of the 3-D model are showing (not the top and bottom). This is using OpenGL and C++. I'm not going for efficiency, just modifying a previous class that made a sphere.
#define numSlices 2

Prism::Prism() {
    init(3);
}

Prism::Prism(int prec) {
    init(prec);
}

float Prism::toRadians(float degrees) { return (degrees * 2.0f * 3.14159f) / 360.0f; }

void Prism::init(int prec) {
    prec = (prec < 3) ? 3 : prec;
    numVertices = (prec + 1) * (numSlices + 1);
    numIndices = prec * numSlices * 6;
    for (int i = 0; i < numVertices; i++) { vertices.push_back(glm::vec3()); }
    for (int i = 0; i < numVertices; i++) { texCoords.push_back(glm::vec2()); }
    for (int i = 0; i < numVertices; i++) { normals.push_back(glm::vec3()); }
    for (int i = 0; i < numVertices; i++) { tangents.push_back(glm::vec3()); }
    for (int i = 0; i < numIndices; i++) { indices.push_back(0); }
    // calculate triangle vertices
    for (int i = 0; i <= numSlices; i++) {
        for (int j = 0; j <= prec; j++) {
            float y = i;
            float x = -(float)cos(toRadians(j * 360.0f / (float)prec));
            float z = (float)sin(toRadians(j * 360.0f / (float)prec));
            vertices[i * (prec + 1) + j] = glm::vec3(x, y, z);
            texCoords[i * (prec + 1) + j] = glm::vec2((float)j / prec, (float)i / numSlices);
        }
    }
    // calculate triangle indices
    for (int i = 0; i < numSlices; i++) {
        for (int j = 0; j < prec; j++) {
            indices[6 * (i * prec + j) + 0] = i * (prec + 1) + j;
            indices[6 * (i * prec + j) + 1] = i * (prec + 1) + j + 1;
            indices[6 * (i * prec + j) + 2] = (i + 1) * (prec + 1) + j;
            indices[6 * (i * prec + j) + 3] = i * (prec + 1) + j + 1;
            indices[6 * (i * prec + j) + 4] = (i + 1) * (prec + 1) + j + 1;
            indices[6 * (i * prec + j) + 5] = (i + 1) * (prec + 1) + j;
        }
    }
}
Any tips or solutions that stick closely to the code already written would be much appreciated.
To render the top and bottom of the cylinder, you can create a "triangle fan" that starts from a vertex at the center of the top/bottom of the cylinder and creates one triangle for every side.
Adapting your code (untested; I may have made mistakes with the winding order):
int bottom_center = (int)vertices.size(); vertices.push_back(glm::vec3(0, 0, 0));
int top_center = (int)vertices.size(); vertices.push_back(glm::vec3(0, numSlices, 0));
// Bottom fan: the bottom ring starts at vertex 0
for (int j = 0; j < prec; j++) {
    int base = 0;
    indices.push_back(bottom_center);
    indices.push_back(base + j);
    indices.push_back(base + j + 1);
}
// Top fan: the top ring starts at numSlices * (prec + 1)
for (int j = 0; j < prec; j++) {
    int base = numSlices * (prec + 1);
    indices.push_back(top_center);
    indices.push_back(base + j);
    indices.push_back(base + j + 1);
}
See http://www.songho.ca/opengl/gl_cylinder.html for a more worked-out example.

Unexpected Harris Detector results

I load the vertical and horizontal gradients into the function posted here, and it calculates the sums which then make up the corner response. Why do only border pixels get found? My threshold is 0; otherwise there are 0 corners in the image. For the gradients I used the Sobel operator.
Look at the output image below.
for (int i = 0; i < width; i++)
{
    for (int j = 0; j < height; j++)
    {
        if ((i - search_size / 2 < 0 || i + search_size / 2 > image1.rows - 1) || (j - search_size / 2 < 0 || j + search_size / 2 > image1.cols - 1)) {
            continue;
        }
        double Ix2 = 0, Iy2 = 0, Ixy = 0;
        double detM = 0;
        double traceM = 0;
        double R = 0;
        for (int m = i - search_size / 2; m < i + search_size / 2; m++) {
            for (int n = j - search_size / 2; n < j + search_size / 2; n++) {
                gauss = exp(-(((i - m) * (i - m)) + ((j - n) * (j - n))) / gaus_del);
                // Compute Ix^2, Iy^2 and Ixy
                Ix2 += gauss * (image1.at<float>(m, n) * image1.at<float>(m, n));
                Iy2 += gauss * (image2.at<float>(m, n) * image2.at<float>(m, n));
                Ixy += gauss * (image1.at<float>(m, n) * image2.at<float>(m, n));
            }
        }
        detM = (Ix2 * Iy2 - Ixy * Ixy);
        traceM = Ix2 * Ix2 + Iy2 * Iy2;
        R = detM / traceM;
        //cout << i + j << endl;
        //std::cout << "R :" << Iy2 << endl;
        if (R > threshold)
        {
            circle(image, cv::Point2f(i, j), 3.5, cv::Scalar(255, 255, 0), 1, 5);
            cout << "corner found" << endl;
        }
    }
}
EDIT: I am using uchars now and the result looks a lot better.
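For reference (not from the original post), the standard Harris measure uses the trace of M, trace(M) = Ix2 + Iy2, rather than a sum of squares; a minimal sketch of the usual response for one pixel, given the windowed sums computed above:

double det   = Ix2 * Iy2 - Ixy * Ixy;
double trace = Ix2 + Iy2;            // trace of M, not Ix2*Ix2 + Iy2*Iy2
double k     = 0.04;                 // typical empirical value: 0.04 - 0.06
double R     = det - k * trace * trace;
// The det/trace variant used above is also common: R = det / (trace + epsilon);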

Histogram of oriented gradients

For a project I'm writing some code to compute the HoG of some images, but I'm stuck on the fact that my orientations only fall between 0 and 90 degrees, even though I'm using the atan2 function.
I'm guessing that this problem is caused by OpenCV's filter2D function, but I'm not sure whether that is the reason or I'm doing something else wrong:
Vector<Vector<Mat_<float>>> HoG(Mat image) {
    Mat img_x;
    Mat img_y;
    IplImage img = image;
    Mat kern_x = (Mat_<char>(1, 3) << -1, 0, 1);
    Mat kern_y = (Mat_<char>(3, 1) << -1, 0, 1);
    filter2D(image, img_x, image.depth(), kern_x);
    filter2D(image, img_y, image.depth(), kern_y);
    Vector<Vector<Mat_<float>>> histograms;
    for (int y = 0; y < image.rows - size; y += size) {
        Vector<Mat_<float>> temp_hist;
        for (int x = 0; x < image.cols - size; x += size) {
            float total_mag = 0;
            Mat hist = Mat::zeros(1, 8, CV_32FC1);
            for (int i = y; i < y + size; ++i) {
                for (int j = x; j < x + size; ++j) {
                    float grad_x = (float)img_x.at<uchar>(i, j);
                    float grad_y = (float)img_y.at<uchar>(i, j);
                    double ori = myatan2(grad_x, grad_y);
                    float mag = sqrt(pow(grad_x, 2) + pow(grad_y, 2));
                    int bin = round(ori / 45);
                    hist.at<float>(0, (bin - 1 < 0 ? 7 : bin - 1)) += -(float)(ori - ((round(ori / 45) - 1) * 45.0 + 22.5)) / 45.0f;
                    hist.at<float>(0, bin) += -(float)(ori - ((round(ori / 45) - 1) * 45.0 + 22.5)) / 45.0f;
                    total_mag += mag;
                }
            }
            // Normalize the histogram
            for (int i = 0; i < 8; ++i) {
                hist.at<float>(0, i) = hist.at<float>(0, i) / total_mag;
            }
            temp_hist.push_back(hist);
        }
        histograms.push_back(temp_hist);
    }
    return histograms;
}
If you have any other tips for speeding up my code, or anything else, that is of course also welcome.
I notice this:
float grad_x = (float)img_x.at<uchar>(i, j);
float grad_y = (float)img_y.at<uchar>(i, j);
You seem to be using uchar. Should this not be char?
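To expand on that with a sketch (an assumption about one possible fix, not part of the original answer): reading the filtered images as unsigned values discards the negative gradient responses, which folds all orientations into a single quadrant. Asking filter2D for a float destination depth and reading floats keeps the sign, so atan2 can cover the full angular range:

// Minimal sketch: keep signed gradient values so atan2 spans all quadrants.
cv::Mat img_x, img_y;
cv::Mat kern_x = (cv::Mat_<float>(1, 3) << -1, 0, 1);
cv::Mat kern_y = (cv::Mat_<float>(3, 1) << -1, 0, 1);
cv::filter2D(image, img_x, CV_32F, kern_x);  // CV_32F keeps negative responses
cv::filter2D(image, img_y, CV_32F, kern_y);

// Inside the per-pixel loop:
float grad_x = img_x.at<float>(i, j);
float grad_y = img_y.at<float>(i, j);
double ori = atan2(grad_y, grad_x);          // now in (-pi, pi], all quadrants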