Dilation Image on OpenCV C/C++

I want to create a dilated image using a kernel that runs through the entire image and checks whether the kernel zone contains a 0; if so, it sets the corresponding pixel of the new image to 255. My code is giving me an all-black dst and I don't know why.
This is the code:
Mat vcpi_binary_dilate(Mat src)
{
    if (src.empty())
    {
        cout << "Failed to load image.";
        return src;
    }
    Mat dst(src.rows, src.cols, CV_8UC1, Scalar(0));
    const int kernnel = 3;
    int array[kernnel * kernnel] = {};
    for (int y = kernnel / 2; y < src.cols - kernnel / 2; y++)
    {
        for (int x = kernnel / 2; x < src.rows - kernnel / 2; x++)
        {
            for (int yk = -kernnel / 2; yk <= kernnel / 2; yk++)
            {
                for (int xk = -kernnel / 2; xk <= kernnel / 2; xk++)
                {
                    if (src.at<uchar>(y + yk, x + xk) == 0)
                    {
                        dst.at<uchar>(y + yk, x + xk) = 255;
                    }
                }
            }
        }
    }
    imshow("Image ", src);
    imshow("Image dilate", dst);
    waitKey(0);
    return dst;
}
I hope to have an output image of this type.

I am not sure about the algorithm you are trying to implement.
But there is one thing that is definitely wrong: the image dimensions are mixed up.
cols is the width and corresponds to the x axis.
rows is the height and corresponds to the y axis.
This is causing you to access the images via cv::Mat::at with invalid coordinates.
Therefore you need to change:
for (int y = kernnel / 2; y < src.cols - kernnel / 2; y++)
{
    for (int x = kernnel / 2; x < src.rows - kernnel / 2; x++)
    {
To:
//--------------------------------vvvv--------------------
for (int y = kernnel / 2; y < src.rows - kernnel / 2; y++)
{
    //------------------------------------vvvv--------------------
    for (int x = kernnel / 2; x < src.cols - kernnel / 2; x++)
    {
Note that this is consistent with your calls ...at<uchar>(y + yk, x + xk), since cv::Mat::at expects the row (i.e. y coordinate) first.
A side note: see the question "Why is using namespace std; considered bad practice?".
Edit:
After having a look at the MATLAB code in your comment, which applies a different algorithm than the one described in your question, you'll need to make the following changes:
Call something equivalent to MATLAB's im2bw.
Update dst's current pixel, not the one in the neighborhood.
Maybe something like:
cv::Mat vcpi_binary_dilate(cv::Mat src) {
    if (src.empty()) {
        std::cout << "Failed to load image.";
        return src;
    }
    cv::threshold(src, src, 127, 255, cv::THRESH_BINARY); // <-- Replacement for im2bw, might need tuning.
    cv::Mat dst(src.rows, src.cols, CV_8UC1, cv::Scalar(255));
    const int kernnel = 3;
    for (int y = kernnel / 2; y < src.rows - kernnel / 2; y++)
    {
        for (int x = kernnel / 2; x < src.cols - kernnel / 2; x++)
        {
            for (int yk = -kernnel / 2; yk <= kernnel / 2; yk++)
            {
                for (int xk = -kernnel / 2; xk <= kernnel / 2; xk++)
                {
                    if (src.at<uchar>(y + yk, x + xk) == 0) {
                        dst.at<uchar>(y, x) = 0; // <-- Update the current pixel, not the one in the neighborhood.
                    }
                }
            }
        }
    }
    cv::imshow("Image ", src);
    cv::imshow("Image dilate", dst);
    cv::waitKey(0);
    return dst;
}
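For reference, the same effect should be achievable with OpenCV's built-in morphology: initializing dst to 255 and writing 0 whenever the 3x3 neighborhood contains a 0 is a local-minimum filter, which for a binary image is what cv::erode computes (equivalently, a dilation of the black regions). A minimal sketch, assuming a binary 8-bit input; border handling will differ slightly from the hand-rolled loops:

cv::Mat binary, grown;
cv::threshold(src, binary, 127, 255, cv::THRESH_BINARY);
cv::Mat kernel = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(3, 3));
cv::erode(binary, grown, kernel); // min filter: eroding white == dilating black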

Related

Unsharp Masking OpenCV C++

I am new to OpenCV and I am performing unsharp masking using the formula Image + (K * (Image - low-pass filter)); however, the resultant image may have values < 0 or > 255, and I need to write a loop to scale that down.
I tried to write one, but it seems to be incorrect.
Here are the errors:
a) (k * (float)(src.at<uchar>(y, x) - res.at<uchar>(y, x))) can be negative, so it is incorrect to do (uchar)(k * (float)(src.at<uchar>(y, x) - res.at<uchar>(y, x))).
b) src.at<uchar>(y, x) + (k * (float)(src.at<uchar>(y, x) - res.at<uchar>(y, x))) can be greater than 255 and can be smaller than 0.
Can someone help me fix this? Thanks in advance.
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <iostream>
#include <stdlib.h>
#include <math.h>

using namespace std;
using namespace cv;

// pixels lying outside the image, i.e. (x - j, y - k), are reflected back into the image
int reflect(int M, int x)
{
    if (x < 0)
    {
        return -x - 1;
    }
    if (x >= M)
    {
        return 2 * M - x - 1;
    }
    return x;
}

int circular(int M, int x)
{
    if (x < 0)
        return x + M;
    if (x >= M)
        return x - M;
    return x;
}

void noBorderProcessing(Mat src, Mat res, float Kernel[][3])
{
    float sum;
    for (int y = 1; y < src.rows - 1; y++) {
        for (int x = 1; x < src.cols - 1; x++) {
            sum = 0.0;
            for (int k = -1; k <= 1; k++) {
                for (int j = -1; j <= 1; j++) {
                    sum = sum + Kernel[j + 1][k + 1] * src.at<uchar>(y - j, x - k);
                }
            }
            res.at<uchar>(y, x) = sum;
        }
    }
}

void refletedIndexing(Mat src, Mat res, float Kernel[][3])
{
    float sum, x1, y1;
    for (int y = 0; y < src.rows; y++) {
        for (int x = 0; x < src.cols; x++) {
            sum = 0.0;
            for (int k = -1; k <= 1; k++) {
                for (int j = -1; j <= 1; j++) {
                    x1 = reflect(src.cols, x - j);
                    y1 = reflect(src.rows, y - k);
                    sum = sum + Kernel[j + 1][k + 1] * src.at<uchar>(y1, x1);
                }
            }
            res.at<uchar>(y, x) = sum;
        }
    }
}

// coordinates that exceed the bounds of the image wrap around to the opposite side
void circularIndexing(Mat src, Mat res, float Kernel[][3])
{
    float sum, x1, y1;
    for (int y = 0; y < src.rows; y++) {
        for (int x = 0; x < src.cols; x++) {
            sum = 0.0;
            for (int k = -1; k <= 1; k++) {
                for (int j = -1; j <= 1; j++) {
                    x1 = circular(src.cols, x - j);
                    y1 = circular(src.rows, y - k);
                    sum = sum + Kernel[j + 1][k + 1] * src.at<uchar>(y1, x1);
                }
            }
            res.at<uchar>(y, x) = sum;
        }
    }
}

int main()
{
    Mat src, res, dst;
    /// Load an image
    src = cv::imread("Images4DZ/Gray_Win.bmp", cv::IMREAD_ANYDEPTH);

    // low-pass filtering
    float Kernel[3][3] = {
        {1 / 9.0, 1 / 9.0, 1 / 9.0},
        {1 / 9.0, 1 / 9.0, 1 / 9.0},
        {1 / 9.0, 1 / 9.0, 1 / 9.0}
    };
    res = src.clone();
    for (int y = 0; y < src.rows; y++)
        for (int x = 0; x < src.cols; x++)
            res.at<uchar>(y, x) = 0.0;
    circularIndexing(src, res, Kernel);

    // Unsharpen Masking
    dst = cv::Mat::zeros(res.rows, res.cols, CV_8UC1);
    float k = 0.5;
    for (int y = 0; y < res.rows; y++) {
        for (int x = 0; x < res.cols; x++) {
            dst.at<uchar>(y, x) = src.at<uchar>(y, x) + (uchar)(k * (float)(src.at<uchar>(y, x) - res.at<uchar>(y, x)));
        }
    }

    imshow("Source Image", src);
    imshow("Low Pass Filter", res);
    imshow("Unsharpen Masking", dst);
    waitKey();
    return 0;
}
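A minimal sketch of one way to fix both a) and b): do the arithmetic in float first, and only then clamp to [0, 255] with cv::saturate_cast. Variable names follow the question's code:

// Unsharp masking with explicit saturation instead of a raw (uchar) cast.
for (int y = 0; y < res.rows; y++) {
    for (int x = 0; x < res.cols; x++) {
        float sharpened = src.at<uchar>(y, x)
                        + k * (float)(src.at<uchar>(y, x) - res.at<uchar>(y, x));
        dst.at<uchar>(y, x) = cv::saturate_cast<uchar>(sharpened); // clamps to [0, 255]
    }
}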

Warp Image by Diagonal Sine Wave

I'm trying to warp a colour image using the sin function in OpenCV, and I was successful in doing so. However, how can I make a 'diagonal' warping using a sine wave?
My code is this:
Mat result = src.clone();
for (int i = 0; i < src.rows; i++) { // to y
    for (int j = 0; j < src.cols; j++) { // to x
        for (int ch = 0; ch < 3; ch++) { // each colour
            int offset_x = 0;
            int offset_y = (int)(25.0 * sin(3.14 * j / 150));
            if (i + offset_y < src.rows) {
                result.at<Vec3b>(i, j)[ch] = src.at<Vec3b>((i + offset_y) % src.rows, j)[ch];
            }
            else
                result.at<Vec3b>(i, j)[ch] = 0.0;
        }
    }
}
imshow("result", result);
How can I do this? Not drawing a sine graph, but warping an image.
Solved this! A while ago I received a message from someone telling me the image was stolen. It was from Google, actually, but I deleted it to avoid causing any trouble. Thx!
I think it should look like this:
void deform()
{
    float alpha = 45 * CV_PI / 180.0; // wave direction
    float ox = cos(alpha);
    float oy = sin(alpha);
    cv::Mat src = cv::imread("F:/ImagesForTest/lena.jpg");
    // draw a reference grid so the warp is visible
    for (int i = 0; i < src.cols; i += 8)
    {
        cv::line(src, cv::Point(i, 0), cv::Point(i, src.rows), cv::Scalar(255, 255, 255));
    }
    for (int j = 0; j < src.rows; j += 8)
    {
        cv::line(src, cv::Point(0, j), cv::Point(src.cols, j), cv::Scalar(255, 255, 255));
    }
    cv::Mat result = src.clone();
    for (int i = 0; i < src.rows; i++)
    { // to y
        for (int j = 0; j < src.cols; j++)
        { // to x
            float t = (i * oy) + (j * ox); // wave parameter
            for (int ch = 0; ch < 3; ch++)
            { // each colour
                int offset_x = ox * (int)(25.0 * (sin(3.14 * t / 150)));
                int offset_y = oy * (int)(25.0 * (sin(3.14 * t / 150)));
                if (i + offset_y < src.rows && j + offset_x < src.cols && i + offset_y >= 0 && j + offset_x >= 0)
                {
                    result.at<cv::Vec3b>(i, j)[ch] = src.at<cv::Vec3b>(i + offset_y, j + offset_x)[ch];
                }
                else
                    result.at<cv::Vec3b>(i, j)[ch] = 0.0;
            }
        }
    }
    cv::imshow("result", result);
    cv::imwrite("result.jpg", result);
    cv::waitKey();
}
The result:
BTW, it may be better to use cv::remap?
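A minimal sketch of what the cv::remap variant could look like, reusing ox/oy from deform() above (untested against the original images, so treat the details as assumptions):

// Build per-pixel source coordinates, then let remap do the sampling.
cv::Mat map_x(src.size(), CV_32FC1);
cv::Mat map_y(src.size(), CV_32FC1);
for (int i = 0; i < src.rows; i++) {
    for (int j = 0; j < src.cols; j++) {
        float t = i * oy + j * ox; // wave parameter, as in deform()
        float offset = 25.0f * sin(3.14f * t / 150.0f);
        map_x.at<float>(i, j) = j + ox * offset; // source x for pixel (i, j)
        map_y.at<float>(i, j) = i + oy * offset; // source y for pixel (i, j)
    }
}
cv::Mat result;
cv::remap(src, result, map_x, map_y, cv::INTER_LINEAR,
          cv::BORDER_CONSTANT, cv::Scalar(0, 0, 0));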

Laplacian Filter OpenCV C++

I was learning filters in OpenCV, but I'm a little confused about the Laplacian filter. My result is very different from the Laplacian filter in the OpenCV library.
First, I apply a Gaussian filter to the image:
Mat filtroGauss(Mat src){
    const int lenMask = 3; // kernel size (assumed; not shown in the original snippet)
    Mat gauss = src.clone();
    // zero-padded copy of the source, one pixel of border on each side
    Mat temp = Mat::zeros(src.rows + 2, src.cols + 2, DataType<uchar>::type);
    int y, x;
    for (y = 0; y < src.rows; y++){
        for (x = 0; x < src.cols; x++) temp.at<uchar>(y + 1, x + 1) = src.at<uchar>(y, x);
    }
    int mask[lenMask * lenMask];
    mask[0] = mask[2] = mask[6] = mask[8] = 1;
    mask[1] = mask[3] = mask[5] = mask[7] = 2;
    mask[4] = 4;
    int denominatore = 0;
    for (int i = 0; i < lenMask * lenMask; i++) denominatore += mask[i];
    int value[lenMask * lenMask];
    for (y = 0; y < src.rows; y++){
        for (x = 0; x < src.cols; x++){
            // (y + 1, x + 1) is the centre of the window in the padded image
            value[0] = temp.at<uchar>(y, x) * mask[0];
            value[1] = temp.at<uchar>(y, x + 1) * mask[1];
            value[2] = temp.at<uchar>(y, x + 2) * mask[2];
            value[3] = temp.at<uchar>(y + 1, x) * mask[3];
            value[4] = temp.at<uchar>(y + 1, x + 1) * mask[4];
            value[5] = temp.at<uchar>(y + 1, x + 2) * mask[5];
            value[6] = temp.at<uchar>(y + 2, x) * mask[6];
            value[7] = temp.at<uchar>(y + 2, x + 1) * mask[7];
            value[8] = temp.at<uchar>(y + 2, x + 2) * mask[8];
            int avg = 0;
            for (int i = 0; i < lenMask * lenMask; i++) avg += value[i];
            avg = avg / denominatore;
            gauss.at<uchar>(y, x) = avg;
        }
    }
    return gauss;
}
Then I use the Laplacian function:
L(y,x) = f(y-1,x) + f(y+1,x) + f(y,x-1) + f(y,x+1) - 4*f(y,x)
Mat filtroLaplace(Mat src){
    Mat output = src.clone();
    Mat temp = src.clone();
    int y, x;
    for (y = 1; y < src.rows - 1; y++){
        for (x = 1; x < src.cols - 1; x++){
            output.at<uchar>(y, x) = temp.at<uchar>(y - 1, x) + temp.at<uchar>(y + 1, x) + temp.at<uchar>(y, x - 1) + temp.at<uchar>(y, x + 1) - 4 * (temp.at<uchar>(y, x));
        }
    }
    return output;
}
And here is the final result from my code:
OpenCV result:
Let's rewrite the function a little, so it's easier to discuss:
cv::Mat filtroLaplace(cv::Mat src)
{
    cv::Mat output = src.clone();
    for (int y = 1; y < src.rows - 1; y++) {
        for (int x = 1; x < src.cols - 1; x++) {
            int sum = src.at<uchar>(y - 1, x)
                    + src.at<uchar>(y + 1, x)
                    + src.at<uchar>(y, x - 1)
                    + src.at<uchar>(y, x + 1)
                    - 4 * src.at<uchar>(y, x);
            output.at<uchar>(y, x) = sum;
        }
    }
    return output;
}
The source of your problem is sum. Let's examine its range in scope of this algorithm, by taking the two extremes:
Black pixel, surrounded by 4 white. That means 255 + 255 + 255 + 255 - 4 * 0 = 1020.
White pixel, surrounded by 4 black. That means 0 + 0 + 0 + 0 - 4 * 255 = -1020.
When you perform output.at<uchar>(y, x) = sum; there's an implicit cast of the int back to unsigned char: the high-order bits simply get chopped off and the value wraps around.
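A quick check of what that chop does to the two extremes above:

// Implicit int -> uchar conversion is modulo 256.
int hi = 1020, lo = -1020;
unsigned char a = (unsigned char)hi; // 1020 % 256 == 252
unsigned char b = (unsigned char)lo; // -1020 wraps around to 4
std::cout << (int)a << " " << (int)b << std::endl; // prints "252 4"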
The correct approach to handle this situation (which OpenCV takes) is to perform saturation before the actual cast. Essentially:
if (sum < 0) {
    sum = 0;
} else if (sum > 255) {
    sum = 255;
}
OpenCV provides the function cv::saturate_cast<T> to do just this.
There's an additional problem: you're not handling the edge rows/columns of the input image; you just leave them at the original value. Since you're not asking about that, I'll leave solving it as an exercise for the reader.
Code:
cv::Mat filtroLaplace(cv::Mat src)
{
    cv::Mat output = src.clone();
    for (int y = 1; y < src.rows - 1; y++) {
        for (int x = 1; x < src.cols - 1; x++) {
            int sum = src.at<uchar>(y - 1, x)
                    + src.at<uchar>(y + 1, x)
                    + src.at<uchar>(y, x - 1)
                    + src.at<uchar>(y, x + 1)
                    - 4 * src.at<uchar>(y, x);
            output.at<uchar>(y, x) = cv::saturate_cast<uchar>(sum);
        }
    }
    return output;
}
Sample input:
Output of corrected filtroLaplace:
Output of cv::Laplacian:
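For comparison, the built-in result above was presumably produced by something like the following (the exact depth and kernel size are assumptions):

// ksize = 1 uses the same 4-neighbour kernel [0 1 0; 1 -4 1; 0 1 0];
// writing to CV_8U saturates just like cv::saturate_cast above.
cv::Mat lap;
cv::Laplacian(src, lap, CV_8U, 1);
cv::imshow("cv::Laplacian", lap);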

Separable Sobel filter implementation OpenCV C++

I am working on creating my own implementation of a separable Sobel filter. My function takes as input the kernelSize, the horizontal filter of gradient Y as pixelsY1, the vertical filter of gradient Y as pixelsY2, the horizontal filter of gradient X as pixelsX1, and the vertical filter of gradient X as pixelsX2.
The input of X1 is [1, 0, -1] (horizontal)
The input of X2 is [1, 2, 1] (vertical)
The input of Y1 is [1, 2, 1] (horizontal)
The input of Y2 is [1, 0, -1] (vertical)
void gradientFilter1D(Mat& img, int kernelSize, vector<double> pixelsY1, vector<double> pixelsY2, vector<double> pixelsX1, vector<double> pixelsX2)
{
    int sumMin = INT_MAX, sumMax = INT_MIN;
    //gradient X
    vector<vector<int>> pixelsX(img.rows, vector<int>(img.cols, 0));
    //gradient Y
    vector<vector<int>> pixelsY(img.rows, vector<int>(img.cols, 0));
    vector<vector<int>> sumArray(img.rows, vector<int>(img.cols, 0));
    for (int j = kernelSize / 2; j < img.rows - kernelSize / 2; j++)
    {
        for (int i = kernelSize / 2; i < img.cols - kernelSize / 2; i++)
        {
            double totalX = 0;
            double totalY = 0;
            //this is the horizontal multiplication
            for (int x = -kernelSize / 2; x <= kernelSize / 2; x++)
            {
                totalY += img.at<uchar>(j, i + x) * pixelsY1[x + (kernelSize / 2)];
                totalX += img.at<uchar>(j, i + x) * pixelsX1[x + (kernelSize / 2)];
                //cout << int(img.at<uchar>(j, i + x)) << " " << pixelsY1[x + (kernelSize / 2)] << endl;
            }
            pixelsX[j][i] = totalX;
            pixelsY[j][i] = totalY;
        }
    }
    for (int j = kernelSize / 2; j < img.rows - kernelSize / 2; j++)
    {
        for (int i = kernelSize / 2; i < img.cols - kernelSize / 2; i++)
        {
            double totalX = 0;
            double totalY = 0;
            //this is the vertical multiplication
            for (int x = -kernelSize / 2; x <= kernelSize / 2; x++)
            {
                totalY += pixelsY[j + x][i] * pixelsY2[x + (kernelSize / 2)];
                totalX += pixelsX[j + x][i] * pixelsX2[x + (kernelSize / 2)];
                //cout << int(img.at<uchar>(j, i + x)) << " " << pixelsY1[x + (kernelSize / 2)] << endl;
            }
            pixelsX[j][i] = totalX;
            pixelsY[j][i] = totalY;
        }
    }
    for (int j = 0; j < img.rows; j++)
    {
        for (int i = 0; i < img.cols; i++)
        {
            int sum;
            sum = sqrt(pow(pixelsX[j][i], 2) + pow(pixelsY[j][i], 2));
            sumArray[j][i] = sum;
            sumMin = sumMin < sum ? sumMin : sum;
            sumMax = sumMax > sum ? sumMax : sum;
        }
    }
    //normalization
    for (int j = 0; j < img.rows; j++)
        for (int i = 0; i < img.cols; i++)
        {
            sumArray[j][i] = (sumArray[j][i] - sumMin) * ((255.0 - 0) / (sumMax - sumMin)) + 0;
            img.at<uchar>(j, i) = sumArray[j][i];
        }
}
Input Image:
Output Image:
What am I doing wrong?
The separable filter is computed in what are effectively two passes. (The passes can be interleaved, but all the values used by the vertical filter have to have already been computed by the horizontal filter if doing it in that order.) Right below the comment //this is the vertical multiplication there are accesses to pixelsX and pixelsY that are effectively a second pass of the separable filter. The values being accessed for negative values of x have been previously computed, and the ones for positive values of x have not yet been computed by the horizontal pass.
Check out Halide. It makes this sort of code a lot easier and more performant. (A doubly nested std::vector is not a good way to go.)
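Staying within OpenCV, there is also a built-in separable-filter primitive; a minimal sketch with the question's X-gradient kernels (the names and output depth are illustrative):

// sepFilter2D applies the row kernel, then the column kernel.
cv::Mat kx = (cv::Mat_<double>(1, 3) << 1, 0, -1); // horizontal [1, 0, -1]
cv::Mat ky = (cv::Mat_<double>(3, 1) << 1, 2, 1);  // vertical   [1, 2, 1]
cv::Mat gradX;
cv::sepFilter2D(img, gradX, CV_32F, kx, ky);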
Okay, so my mistake was actually in this:
for (int j = kernelSize / 2; j < img.rows - kernelSize / 2; j++)
{
    for (int i = kernelSize / 2; i < img.cols - kernelSize / 2; i++)
    {
        double totalX = 0;
        double totalY = 0;
        //this is the vertical multiplication
        for (int x = -kernelSize / 2; x <= kernelSize / 2; x++)
        {
            totalY += pixelsY[j + x][i] * pixelsY2[x + (kernelSize / 2)];
            totalX += pixelsX[j + x][i] * pixelsX2[x + (kernelSize / 2)];
            //cout << int(img.at<uchar>(j, i + x)) << " " << pixelsY1[x + (kernelSize / 2)] << endl;
        }
        pixelsX[j][i] = totalX; // <---- I overwrite the old values
        pixelsY[j][i] = totalY; // <---- I overwrite the old values
    }
}
So pixelsX[j][i] = totalX (and likewise for Y) is wrong, because I need the old values in order to finish the computation in the rest of the j and i loops. I created another vector of vectors, pushed the totalX and totalY values into it, and this solved my issue.
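A minimal sketch of that fix, keeping everything else the same: the vertical pass writes into fresh buffers so the horizontal-pass values in pixelsX/pixelsY survive until the pass is finished (outX/outY are illustrative names):

vector<vector<int>> outX(img.rows, vector<int>(img.cols, 0));
vector<vector<int>> outY(img.rows, vector<int>(img.cols, 0));
for (int j = kernelSize / 2; j < img.rows - kernelSize / 2; j++)
{
    for (int i = kernelSize / 2; i < img.cols - kernelSize / 2; i++)
    {
        double totalX = 0;
        double totalY = 0;
        for (int x = -kernelSize / 2; x <= kernelSize / 2; x++)
        {
            totalY += pixelsY[j + x][i] * pixelsY2[x + (kernelSize / 2)];
            totalX += pixelsX[j + x][i] * pixelsX2[x + (kernelSize / 2)];
        }
        outX[j][i] = totalX; // write to the new buffers, not pixelsX/pixelsY
        outY[j][i] = totalY;
    }
}
// ...then use outX/outY in place of pixelsX/pixelsY for the magnitude step.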

Histogram of oriented gradients

For a project I'm writing some code to compute the HoG of some images, but I'm stuck on the fact that my orientations are only between 0 and 90 degrees, even though I'm using the atan2 function.
I'm guessing that this problem occurs due to the filter2D function of OpenCV, but I'm not sure whether that is the reason or I'm doing something else wrong:
Vector<Vector<Mat_<float>>> HoG(Mat image) {
    Mat img_x;
    Mat img_y;
    IplImage img = image;
    Mat kern_x = (Mat_<char>(1, 3) << -1, 0, 1);
    Mat kern_y = (Mat_<char>(3, 1) << -1, 0, 1);
    filter2D(image, img_x, image.depth(), kern_x);
    filter2D(image, img_y, image.depth(), kern_y);
    Vector<Vector<Mat_<float>>> histograms;
    for (int y = 0; y < image.rows - size; y += size) {
        Vector<Mat_<float>> temp_hist;
        for (int x = 0; x < image.cols - size; x += size) {
            float total_mag = 0;
            Mat hist = Mat::zeros(1, 8, CV_32FC1);
            for (int i = y; i < y + size; ++i) {
                for (int j = x; j < x + size; ++j) {
                    float grad_x = (float)img_x.at<uchar>(i, j);
                    float grad_y = (float)img_y.at<uchar>(i, j);
                    double ori = myatan2(grad_x, grad_y);
                    float mag = sqrt(pow(grad_x, 2) + pow(grad_y, 2));
                    int bin = round(ori / 45);
                    hist.at<float>(0, (bin - 1 < 0 ? 7 : bin - 1)) += -(float)(ori - ((round(ori / 45) - 1) * 45.0 + 22.5)) / 45.0f;
                    hist.at<float>(0, bin) += -(float)(ori - ((round(ori / 45) - 1) * 45.0 + 22.5)) / 45.0f;
                    total_mag += mag;
                }
            }
            // Normalize the histogram
            for (int i = 0; i < 8; ++i) {
                hist.at<float>(0, i) = hist.at<float>(0, i) / total_mag;
            }
            temp_hist.push_back(hist);
        }
        histograms.push_back(temp_hist);
    }
    return histograms;
}
If you have any other tips to speed up my code, or anything else, that is of course also welcome.
I notice this:
float grad_x = (float)img_x.at<uchar>(i, j);
float grad_y = (float)img_y.at<uchar>(i, j);
You seem to be using uchar. Should this not be char?
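To expand on that: filter2D writes to image.depth(), which is CV_8U for a typical grayscale image, so every negative gradient response saturates to 0. Both gradients then come out non-negative, and atan2 of two non-negative values always lands in the first quadrant, i.e. 0 to 90 degrees. A sketch of one common fix, assuming an 8-bit input: filter into a float depth and read it back with the matching type:

// Keep negative gradient responses by filtering into CV_32F.
cv::Mat img_x, img_y;
cv::filter2D(image, img_x, CV_32F, kern_x);
cv::filter2D(image, img_y, CV_32F, kern_y);

float grad_x = img_x.at<float>(i, j); // can now be negative
float grad_y = img_y.at<float>(i, j);
float ori = cv::fastAtan2(grad_y, grad_x); // full 0..360 degree range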