Smooth or blur image edges using OpenCV - C++

I want to smooth or blur image edges using OpenCV. I followed some links on Stack Overflow but did not get an accurate result.
This is what I tried:
int lowThreshold = 100;
int ratio = 3;
int kernelSize = 3;
Mat srcGray, cannyEdges, blurred;
// detect edges on a denoised grayscale copy
cvtColor(input, srcGray, CV_BGR2GRAY);
blur(srcGray, cannyEdges, Size(3, 3));
Canny(cannyEdges, cannyEdges, lowThreshold, lowThreshold * ratio, kernelSize);
// thicken the edge map so the mask covers a band around each edge
int dilation_size = 5;
Mat element = getStructuringElement(MORPH_CROSS,
Size(2 * dilation_size + 1, 2 * dilation_size + 1),
Point(dilation_size, dilation_size));
dilate(cannyEdges, cannyEdges, element);
// blur the whole image, then copy the blurred pixels back
// only where the dilated edge mask is non-zero
input.copyTo(blurred);
blur(blurred, blurred, Size(3, 3));
blurred.copyTo(output, cannyEdges);
These are images of my input, my output, and the desired result.
I am using this image as input.
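If the hard masked copy is what leaves the result looking rough, one refinement worth trying is to feather the mask and alpha-blend instead of copying. This is a sketch under stated assumptions, not a verified fix: it assumes input is CV_8UC3 and cannyEdges is the dilated mask from the snippet above, and the kernel sizes are illustrative.

// Sketch: feathered blending instead of a hard masked copy.
Mat maskF, mask3, invMask, inputF, blurredF, outputF;
cannyEdges.convertTo(maskF, CV_32F, 1.0 / 255.0); // binary mask -> 0..1 weights
GaussianBlur(maskF, maskF, Size(21, 21), 0);      // feather the mask edges
Mat maskChannels[] = { maskF, maskF, maskF };
merge(maskChannels, 3, mask3);                    // one weight per colour channel
input.convertTo(inputF, CV_32FC3);
blur(inputF, blurredF, Size(3, 3));
subtract(Scalar::all(1.0), mask3, invMask);
// output = mask * blurred + (1 - mask) * original
outputF = blurredF.mul(mask3) + inputF.mul(invMask);
outputF.convertTo(output, CV_8UC3);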

Related

Why do I get bad results with calcBackProject in OpenCV?

I created a project to reduce noise in a target image. I want to work with histograms, so I use calcHist to get the histogram of the image. When I use calcBackProject to recover the image, the result is very different from the source image, which I can't understand. The histograms of the two images show significant differences. Is there a problem with my histogram processing?
By the way, my OpenCV version is 4.5.0.
#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
Mat src = imread("./newpaperback.jpg");
imshow("src", src);
Mat gray;
cvtColor(src, gray, COLOR_BGR2GRAY);
imshow("gray", gray);
// compute a 256-bin histogram of the grayscale image
const int channels[] = {0};
Mat hist;
int dims = 1;
const int histSize[] = {256};
float granges[] = {0, 256};
const float* ranges[] = { granges };
calcHist(&gray, 1, channels, Mat(), hist, dims, histSize, ranges, true, false);
// draw the histogram, scaled so the tallest bin fills the image
int hist_height = 256;
Mat hist_image = Mat::zeros(hist_height, hist_height * 2, CV_8UC3);
double max_val;
minMaxLoc(hist, 0, &max_val, 0, 0);
for(int i = 0; i < hist_height; i++)
{
float bin_val = hist.at<float>(i);
int intensity = bin_val * hist_height / max_val;
rectangle(hist_image, Point(i * 2, hist_height - 1), Point((i + 1) * 2 - 1, hist_height - intensity), Scalar(255, 255, 255));
}
imshow("hist", hist_image);
// back-project the histogram onto the grayscale image
Mat back_img;
calcBackProject(&gray, 1, channels, hist, back_img, ranges, 1, true);
imshow("back", back_img);
waitKey(0);
return 0;
}
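A likely cause, offered as a guess rather than a confirmed diagnosis: calcBackProject writes each pixel's histogram bin value back into the output, and back projection is a per-pixel lookup, not an inverse of calcHist, so it will never reproduce the source image. With raw bin counts in the thousands, most pixels also saturate at 255. Scaling the histogram into the 0..255 range first gives a more readable back projection; a minimal sketch:

// Sketch: normalize the histogram first
// (replace the back-projection step above with this).
Mat histNorm;
normalize(hist, histNorm, 0, 255, NORM_MINMAX, -1, Mat());
Mat back_img;
calcBackProject(&gray, 1, channels, histNorm, back_img, ranges, 1, true);
imshow("back (normalized hist)", back_img);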

Rotate an image using OpenCV with C++ on interface

I'm developing a project that rotates an image using OpenCV in C++ with a UI. I have some problems with this code. Is there any way to fix it?
IplImage* source_image = cvLoadImage(ip, 1);
IplImage *rotate_image = cvCreateImage(cvGetSize(source_image), IPL_DEPTH_8U, 1);
cvNamedWindow("rotate_image", CV_WINDOW_FREERATIO);
int angle = 180;
cvCreateTrackbar("Angle", rotate_image,&angle,360);
int image_height = source_image.rows / 2;
int image_width = source_image.cols / 2;
IplImage *rotatetion = cvCreateImage(cvGetSize(source_image), IPL_DEPTH_8U, 1);
rotatetion = cv2DRotationMatrix(Point(image_height,image_width),(angle - 180), 1);
IplImage *rotated_image = cvCreateImage(cvGetSize(rotatetion), IPL_DEPTH_8U, 1);
cvWarpAffine(dialateImage,Rotated_Image,Rotatetion,dialateImage.size());
cvShowImage("rotateImage", rotated_image);
From https://docs.opencv.org/2.4/doc/tutorials/imgproc/imgtrans/warp_affine/warp_affine.html
Mat rot_mat( 2, 3, CV_32FC1 );
Mat warp_mat( 2, 3, CV_32FC1 );
Mat src, warp_dst, warp_rotate_dst;
/// Load the image
src = imread( argv[1], 1 );
/// Set the dst image the same type and size as src
warp_dst = Mat::zeros( src.rows, src.cols, src.type() );
/// Compute a rotation matrix with respect to the center of the image
Point center = Point( warp_dst.cols/2, warp_dst.rows/2 );
double angle = -50.0;
double scale = 0.6;
/// Get the rotation matrix with the specifications above
rot_mat = getRotationMatrix2D( center, angle, scale );
/// Rotate the warped image
warpAffine( warp_dst, warp_rotate_dst, rot_mat, warp_dst.size() );
Don't use the obsolete C API, as @Miki said, and ask a clear question, not "this code no worky".
Try this; it worked:
IplImage* source_image; // assumed to be loaded earlier, e.g. with cvLoadImage
IplImage *dest = cvCloneImage(source_image);
CvPoint2D32f center;
center.x = dest->width / 2;
center.y = dest->height / 2;
CvMat *mapMatrix = cvCreateMat(2, 3, CV_32FC1);
double angle = System::Convert::ToDouble(numericUpDown1->Value); // C++/CLI: angle comes from a WinForms control
cv2DRotationMatrix(center, angle, 1.0, mapMatrix);
cvWarpAffine(source_image, dest, mapMatrix, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
cvReleaseMat(&mapMatrix);
cvShowImage("Rotated", dest);

Hough Circles, and an overly complicated solution

So I've been working on recognizing a yoga ball with Hough circles. When the image is converted to grayscale, it works straight away. Unfortunately, I have to take a more complicated route because there are multiple of these coloured balls and I only want to detect the blue one.
Unfiltered ball:
Filtered ball:
Steps of my algorithm:
convert from BGR to HSV
blur the image
filter HSV for only select values (in my case dark blue to light blue, due to lighting)
invert the image
use morphology to fill in the highlighted part
blur again
threshold the blurred image so it's a solid shape instead of unrecognisable blurry grayscale
detect with HoughCircles (the Mat is still grayscale, so that isn't the problem)
Code:
#include <iostream>
#include <string>
#include <iomanip>
#include <sstream>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace std;
using namespace cv;
int main(int argc, char *argv[])
{
// Morphology stuff
Mat element5(30, 30, CV_8U, Scalar(1));
int morph_elem = 1; // 2
int morph_size = 33;// 30
int morph_operator = 2; // 2
Mat element = getStructuringElement(morph_elem, Size(2 * morph_size + 1, 2 * morph_size + 1), Point(morph_size, morph_size));
int const max_operator = 4;
int const max_elem = 2;
int const max_kernel_size = 21;
Mat kernel;
// Display Windows Name
namedWindow("Testing Purposes", CV_WINDOW_AUTOSIZE);
Mat src; // loaded image
Mat hsv; // changed src into HSV
Mat Filtered; // filtered w/ inRange for blue ball
Mat Gray; // gray filter for src
Mat dst; // destination for canny edge
Mat detected_edges; // matrix of edges w/ canny
// thresholds for canny
int edgeThresh = 45;
int lowThreshold;
int const max_lowThreshold = 100;
src = imread(argv[1]);
cvtColor(src, Gray, CV_BGR2GRAY);
cvtColor(src, hsv, CV_BGR2HSV);
/*
// CannyEdge Testing
blur(Gray, detected_edges, Size(3, 3)); // blur the grayimage
Canny(detected_edges, detected_edges, lowThreshold, lowThreshold * ratio, kernel_size);
dst = Scalar::all(0);
src.copyTo( dst, detected_edges);
imshow(window_name,dst);
*/
// hsv blur and then thresholds
blur(hsv,hsv,Size(4, 4), Point(-1, -1));
inRange(hsv, Scalar(100, 100, 0), Scalar(200, 200, 255), Filtered); //filtering after blur
vector<Vec3f> circles; //vector for holding info on circles
// houghcircles - attempts to detect circles in the Filtered image we passed it
// morphology defintion for Kernel
bitwise_not(Filtered, Filtered);
// imwrite("/home/bjacobs/Desktop/Testing.jpg", Filtered);
imwrite("/home/bjacobs/Desktop/Testingg.jpg", Filtered);
morphologyEx(Filtered, dst, MORPH_OPEN, element);
blur(dst, dst, Size(20, 20), Point(-1, -1));
Mat baw = dst > 128;
HoughCircles(baw, circles, CV_HOUGH_GRADIENT, 1, baw.rows / 8, 200, 100, 0, 0);
imwrite("/home/bjacobs/Desktop/Testing.jpg", baw);
// Draw the circles detected onto the SRC file
for(size_t i = 0; i < circles.size(); i++)
{
Point center(cvRound(circles[i][0]), cvRound(circles[i][1])); // circles[i] = (x, y, radius)
int radius = cvRound(circles[i][2]);
// circle center
circle(src, center, 3, Scalar(0, 255, 0), -1, 8, 0);
// circle outline
circle(src, center, radius, Scalar(0, 0, 255), 3, 8, 0);
}
imwrite("/home/bjacobs/Desktop/Test.jpg", hsv);
imshow("Testing Purposes", src);
waitKey(0);
}
I've already read as much as I possibly could online on this matter, and nothing I've found so far has helped. Forgive the sloppy commenting; some failed attempts using Canny edge detection are included, so don't pay too much mind to them. Does anyone know a solution to this detection issue?
Instead of using HoughCircles you can do the following (a sketch follows the list):
Segment the blue color.
Find the largest contour.
Fit a minimum enclosing circle to that contour.
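A minimal sketch of that pipeline, assuming the ball is the largest blue region in view; the HSV bounds are illustrative and will need tuning to the actual lighting:

#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;

int main(int argc, char* argv[])
{
    Mat src = imread(argv[1]);
    if (src.empty()) return -1;

    // segment the blue colour (bounds are an assumption)
    Mat hsv, mask;
    cvtColor(src, hsv, COLOR_BGR2HSV);
    inRange(hsv, Scalar(100, 100, 50), Scalar(130, 255, 255), mask);

    // find the largest contour by area
    vector<vector<Point> > contours;
    findContours(mask, contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);
    if (contours.empty()) return 0;
    size_t best = 0;
    for (size_t i = 1; i < contours.size(); i++)
        if (contourArea(contours[i]) > contourArea(contours[best]))
            best = i;

    // fit and draw the minimum enclosing circle
    Point2f center;
    float radius;
    minEnclosingCircle(contours[best], center, radius);
    circle(src, Point(cvRound(center.x), cvRound(center.y)), cvRound(radius), Scalar(0, 0, 255), 3);
    imshow("ball", src);
    waitKey(0);
    return 0;
}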

How to do inverse DFT using magnitude and phase of an image in OpenCV?

I want to show that the phase of an image carries more information than its magnitude, so I want to exchange the magnitudes of two images and then do the inverse DFT.
Here is the code:
int main()
{
Mat I1 = imread("lena.jpg", CV_LOAD_IMAGE_GRAYSCALE);
Mat I2 = imread("peppers.png", CV_LOAD_IMAGE_GRAYSCALE);
Mat padded1,padded2;
//expand input image to optimal size
int m1= getOptimalDFTSize( I1.rows );
int n1 = getOptimalDFTSize( I1.cols );
int m2= getOptimalDFTSize( I2.rows );
int n2 = getOptimalDFTSize( I2.cols );
// on the border add zero values
copyMakeBorder(I1, padded1, 0, m1 - I1.rows, 0, n1 - I1.cols, BORDER_CONSTANT, Scalar::all(0));
copyMakeBorder(I2, padded2, 0, m2 - I2.rows, 0, n2 - I2.cols, BORDER_CONSTANT, Scalar::all(0));
Mat planes1[] = {Mat_<float>(padded1), Mat::zeros(padded1.size(), CV_32F)};
Mat planes2[] = {Mat_<float>(padded2), Mat::zeros(padded2.size(), CV_32F)};
Mat complexI, complexII;
// Add to the expanded another plane with zeros
merge(planes1, 2, complexI);
merge(planes2, 2, complexII);
dft(complexI, complexI);
dft(complexII, complexII);
// compute the magnitude and phase then switch to logarithmic scale
// => magnitude:log(1 + sqrt(Re(DFT(I))^2 + Im(DFT(I))^2)), phase:arctan(Im(DFT(I)),Re(DFT(I)))
split(complexI, planes1);// planes[0] = Re(DFT(I)), planes[1] = Im(DFT(I))
Mat ph1, magI1;
phase(planes1[0], planes1[1], ph1);//ph1 = phase
magnitude(planes1[0], planes1[1], magI1);// magI1 = magnitude
magI1 = magI1(Rect(0, 0, magI1.cols & -2, magI1.rows & -2));
ph1 = ph1(Rect(0, 0, ph1.cols & -2, ph1.rows & -2));
split(complexII, planes2);// planes[0] = Re(DFT(I)), planes[1] = Im(DFT(I))
Mat ph2, magI2;
phase(planes2[0], planes2[1], ph2);//ph2 = phase
magnitude(planes2[0], planes2[1], magI2);// mag2 = magnitude
magI2 = magI2(Rect(0, 0, magI2.cols & -2, magI2.rows & -2));
ph2 = ph2(Rect(0, 0, ph2.cols & -2, ph2.rows & -2));
planes1[1] = ph1; planes1[0] = magI2; // swap in the other image's magnitude
planes2[1] = ph2; planes2[0] = magI1; // note: these planes hold magnitude/phase, not Re/Im, and are never merged back before the inverse DFT
dft(complexI,complexI,DFT_INVERSE);
dft(complexII,complexII,DFT_INVERSE);
imshow("image", complexI);
waitKey();
}
I simply merge the magnitude and phase together and then do the IDFT; the result seems totally wrong.
I guess from your question that something in your DFT round trip is not working.
Try the code below (after adding your plane splitting) and see if it works.
The images have to be exactly the same size.
If something else is wrong, please show your images and results; maybe your code is correct and you are just expecting the wrong thing.
Here is the working example:
// Load an image
Mat I1 = imread("lena.jpg", CV_LOAD_IMAGE_GRAYSCALE);
Mat I2 = imread("peppers.png", CV_LOAD_IMAGE_GRAYSCALE);
Mat fI1;
Mat fI2;
I1.convertTo(fI1, CV_32F);
I2.convertTo(fI2, CV_32F);
//Perform DFT
Mat fourierTransform1;
Mat fourierTransform2;
dft(fI1, fourierTransform1, DFT_SCALE|DFT_COMPLEX_OUTPUT);
dft(fI2, fourierTransform2, DFT_SCALE|DFT_COMPLEX_OUTPUT);
//your split plane and everything else
//Perform IDFT
Mat inverseTransform1;
Mat inverseTransform2;
dft(fourierTransform1, inverseTransform1, DFT_INVERSE|DFT_REAL_OUTPUT);
dft(fourierTransform2, inverseTransform2, DFT_INVERSE|DFT_REAL_OUTPUT);
Mat result1;
Mat result2;
inverseTransform1.convertTo(result1, CV_8U);
inverseTransform2.convertTo(result2, CV_8U);
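A note on the scaling here: because the forward dft passes DFT_SCALE, the inverse pass needs no renormalization, which is why converting straight to CV_8U recovers the original intensity range.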
You can't combine the magnitude and phase with the cv::merge() function; instead you should use cv::polarToCart(). Here's my code to do what you want:
#include <opencv2/opencv.hpp>

using namespace cv;
// Rearrange the quadrants of a Fourier image so that the origin is at
// the image center
void shiftDFT(Mat &fImage )
{
Mat tmp, q0, q1, q2, q3;
// first crop the image, if it has an odd number of rows or columns
fImage = fImage(Rect(0, 0, fImage.cols & -2, fImage.rows & -2));
int cx = fImage.cols / 2;
int cy = fImage.rows / 2;
// rearrange the quadrants of Fourier image
// so that the origin is at the image center
q0 = fImage(Rect(0, 0, cx, cy));
q1 = fImage(Rect(cx, 0, cx, cy));
q2 = fImage(Rect(0, cy, cx, cy));
q3 = fImage(Rect(cx, cy, cx, cy));
q0.copyTo(tmp);
q3.copyTo(q0);
tmp.copyTo(q3);
q1.copyTo(tmp);
q2.copyTo(q1);
tmp.copyTo(q2);
}
int main()
{
// Load an image
Mat I1 = imread("lena.jpg", CV_LOAD_IMAGE_GRAYSCALE);
Mat I2 = imread("pepper.jpg", CV_LOAD_IMAGE_GRAYSCALE);
Mat fI1;
Mat fI2;
I1.convertTo(fI1, CV_32F);
I2.convertTo(fI2, CV_32F);
//expand input image to optimal size
int m = getOptimalDFTSize( I1.rows );
int n = getOptimalDFTSize( I1.cols );
Mat padded1, padded2;
// on the border add zero values
copyMakeBorder(fI1, padded1, 0, m - I1.rows, 0, n - I1.cols, BORDER_CONSTANT, Scalar::all(0));
copyMakeBorder(fI2, padded2, 0, m - I2.rows, 0, n - I2.cols, BORDER_CONSTANT, Scalar::all(0));
//Perform DFT
Mat fourierTransform1;
Mat fourierTransform2;
Mat planes1[2], planes2[2];
dft(padded1, fourierTransform1, DFT_SCALE|DFT_COMPLEX_OUTPUT); // transform the padded images
dft(padded2, fourierTransform2, DFT_SCALE|DFT_COMPLEX_OUTPUT);
shiftDFT(fourierTransform1);
shiftDFT(fourierTransform2);
split(fourierTransform1, planes1);// planes[0] = Re(DFT(I)), planes[1] = Im(DFT(I))
split(fourierTransform2, planes2);// planes[0] = Re(DFT(I)), planes[1] = Im(DFT(I))
Mat ph1, mag1;
cartToPolar(planes1[0], planes1[1], mag1, ph1); // outputs are allocated by cartToPolar
Mat ph2, mag2;
cartToPolar(planes2[0], planes2[1], mag2, ph2);
polarToCart(mag1, ph2, planes1[0], planes1[1]); // magnitude of image 1 with phase of image 2
polarToCart(mag2, ph1, planes2[0], planes2[1]); // magnitude of image 2 with phase of image 1
merge(planes1, 2, fourierTransform1);
merge(planes2, 2, fourierTransform2);
shiftDFT(fourierTransform1);
shiftDFT(fourierTransform2);
//Perform IDFT
Mat inverseTransform1, inverseTransform2;
dft(fourierTransform1, inverseTransform1, DFT_INVERSE|DFT_REAL_OUTPUT);
dft(fourierTransform2, inverseTransform2, DFT_INVERSE|DFT_REAL_OUTPUT);
namedWindow("original image 1");
imshow("original image 1", I1);
namedWindow("original image 2");
imshow("original image 2", I2);
waitKey(0);
cv::Mat out1, out2;
inverseTransform1.convertTo(out1, CV_8U);
inverseTransform2.convertTo(out2, CV_8U);
namedWindow("result image 1");
imshow("result image 1", out1);
namedWindow("result image 2");
imshow("result image 2", out2);
waitKey(0);
}
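A note on the quadrant shifts: the shiftDFT calls before the split and after the merge undo each other, and since cartToPolar and polarToCart operate element-wise, the magnitude/phase swap itself would work without them; they mainly matter if you also want to inspect a centered spectrum (and shiftDFT's cropping assumes even dimensions).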

How to do inverse DFT in OpenCV [duplicate]

This question already has an answer here:
Inverse fourier transformation in OpenCV
(1 answer)
Closed 6 years ago.
I'm trying to implement the inverse DFT using OpenCV in C++.
I downloaded the complete DFT example from docs.opencv.org and just adjusted a couple of lines to do the inverse.
My DFT code is like this:
Mat DFT(const char* filename)
{
Mat I = imread(filename, CV_LOAD_IMAGE_GRAYSCALE);
if (I.empty())
{
Mat emty(7, 7, CV_32FC2, Scalar(1, 3));
return emty;
}
Mat padded; //expand input image to optimal size
int m = getOptimalDFTSize(I.rows);
int n = getOptimalDFTSize(I.cols); // on the border add zero values
copyMakeBorder(I, padded, 0, m - I.rows, 0, n - I.cols, BORDER_CONSTANT, Scalar::all(0));
Mat planes[] = { Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F) };
Mat complexI;
merge(planes, 2, complexI); // Add to the expanded another plane with zeros
dft(complexI, complexI); // this way the result may fit in the source matrix
// compute the magnitude and switch to logarithmic scale
// => log(1 + sqrt(Re(DFT(I))^2 + Im(DFT(I))^2))
split(complexI, planes); // planes[0] = Re(DFT(I), planes[1] = Im(DFT(I))
magnitude(planes[0], planes[1], planes[0]);// planes[0] = magnitude
Mat magI = planes[0];
magI += Scalar::all(1); // switch to logarithmic scale
log(magI, magI);
normalize(magI, magI, 0, 1, CV_MINMAX); // Transform the matrix with float values into a
// viewable image form (float between values 0 and 1).
imshow("Input Image", I); // Show the result
imshow(filename, magI);
// waitKey();
return magI;
}
and did the IDFT by just changing dft to idft. But the output just looked like noise. What did I do wrong? I thought the DFT and IDFT were basically the same...
Mat IDFT(Mat src)
{
Mat I = src;
Mat padded; //expand input image to optimal size
int m = getOptimalDFTSize(I.rows);
int n = getOptimalDFTSize(I.cols); // on the border add zero values
copyMakeBorder(I, padded, 0, m - I.rows, 0, n - I.cols, BORDER_CONSTANT, Scalar::all(0));
Mat planes[] = { Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F) };
Mat complexI;
merge(planes, 2, complexI); // Add to the expanded another plane with zeros
dft(complexI, complexI, DFT_INVERSE); // this way the result may fit in the source matrix
// compute the magnitude and switch to logarithmic scale
// => log(1 + sqrt(Re(IDFT(I))^2 + Im(IDFT(I))^2))
split(complexI, planes); // planes[0] = Re(IDFT(I), planes[1] = Im(IDFT(I))
magnitude(planes[0], planes[1], planes[0]);// planes[0] = magnitude
Mat magI = planes[0];
magI += Scalar::all(1); // switch to logarithmic scale
log(magI, magI);
normalize(magI, magI, 0, 1, CV_MINMAX);
imshow("forged map", magI);
return magI;
}
You have to rewrite your code like this to get the inverse DFT, which is the original image you read:
#include "stdafx.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
using namespace std;
using namespace cv;
int main()
{
Mat I = imread("test.tif", CV_LOAD_IMAGE_GRAYSCALE);
if( I.empty())
return -1;
Mat padded; //expand input image to optimal size
int m = getOptimalDFTSize( I.rows );
int n = getOptimalDFTSize( I.cols ); // on the border add zero values
copyMakeBorder(I, padded, 0, m - I.rows, 0, n - I.cols, BORDER_CONSTANT, Scalar::all(0));
Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
Mat complexI;
merge(planes, 2, complexI); // Add to the expanded another plane with zeros
dft(complexI, complexI); // this way the result may fit in the source matrix
// compute the magnitude and switch to logarithmic scale
// => log(1 + sqrt(Re(DFT(I))^2 + Im(DFT(I))^2))
split(complexI, planes); // planes[0] = Re(DFT(I), planes[1] = Im(DFT(I))
magnitude(planes[0], planes[1], planes[0]);// planes[0] = magnitude
Mat magI = planes[0];
magI += Scalar::all(1); // switch to logarithmic scale
log(magI, magI);
// crop the spectrum, if it has an odd number of rows or columns
magI = magI(Rect(0, 0, magI.cols & -2, magI.rows & -2));
// rearrange the quadrants of Fourier image so that the origin is at the image center
int cx = magI.cols/2;
int cy = magI.rows/2;
Mat q0(magI, Rect(0, 0, cx, cy)); // Top-Left - Create a ROI per quadrant
Mat q1(magI, Rect(cx, 0, cx, cy)); // Top-Right
Mat q2(magI, Rect(0, cy, cx, cy)); // Bottom-Left
Mat q3(magI, Rect(cx, cy, cx, cy)); // Bottom-Right
Mat tmp; // swap quadrants (Top-Left with Bottom-Right)
q0.copyTo(tmp);
q3.copyTo(q0);
tmp.copyTo(q3);
q1.copyTo(tmp); // swap quadrant (Top-Right with Bottom-Left)
q2.copyTo(q1);
tmp.copyTo(q2);
normalize(magI, magI, 0, 1, CV_MINMAX); // Transform the matrix with float values into a
// viewable image form (float between values 0 and 1).
imshow("Input Image" , I ); // Show the result
imshow("Spectrum Magnitude", magI);
waitKey();
//calculating the idft
cv::Mat inverseTransform;
cv::dft(complexI, inverseTransform, cv::DFT_INVERSE|cv::DFT_REAL_OUTPUT);
normalize(inverseTransform, inverseTransform, 0, 1, CV_MINMAX);
imshow("Reconstructed", inverseTransform);
waitKey();
return 0;
}
I just added this part to your code:
//calculating the idft
cv::Mat inverseTransform;
cv::dft(complexI, inverseTransform, cv::DFT_INVERSE|cv::DFT_REAL_OUTPUT);
normalize(inverseTransform, inverseTransform, 0, 1, CV_MINMAX);
imshow("Reconstructed", inverseTransform);
waitKey();
For some reason, using the normalize function in the given IDFT block doesn't give exactly the desired original image (contrast-wise). Instead, use convertTo. To be precise, replace the normalize call with the following line:
inverseTransform.convertTo(inverseTransform, CV_8U);
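A caveat on that replacement, stated as an assumption rather than a tested claim: convertTo(CV_8U) only lands in the right 0..255 range if the inverse transform is scaled back by the element count, since the forward dft in this example ran without DFT_SCALE. Adding DFT_SCALE to the inverse keeps the round trip at the original intensity range:

//calculating the idft, scaled so convertTo(CV_8U) preserves the range
cv::Mat inverseTransform;
cv::dft(complexI, inverseTransform, cv::DFT_INVERSE | cv::DFT_REAL_OUTPUT | cv::DFT_SCALE);
inverseTransform.convertTo(inverseTransform, CV_8U);
cv::imshow("Reconstructed", inverseTransform);
cv::waitKey();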