Wiener deconvolution using OpenCV - C++

I have developed a way to estimate the point spread function of a motion blur, and now I'd like to use the PSF to perform deconvolution. I decided to use the Wiener method.
cv::Mat deconvolution(cv::Mat input, cv::Mat kernel){
    cv::Mat Fin, Fkern, padded_kern, Fdeblur, out;
    cv::normalize(kernel, kernel);
    cv::dft(input, Fin, cv::DFT_COMPLEX_OUTPUT);
    cv::copyMakeBorder(kernel, padded_kern, 0, Fin.rows - kernel.rows, 0, Fin.cols - kernel.cols, cv::BORDER_CONSTANT, cv::Scalar::all(0));
    cv::dft(padded_kern, Fkern, cv::DFT_COMPLEX_OUTPUT);
    cv::mulSpectrums(Fin, Fkern, Fdeblur, 0, true);
    cv::dft(Fdeblur, out, cv::DFT_INVERSE | cv::DFT_REAL_OUTPUT);
    cv::normalize(out, out, 0, 1, cv::NORM_MINMAX);
    return out;
}
However, even after setting the last option of cv::mulSpectrums(Fin, Fkern, Fdeblur, 0, true) to true, I still seem to be performing a normal convolution. Shouldn't the last true option mean that I am multiplying by the conjugate and therefore dividing by the kernel?
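Note that with the last argument set to true, mulSpectrums computes the per-element product G*conj(H), which is a cross-correlation, not a division. A Wiener-style estimate needs that product divided by |H|^2 + k. Below is a minimal sketch of the missing division step, reusing Fin and Fkern from the code above; the constant k is an assumed noise term, not something from the original post:
// Sketch only: F_hat = G * conj(H) / (|H|^2 + k)
const double k = 0.01;                        // assumed regularization constant
cv::Mat num, denom, planes[2], Fwiener;
cv::mulSpectrums(Fin, Fkern, num, 0, true);   // G * conj(H)
cv::split(Fkern, planes);                     // planes[0] = Re(H), planes[1] = Im(H)
cv::magnitude(planes[0], planes[1], denom);   // |H|
cv::multiply(denom, denom, denom);            // |H|^2
denom += k;                                   // |H|^2 + k
cv::split(num, planes);                       // divide Re and Im separately
cv::divide(planes[0], denom, planes[0]);
cv::divide(planes[1], denom, planes[1]);
cv::merge(planes, 2, Fwiener);                // inverse-DFT this instead of Fdeblur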

Just implemented this filter:
#include <windows.h>
#include <iostream>
#include <fstream>
#include <vector>
#include <stdio.h>
#include <algorithm>
#include <iterator>
#include "opencv2/opencv.hpp"
using namespace std;
using namespace cv;
// Swap the diagonal quadrants (equivalent to fftshift for even sizes)
void Recomb(Mat &src, Mat &dst)
{
    int cx = src.cols >> 1;
    int cy = src.rows >> 1;
    Mat tmp;
    tmp.create(src.size(), src.type());
    src(Rect(0, 0, cx, cy)).copyTo(tmp(Rect(cx, cy, cx, cy)));
    src(Rect(cx, cy, cx, cy)).copyTo(tmp(Rect(0, 0, cx, cy)));
    src(Rect(cx, 0, cx, cy)).copyTo(tmp(Rect(0, cy, cx, cy)));
    src(Rect(0, cy, cx, cy)).copyTo(tmp(Rect(cx, 0, cx, cy)));
    dst = tmp;
}
void convolveDFT(Mat& A, Mat& B, Mat& C)
{
    // compute the size of the DFT transform
    Size dftSize;
    dftSize.width = getOptimalDFTSize(A.cols + B.cols - 1);
    dftSize.height = getOptimalDFTSize(A.rows + B.rows - 1);
    // allocate temporary buffers and initialize them with 0's
    Mat tempA(dftSize, A.type(), Scalar::all(0));
    Mat tempB(dftSize, B.type(), Scalar::all(0));
    // copy A and B to the top-left corners of tempA and tempB, respectively
    Mat roiA(tempA, Rect(0, 0, A.cols, A.rows));
    A.copyTo(roiA);
    Mat roiB(tempB, Rect(0, 0, B.cols, B.rows));
    B.copyTo(roiB);
    // now transform the padded A & B in-place;
    // use the "nonzeroRows" hint for faster processing
    dft(tempA, tempA, 0, A.rows);
    dft(tempB, tempB, 0, B.rows);
    // multiply the spectrums;
    // the function handles packed spectrum representations well
    mulSpectrums(tempA, tempB, tempA, 0);
    // transform the product back from the frequency domain
    dft(tempA, tempA, DFT_INVERSE + DFT_SCALE);
    // crop the centered, A-sized part of the full convolution into C,
    // so the output has the same size as the input image
    C = tempA(Rect((dftSize.width - A.cols) / 2, (dftSize.height - A.rows) / 2, A.cols, A.rows)).clone();
    // all the temporary buffers will be deallocated automatically
}
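// A hedged usage sketch for convolveDFT(); the image path and box kernel
// below are placeholders for illustration, not from the original post:
//   cv::Mat img = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
//   img.convertTo(img, CV_32F, 1.0 / 255.0);
//   cv::Mat box = cv::Mat::ones(9, 9, CV_32F) / 81.0f;  // normalized box blur
//   cv::Mat blurredOut;
//   convolveDFT(img, box, blurredOut);  // output is the same size as img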
//----------------------------------------------------------
// Compute Re and Im planes of FFT from Image
//----------------------------------------------------------
void ForwardFFT(Mat &Src, Mat *FImg)
{
    int M = getOptimalDFTSize(Src.rows);
    int N = getOptimalDFTSize(Src.cols);
    Mat padded;
    copyMakeBorder(Src, padded, 0, M - Src.rows, 0, N - Src.cols, BORDER_CONSTANT, Scalar::all(0));
    Mat planes[] = { Mat_<double>(padded), Mat::zeros(padded.size(), CV_64FC1) };
    Mat complexImg;
    merge(planes, 2, complexImg);
    dft(complexImg, complexImg);
    split(complexImg, planes);
    // crop result
    planes[0] = planes[0](Rect(0, 0, Src.cols, Src.rows));
    planes[1] = planes[1](Rect(0, 0, Src.cols, Src.rows));
    FImg[0] = planes[0].clone();
    FImg[1] = planes[1].clone();
}
//----------------------------------------------------------
// Compute image from Re and Im parts of FFT
//----------------------------------------------------------
void InverseFFT(Mat *FImg, Mat &Dst)
{
    Mat complexImg;
    merge(FImg, 2, complexImg);
    dft(complexImg, complexImg, DFT_INVERSE + DFT_SCALE);
    split(complexImg, FImg);
    Dst = FImg[0];
}
//----------------------------------------------------------
// Wiener filter
//----------------------------------------------------------
void wienerFilter(Mat &src, Mat &dst, Mat &_h, double k)
{
    //---------------------------------------------------
    // Small epsilon to avoid division by zero
    //---------------------------------------------------
    const double eps = 1E-8;
    //---------------------------------------------------
    int ImgW = src.size().width;
    int ImgH = src.size().height;
    //--------------------------------------------------
    Mat Yf[2];
    ForwardFFT(src, Yf);
    //--------------------------------------------------
    // pad the PSF to the image size, centered
    Mat h = Mat::zeros(ImgH, ImgW, CV_64FC1);
    int padx = h.cols - _h.cols;
    int pady = h.rows - _h.rows;
    copyMakeBorder(_h, h, pady / 2, pady - pady / 2, padx / 2, padx - padx / 2, BORDER_CONSTANT, Scalar::all(0));
    Mat Hf[2];
    ForwardFFT(h, Hf);
    //--------------------------------------------------
    Mat Fu[2];
    Fu[0] = Mat::zeros(ImgH, ImgW, CV_64FC1);
    Fu[1] = Mat::zeros(ImgH, ImgW, CV_64FC1);
    complex<double> a;
    complex<double> b;
    complex<double> c;
    double Hf_Re;
    double Hf_Im;
    double Phf;
    double hfz;
    double hz;
    double A;
    for (int i = 0; i < h.rows; i++)
    {
        for (int j = 0; j < h.cols; j++)
        {
            Hf_Re = Hf[0].at<double>(i, j);
            Hf_Im = Hf[1].at<double>(i, j);
            Phf = Hf_Re * Hf_Re + Hf_Im * Hf_Im;  // |H|^2
            hfz = (Phf < eps) * eps;              // guard for tiny |H|^2
            hz = (h.at<double>(i, j) > 0);
            A = Phf / (Phf + hz + k);             // Wiener attenuation factor
            a = complex<double>(Yf[0].at<double>(i, j), Yf[1].at<double>(i, j));
            b = complex<double>(Hf_Re + hfz, Hf_Im + hfz);
            c = a / b; // deconvolution; the guards above avoid division by zero
            Fu[0].at<double>(i, j) = (c.real() * A);
            Fu[1].at<double>(i, j) = (c.imag() * A);
        }
    }
    InverseFFT(Fu, dst);
    Recomb(dst, dst);
}
// ---------------------------------
//
// ---------------------------------
int main(int argc, char** argv)
{
    namedWindow("Image");
    namedWindow("Kernel");
    namedWindow("Result");
    Mat Img = imread("F:\\ImagesForTest\\lena.jpg", 0); // source image
    Img.convertTo(Img, CV_32FC1, 1.0 / 255.0);
    Mat kernel = imread("F:\\ImagesForTest\\Point.jpg", 0); // PSF
    //resize(kernel, kernel, Size(), 0.5, 0.5);
    kernel.convertTo(kernel, CV_32FC1, 1.0 / 255.0);
    float kernel_sum = cv::sum(kernel)[0];
    kernel /= kernel_sum; // normalize the PSF so it sums to 1
    int width = Img.cols;
    int height = Img.rows;
    Mat resim;
    convolveDFT(Img, kernel, resim);
    Mat resim2;
    kernel.convertTo(kernel, CV_64FC1);
    // apply the filter
    wienerFilter(resim, resim2, kernel, 0.01);
    imshow("Filtering result", resim2);
    imshow("Kernel", kernel * 255);
    imshow("Image", Img);
    imshow("Result", resim);
    waitKey(0);
}
Results look like this (as you can see, it is not a 100% restoration):

Related

Unsharp mask implementation with OpenCV

I want to apply an unsharp mask like Adobe Photoshop's.
I know this answer, but it's not as sharp as Photoshop.
Photoshop has 3 parameters in the Smart Sharpen dialog: Amount, Radius, Reduce Noise; I want to implement all of them.
This is the code I wrote, based on various sources on SO.
The result is good at some stages ("blurred", "unsharpMask", "highContrast"), but in the last stage ("retval") the result is not good.
Where am I wrong? What should I improve?
Is it possible to improve the following algorithm in terms of performance?
#include "opencv2/opencv.hpp"
#include "fstream"
#include "iostream"
#include <chrono>
using namespace std;
using namespace cv;
// from https://docs.opencv.org/3.4/d3/dc1/tutorial_basic_linear_transform.html
void increaseContrast(Mat img, Mat* dst, int amountPercent)
{
*dst = img.clone();
double alpha = amountPercent / 100.0;
*dst *= alpha;
}
// from https://stackoverflow.com/a/596243/7206675
float luminanceAsPercent(Vec3b color)
{
return (0.2126 * color[2]) + (0.7152 * color[1]) + (0.0722 * color[0]);
}
// from https://stackoverflow.com/a/2938365/7206675
Mat usm(Mat original, int radius, int amountPercent, int threshold)
{
// copy original for our return value
Mat retval = original.clone();
// create the blurred copy
Mat blurred;
cv::GaussianBlur(original, blurred, cv::Size(0, 0), radius);
cv::imshow("blurred", blurred);
waitKey();
// subtract blurred from original, pixel-by-pixel to make unsharp mask
Mat unsharpMask;
cv::subtract(original, blurred, unsharpMask);
cv::imshow("unsharpMask", unsharpMask);
waitKey();
Mat highContrast;
increaseContrast(original, &highContrast, amountPercent);
cv::imshow("highContrast", highContrast);
waitKey();
// assuming row-major ordering
for (int row = 0; row < original.rows; row++)
{
for (int col = 0; col < original.cols; col++)
{
Vec3b origColor = original.at<Vec3b>(row, col);
Vec3b contrastColor = highContrast.at<Vec3b>(row, col);
Vec3b difference = contrastColor - origColor;
float percent = luminanceAsPercent(unsharpMask.at<Vec3b>(row, col));
Vec3b delta = difference * percent;
if (*(uchar*)&delta > threshold) {
retval.at<Vec3b>(row, col) += delta;
//retval.at<Vec3b>(row, col) = contrastColor;
}
}
}
return retval;
}
int main(int argc, char* argv[])
{
if (argc < 2) exit(1);
Mat mat = imread(argv[1]);
mat = usm(mat, 4, 110, 66);
imshow("usm", mat);
waitKey();
//imwrite("USM.png", mat);
}
Original Image:
Blurred stage - Seemingly good:
UnsharpMask stage - Seemingly good:
HighContrast stage - Seemingly good:
Result stage of my code - Looks bad!
Result From Photoshop - Excellent!
First of all, judging by the artefacts that Photoshop left on the borders of the petals, I'd say that it applies the mask by using a weighted sum between the original image and the mask, as in the answer you tried first.
I modified your code to implement this scheme and tried to tweak the parameters to get as close as possible to the Photoshop result, but I couldn't without creating a lot of noise. I wouldn't try to guess what Photoshop is doing exactly (I would definitely like to know), but I discovered that its result is fairly reproducible by applying some filter on the mask to reduce the noise. The algorithm scheme would be:
blurred = blur(image, Radius)
mask = image - blurred
mask = some_filter(mask)
sharpened = (mask < Threshold) ? image : image + Amount * mask
I implemented this and tried using basic filters (median blur, mean filter, etc.) on the mask, and this is the kind of result I can get:
which is a bit noisier than the Photoshop image but, in my opinion, close enough to what you wanted.
On another note, it will of course depend on the usage you have for your filter, but I think the settings you used in Photoshop are too strong (you have big overshoots near the petal borders). The following is sufficient for a nice image to the naked eye, with limited overshoot:
Finally, here is the code I used to generate the two images above:
#include <opencv2/opencv.hpp>
#include <iostream>
using namespace std;
using namespace cv;
Mat usm(Mat original, float radius, float amount, float threshold)
{
    // work on floating point images to avoid overflows
    cv::Mat input;
    original.convertTo(input, CV_32FC3);
    // copy original for our return value
    Mat retbuf = input.clone();
    // create the blurred copy
    Mat blurred;
    cv::GaussianBlur(input, blurred, cv::Size(0, 0), radius);
    // subtract blurred from original, pixel-by-pixel, to make the unsharp mask
    Mat unsharpMask;
    cv::subtract(input, blurred, unsharpMask);
    // --- filter on the mask ---
    //cv::medianBlur(unsharpMask, unsharpMask, 3);
    cv::blur(unsharpMask, unsharpMask, {3, 3});
    // --- end filter ---
    // apply mask to image
    for (int row = 0; row < original.rows; row++)
    {
        for (int col = 0; col < original.cols; col++)
        {
            Vec3f origColor = input.at<Vec3f>(row, col);
            Vec3f difference = unsharpMask.at<Vec3f>(row, col);
            if (cv::norm(difference) >= threshold) {
                retbuf.at<Vec3f>(row, col) = origColor + amount * difference;
            }
        }
    }
    // convert back to unsigned char
    cv::Mat ret;
    retbuf.convertTo(ret, CV_8UC3);
    return ret;
}
int main(int argc, char* argv[])
{
    if (argc < 3) exit(1);
    Mat original = imread(argv[1]);
    Mat expected = imread(argv[2]);
    // closer to Photoshop
    Mat current = usm(original, 0.8, 12., 1.);
    // better settings (in my opinion)
    //Mat current = usm(original, 2., 1., 3.);
    cv::imwrite("current.png", current);
    // comparison plot
    cv::Rect crop(127, 505, 163, 120);
    cv::Mat crops[3];
    cv::resize(original(crop), crops[0], {0, 0}, 4, 4, cv::INTER_NEAREST);
    cv::resize(expected(crop), crops[1], {0, 0}, 4, 4, cv::INTER_NEAREST);
    cv::resize( current(crop), crops[2], {0, 0}, 4, 4, cv::INTER_NEAREST);
    char const* texts[] = {"original", "photoshop", "current"};
    cv::Mat plot = cv::Mat::zeros(120 * 4, 163 * 4 * 3, CV_8UC3);
    for (int i = 0; i < 3; ++i) {
        cv::Rect region(163 * 4 * i, 0, 163 * 4, 120 * 4);
        crops[i].copyTo(plot(region));
        cv::putText(plot, texts[i], region.tl() + cv::Point{5, 40},
                    cv::FONT_HERSHEY_SIMPLEX, 1.5, CV_RGB(255, 0, 0), 2.0);
    }
    cv::imwrite("plot.png", plot);
}
Here's my attempt at 'smart' unsharp masking. The result isn't very good, but I'm posting it anyway. The Wikipedia article on unsharp masking has details about smart sharpening.
Several things I did differently:
Convert BGR to Lab color space and apply the enhancements to the brightness channel
Use an edge map to apply enhancement to the edge regions
Original:
Enhanced: sigma=2 amount=3 low=0.3 high=.8 w=2
Edge map: low=0.3 high=.8 w=2
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include <cstring>
cv::Mat not_so_smart_sharpen(
        const cv::Mat& bgr,
        double sigma,
        double amount,
        double canny_low_threshold_weight,
        double canny_high_threshold_weight,
        int edge_weight)
{
    cv::Mat enhanced_bgr, lab, enhanced_lab, channel[3], blurred, difference, bw, kernel, edges;
    // convert to Lab
    cv::cvtColor(bgr, lab, cv::ColorConversionCodes::COLOR_BGR2Lab);
    // perform the enhancement on the brightness component
    cv::split(lab, channel);
    cv::Mat& brightness = channel[0];
    // smoothing for unsharp masking
    cv::GaussianBlur(brightness, blurred, cv::Size(0, 0), sigma);
    difference = brightness - blurred;
    // calculate an edge map. I'll use the Otsu threshold as the basis
    double thresh = cv::threshold(brightness, bw, 0, 255, cv::ThresholdTypes::THRESH_BINARY | cv::ThresholdTypes::THRESH_OTSU);
    cv::Canny(brightness, edges, thresh * canny_low_threshold_weight, thresh * canny_high_threshold_weight);
    // control edge thickness. use edge_weight=0 to use the Canny edges unaltered
    // (kernel is left empty, so dilate falls back to the default 3x3 element)
    cv::dilate(edges, edges, kernel, cv::Point(-1, -1), edge_weight);
    // unsharp masking on the edges
    cv::add(brightness, difference * amount, brightness, edges);
    // use the enhanced brightness channel
    cv::merge(channel, 3, enhanced_lab);
    // convert to BGR
    cv::cvtColor(enhanced_lab, enhanced_bgr, cv::ColorConversionCodes::COLOR_Lab2BGR);
    // cv::imshow("edges", edges);
    // cv::imshow("difference", difference * amount);
    // cv::imshow("original", bgr);
    // cv::imshow("enhanced", enhanced_bgr);
    // cv::waitKey(0);
    return enhanced_bgr;
}
int main(int argc, char *argv[])
{
    if (argc < 6) return 1; // expects: sigma amount low high w
    double sigma = std::stod(argv[1]);
    double amount = std::stod(argv[2]);
    double low = std::stod(argv[3]);
    double high = std::stod(argv[4]);
    int w = std::stoi(argv[5]);
    cv::Mat bgr = cv::imread("flower.jpg");
    cv::Mat enhanced = not_so_smart_sharpen(bgr, sigma, amount, low, high, w);
    cv::imshow("original", bgr);
    cv::imshow("enhanced", enhanced);
    cv::waitKey(0);
    return 0;
}

My Histogram is not showing for my Binary image OpenCV C++

The overall goal is to be able to read the histogram of a binary image in order to crop the image.
My code works, but for my binary image the histogram is not showing properly (the histogram is blank).
Can anybody tell me what's wrong with my code?
The histogram works for the RGB image as well as for the grey image.
I would like to be able to get the histogram of the binary image.
#include <opencv2/highgui/highgui.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include<iostream>
using namespace std;
using namespace cv;
void show_histogram(std::string const& name, cv::Mat1b const& image)
{
    // Set histogram bins count
    int bins = 255;
    int histSize[] = { bins };
    // Set ranges for histogram bins
    float lranges[] = { 0, 255 };
    const float* ranges[] = { lranges };
    // create matrix for histogram
    cv::Mat hist;
    int channels[] = { 0 };
    // create matrix for histogram visualization
    int const hist_height = 255;
    cv::Mat1b hist_image = cv::Mat1b::zeros(hist_height, bins);
    cv::calcHist(&image, 1, channels, cv::Mat(), hist, 1, histSize, ranges, true, false);
    double max_val = 0;
    minMaxLoc(hist, 0, &max_val);
    // visualize each bin
    for (int b = 0; b < bins; b++) {
        float const binVal = hist.at<float>(b);
        int const height = cvRound(binVal * hist_height / max_val);
        cv::line
            (hist_image
            , cv::Point(b, hist_height - height), cv::Point(b, hist_height)
            , cv::Scalar::all(255)
            );
    }
    cv::imshow(name, hist_image);
}
int main()
{
    Mat Rgb;
    Mat Grey;
    Mat Binary;
    //Mat Histogram;
    Rgb = imread("license.jpg", WINDOW_AUTOSIZE);
    cvtColor(Rgb, Grey, cv::COLOR_BGR2GRAY);
    threshold(Grey, Binary, 150, 250, THRESH_BINARY);
    //namedWindow("RGB");
    //namedWindow("Grey");
    namedWindow("Binary");
    //imshow("RGB", Rgb);
    imshow("Gray", Grey);
    imshow("Binary", Binary);
    show_histogram("Histogram1", Grey);
    show_histogram("Histogram2", Binary);
    waitKey(0);
    cv::destroyAllWindows();
    return 0;
}

How to use the Fourier transform pair on an image using OpenCV correctly?

Firstly, I use the putText function to draw text on a zero-filled image:
std::string text("Mengranlin");
int rows = 222;
int cols = 112;
double textSize = 1.5;
int textWidth = 2;
int num = 255;
cv::Mat zero_filled_img = cv::Mat::zeros(cols, rows, CV_32F);
putText(zero_filled_img, text,
cv::Point(zero_filled_img.cols * 0.5,
zero_filled_img.rows * 0.3),
cv::FONT_HERSHEY_PLAIN, textSize, cv::Scalar(num, num, num), textWidth);
cv::Mat zero_filled_img2;
flip(zero_filled_img, zero_filled_img2, -1);
zero_filled_img += zero_filled_img2;
transpose(zero_filled_img, zero_filled_img);
flip(zero_filled_img, zero_filled_img, 1);
Here is the image:
Secondly, I apply the inverse Fourier transform to the image:
int m = getOptimalDFTSize(rows);
int n = getOptimalDFTSize(cols);
cv::Mat dst;
copyMakeBorder(zero_filled_img, dst, 0, m - rows, 0, n - cols, BORDER_CONSTANT, Scalar::all(0));
cv::Mat planes[] = { cv::Mat_<float>(dst),
cv::Mat::zeros(dst.size(), CV_32F) };
cv::Mat complex;
cv::merge(planes,2, complex);
idft(complex, complex);
split(complex, planes);
magnitude(planes[0], planes[1], planes[0]);
Thirdly, I apply the Fourier transform to the result of the inverse Fourier transform:
cv::merge(planes2, 2, complex);
dft(complex, complex);
split(complex, planes2);
magnitude(planes2[0], planes2[1], planes2[0]);
cv::Mat result = planes2[0];
Finally, I save the image:
result += 1;
log(result, result);
result = result(cv::Rect(0, 0, cols, rows));
int cx = result.cols / 2;
int cy = result.rows / 2;
cv::Mat temp;
cv::Mat q0(result, cv::Rect(0, 0, cx, cy));
cv::Mat q1(result, cv::Rect(cx, 0, cx, cy));
cv::Mat q2(result, cv::Rect(0, cy, cx, cy));
cv::Mat q3(result, cv::Rect(cx, cy, cx, cy));
q0.copyTo(temp);
q3.copyTo(q0);
temp.copyTo(q3);
q1.copyTo(temp);
q2.copyTo(q1);
temp.copyTo(q2);
imwrite("./image/log_result.jpg", result);
Here is the image:
Although "Mengranlin" can be found in the image, it is very weak. I then save the normalization of the result, but I find nothing:
normalize(result, result);
imwrite("./image/normalize_result.jpg", result);
result *= 255;
imwrite("./image/normalize_result255.jpg", result);
Here is the normalization image:
Here is the normalization image x 255:
The experiment succeeds when using Matlab. I want to know where the error is.
Below is the complete code that I ran:
std::string text("Mengranlin");
int rows = 222;
int cols = 112;
double textSize = 1.5;
int textWidth = 2;
int num = 255;
cv::Mat zero_filled_img = cv::Mat::zeros(cols, rows, CV_32F);
putText(zero_filled_img, text, cv::Point(zero_filled_img.cols * 0.5, zero_filled_img.rows * 0.3),
cv::FONT_HERSHEY_PLAIN, textSize, cv::Scalar(num, num, num), textWidth);
cv::Mat zero_filled_img2;
flip(zero_filled_img, zero_filled_img2, -1);
zero_filled_img += zero_filled_img2;
transpose(zero_filled_img, zero_filled_img);
flip(zero_filled_img, zero_filled_img, 1);
cv::Mat de = cv::Mat_<uchar>(zero_filled_img);
cv::imwrite("./image/zero_filled_img.jpg", zero_filled_img);
//idft
int m = getOptimalDFTSize(rows);
int n = getOptimalDFTSize(cols);
cv::Mat dst;
copyMakeBorder(zero_filled_img, dst, 0, m - rows, 0, n - cols, BORDER_CONSTANT, Scalar::all(0));
cv::Mat planes[] = { cv::Mat_<float>(dst), cv::Mat::zeros(dst.size(), CV_32F) };
cv::Mat complex;
cv::merge(planes,2, complex);
idft(complex, complex);
split(complex, planes);
magnitude(planes[0], planes[1], planes[0]);
cv::Mat freq = planes[0];
freq = freq(cv::Rect(0, 0, cols, rows));
normalize(freq, freq, 0, 1, NORM_MINMAX);
//dft
cv::Mat planes2[] = {planes[0], planes[1]};
cv::merge(planes2, 2, complex);
dft(complex, complex);
split(complex, planes2);
magnitude(planes2[0], planes2[1], planes2[0]);
cv::Mat result = planes2[0];
//float min_v, max_v; min_max(result, min_v, max_v);
imwrite("./image/img.jpg", result);
result += 1;
imwrite("./image/img_plus_zero.jpg", result);
log(result, result);
result = result(cv::Rect(0, 0, cols, rows));
//float min_v1, max_v1; min_max(result, min_v1, max_v1);
imwrite("./image/log_img.jpg", result);
int cx = result.cols / 2;
int cy = result.rows / 2;
cv::Mat temp;
cv::Mat q0(result, cv::Rect(0, 0, cx, cy));
cv::Mat q1(result, cv::Rect(cx, 0, cx, cy));
cv::Mat q2(result, cv::Rect(0, cy, cx, cy));
cv::Mat q3(result, cv::Rect(cx, cy, cx, cy));
q0.copyTo(temp);
q3.copyTo(q0);
temp.copyTo(q3);
q1.copyTo(temp);
q2.copyTo(q1);
temp.copyTo(q2);
normalize(result, result);
imwrite("./image/normalize_img.jpg", result);
result *= 255;
imwrite("./image/normalize_img255.jpg", result);
Your code splits the output of idft into planes[0] (real component) and planes[1] (imaginary component), then computes the magnitude and writes it to planes[0]:
idft(complex, complex);
split(complex, planes);
magnitude(planes[0], planes[1], planes[0]);
Next, you merge planes[0] and planes[1] as the real and imaginary parts of a complex-valued image, and compute the dft:
cv::Mat planes2[] = {planes[0], planes[1]};
cv::merge(planes2, 2, complex);
dft(complex, complex);
But because planes[0] doesn't contain the real part of the output of idft any more, but its magnitude, dft will not perform the inverse calculation that idft did.
You can fix this easily. Instead of:
magnitude(planes[0], planes[1], planes[0]);
cv::Mat freq = planes[0];
Do:
cv::Mat freq;
magnitude(planes[0], planes[1], freq);
You can significantly simplify your code. Try the following code (zero_filled_img is the input image computed earlier):
// DFT
cv::Mat complex;
dft(zero_filled_img, complex, DFT_COMPLEX_OUTPUT);
// IDFT
cv::Mat result;
idft(complex, result, DFT_REAL_OUTPUT);
imwrite("./image/img.jpg", result);
result should be equal to zero_filled_img within numerical accuracy.
The DFT_COMPLEX_OUTPUT flag forces the creation of a full, complex-valued DFT, even though the input array is real-valued. Likewise, DFT_REAL_OUTPUT causes any imaginary output components to be dropped; this is equivalent to computing the complex IDFT and then taking only the real part.
I have reversed the DFT and IDFT to be conceptually correct (though it is perfectly fine to reverse these two operations). DFT_COMPLEX_OUTPUT only works with the forward transform and DFT_REAL_OUTPUT only works with the inverse transform, so the code above will not work (I believe) if you use these two operations in the order you attempted in your own code.
The code above also doesn't bother with padding to a favourable size. Doing so might reduce computation time, but for such a small image it will not matter at all.
Note also that taking the magnitude of the output of the inverse transform (the second transform you apply) is OK in your case, but not in general. This second transform is expected to produce a real-valued output (since the input to the first one was real-valued). Any imaginary component should be 0 within numerical precision. Thus, the real component of the complex output should be kept. If you take the magnitude, you obtain the absolute value of the real component, meaning that any negative values in the original input will become positive values in the final output. In the case of the example images, all pixels are non-negative, but this is not necessarily true. Do the correct thing and take the real component rather than the magnitude.
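To illustrate that last point, here is a small sketch using the variable names from the question: after the second transform, split the complex result and keep the real plane; the imaginary plane should contain only numerical noise.
// Sketch: keep the real component of the second transform's output;
// "complex" is assumed to hold the result of dft() as in the question.
cv::Mat planes2[2];
cv::split(complex, planes2);
double maxIm = 0, maxRe = 0;
cv::minMaxLoc(cv::abs(planes2[1]), nullptr, &maxIm);
cv::minMaxLoc(cv::abs(planes2[0]), nullptr, &maxRe);
std::cout << "imaginary/real peak ratio: " << maxIm / maxRe << std::endl; // should be ~0
cv::Mat result = planes2[0].clone(); // real part, not the magnitude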

Improper usage of calcHist

My aim is to generate a histogram for a gray-scale image. The code I used is:
Mat img = imread("leeds-castle.jpg",IMREAD_GRAYSCALE);
Mat hst;
int hstsize = 256;
float ranges[] = { 0,256 };
float *hstrange = { ranges };
calcHist( img, 1,0, Mat(), hst, 1, &hstsize,&hstrange,true,false);
int hst_w = 512, hst_h = 400;
int bin_w = cvRound((double)hst_w / 256);
Mat histimg(hst_w, hst_h, CV_8U);
normalize(hst, hst, 0, histimg.rows, NORM_MINMAX, -1, Mat());
for (int i = 1; i < 256; i++)
{
    line(histimg, Point(bin_w * (i - 1), hst_h - cvRound(hst.at<float>(i - 1))),
         Point(bin_w * i, hst_h - cvRound(hst.at<float>(i))), 2, 8, 0);
}
imshow("Histogram", histimg);
The only error is in the usage of the calcHist() function. Is there anything wrong with it?
See the comment above calcHist to identify the correct usage:
// original image
Mat img = imread("leeds-castle.jpg", IMREAD_GRAYSCALE);
// NOTE: check that img.channels() is equal to 1
// histogram
Mat hst;
// number of bins
int hstsize = 256;
float ranges[] = { 0, 256 };
const float* hstrange = ranges;
// parameters for histogram calculation
bool uniform = true;
bool accumulate = false;
// calculate histogram
// the '&' was missing here
calcHist(&img, 1, 0, Mat(), hst, 1, &hstsize, &hstrange, uniform, accumulate);
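For completeness, a minimal self-contained version of the corrected call might look like this sketch (the file name is a placeholder):
#include <opencv2/opencv.hpp>
#include <iostream>
int main()
{
    cv::Mat img = cv::imread("leeds-castle.jpg", cv::IMREAD_GRAYSCALE);
    if (img.empty()) { std::cerr << "image not found" << std::endl; return 1; }
    cv::Mat hst;
    int hstsize = 256;                    // one bin per gray level
    float ranges[] = { 0, 256 };
    const float* hstrange[] = { ranges }; // one range per histogram dimension
    // note the '&img': calcHist takes a pointer to the source array(s)
    cv::calcHist(&img, 1, 0, cv::Mat(), hst, 1, &hstsize, hstrange,
                 true /*uniform*/, false /*accumulate*/);
    std::cout << "bin 0 count: " << hst.at<float>(0) << std::endl;
    return 0;
}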

fftshift C++ implementation for OpenCV

I have already looked at this question:
fftshift/ifftshift C/C++ source code
I'm trying to implement fftshift from Matlab.
This is the code from the Matlab function for a 1D array:
numDims = ndims(x);
idx = cell(1, numDims);
for k = 1:numDims
    m = size(x, k);
    p = ceil(m/2);
    idx{k} = [p+1:m 1:p];
end
y = x(idx{:});
My C++/OpenCV code follows. What fftshift basically does is swap the values around a certain pivot position. Since I can't seem to understand how OpenCV lays out the matrix for complex numbers (the documentation at http://docs.opencv.org/modules/core/doc/operations_on_arrays.html#dft describes a CCS, complex-conjugate-symmetrical, packing), I thought it would be easier to split the complex numbers into real and imaginary planes, swap them, and then merge back into one matrix.
cv::vector<float> distanceF(f.size());
//ff = fftshift(ff);
cv::Mat ff;
cv::dft(distanceF, ff, cv::DFT_COMPLEX_OUTPUT);
//Make place for both the complex and the real values
cv::Mat planes[] = {cv::Mat::zeros(distanceF.size(), 1, CV_32F), cv::Mat::zeros(distanceF.size(), 1, CV_32F)};
cv::split(ff, planes); // planes[0] = Re(DFT(I)), planes[1] = Im(DFT(I))
int numDims = ff.dims;
for (int i = 0; i < numDims; i++)
{
    int m = ff.rows;
    int p = ceil(m / 2);
}
My problem is that, because my input to the DFT is a vector<float>, I can't seem to create the planes Mat needed to split the complex numbers.
Can you think of a better way to swap the values inside the cv::Mat data structure?
OK, this thread may be out of date by now, but maybe it is still useful for other users. Take a look at the samples:
opencv/samples/cpp/dft.cpp (line 66 - 80)
int cx = mag.cols/2;
int cy = mag.rows/2;
// rearrange the quadrants of Fourier image
// so that the origin is at the image center
Mat tmp;
Mat q0(mag, Rect(0, 0, cx, cy));
Mat q1(mag, Rect(cx, 0, cx, cy));
Mat q2(mag, Rect(0, cy, cx, cy));
Mat q3(mag, Rect(cx, cy, cx, cy));
q0.copyTo(tmp);
q3.copyTo(q0);
tmp.copyTo(q3);
q1.copyTo(tmp);
q2.copyTo(q1);
tmp.copyTo(q2);
I think that's a short and clean way for different dimensions.
I know this is quite an old thread, but I found it today while looking for a solution to shift the FFT result, and maybe the little function I wrote with the help of this site and other sources could be helpful for future readers who search the net and end up here too.
bool FftShift(const Mat& src, Mat& dst)
{
    if (src.empty()) return true;
    const uint h = src.rows, w = src.cols;   // height and width of src image
    const uint qh = h >> 1, qw = w >> 1;     // height and width of the quadrants
    Mat qTL(src, Rect(   0,    0, qw, qh));  // define the quadrants with respect to
    Mat qTR(src, Rect(w-qw,    0, qw, qh));  // the outer dimensions of the matrix;
    Mat qBL(src, Rect(   0, h-qh, qw, qh));  // thus, with odd sizes, the center
    Mat qBR(src, Rect(w-qw, h-qh, qw, qh));  // line(s) get(s) omitted
    Mat tmp;
    hconcat(qBR, qBL, dst);                  // build the destination matrix with switched
    hconcat(qTR, qTL, tmp);                  // quadrants 0 & 2 and 1 & 3 from the source
    vconcat(dst, tmp, dst);
    return false;
}
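A possible usage sketch for the function above (mag stands for whatever spectrum you want to shift; note the slightly unusual convention that the function returns true on failure):
cv::Mat shifted;
if (!FftShift(mag, shifted)) {
    // shifted now holds the quadrant-swapped spectrum
}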
How about using adjustROI and copyTo instead of .at()? It would certainly be more efficient.
Something along the lines of (for your 1D case):
Mat shifted(ff.size(), ff.type());
int pivot = (ff.cols + 1) / 2; // ceil(cols/2), as in Matlab's fftshift
ff(Range::all(), Range(pivot, ff.cols)).copyTo(shifted(Range::all(), Range(0, ff.cols - pivot)));
ff(Range::all(), Range(0, pivot)).copyTo(shifted(Range::all(), Range(ff.cols - pivot, ff.cols)));
For the 2D case, two more lines should be added and the row ranges modified...
I have been implementing it myself based on this post. I used Fabian's implementation, which works fine, but there is a problem when there is an odd number of rows or columns: the shift is then not correct.
You then need to pad your matrix and afterwards get rid of the extra row or column.
cv::Mat fftshift(cv::Mat inputMatrix)
{
    bool flag_row = false;
    bool flag_col = false;
    if ((inputMatrix.rows % 2) > 0)
    {
        cv::Mat row = cv::Mat::zeros(1, inputMatrix.cols, CV_64F);
        inputMatrix.push_back(row);
        flag_row = true;
    }
    if ((inputMatrix.cols % 2) > 0)
    {
        cv::Mat col = cv::Mat::zeros(1, inputMatrix.rows, CV_64F);
        cv::Mat padded;
        inputMatrix.copyTo(padded);
        padded = padded.t();
        padded.push_back(col);
        padded = padded.t();
        padded.copyTo(inputMatrix);
        flag_col = true;
    }
    int cx = inputMatrix.cols / 2;
    int cy = inputMatrix.rows / 2;
    cv::Mat outputMatrix;
    inputMatrix.copyTo(outputMatrix);
    // rearrange the quadrants of the Fourier image
    // so that the origin is at the image center
    cv::Mat tmp;
    cv::Mat q0(outputMatrix, cv::Rect(0, 0, cx, cy));
    cv::Mat q1(outputMatrix, cv::Rect(cx, 0, cx, cy));
    cv::Mat q2(outputMatrix, cv::Rect(0, cy, cx, cy));
    cv::Mat q3(outputMatrix, cv::Rect(cx, cy, cx, cy));
    q0.copyTo(tmp);
    q3.copyTo(q0);
    tmp.copyTo(q3);
    q1.copyTo(tmp);
    q2.copyTo(q1);
    tmp.copyTo(q2);
    int row = inputMatrix.rows;
    int col = inputMatrix.cols;
    if (flag_row)
    {
        outputMatrix = Tools::removerow(outputMatrix, row / 2 - 1);
    }
    if (flag_col)
    {
        outputMatrix = Tools::removecol(outputMatrix, col / 2 - 1);
    }
    return outputMatrix;
}
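The Tools::removerow/removecol helpers are not shown in the post; a plausible sketch of what they might look like, assuming they simply drop the given interior row or column:
// Hypothetical helpers, sketched to match how they are used above
// (they assume 0 < row < m.rows - 1 and 0 < col < m.cols - 1).
cv::Mat removerow(const cv::Mat& m, int row)
{
    cv::Mat out;
    cv::vconcat(m.rowRange(0, row), m.rowRange(row + 1, m.rows), out);
    return out;
}
cv::Mat removecol(const cv::Mat& m, int col)
{
    cv::Mat out;
    cv::hconcat(m.colRange(0, col), m.colRange(col + 1, m.cols), out);
    return out;
}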
Here is what I do (quick and dirty, can be optimized):
// taken from the opencv DFT example (see opencv/samples/cpp/dft.cpp within opencv v440 sourcecode package)
cv::Mat fftshift(const cv::Mat& mat){
    // create a copy to not mess up the original matrix (ret is only a "window" over the provided matrix)
    cv::Mat cpy;
    mat.copyTo(cpy);
    // crop the spectrum, if it has an odd number of rows or columns
    cv::Mat ret = cpy(cv::Rect(0, 0, cpy.cols & -2, cpy.rows & -2));
    // rearrange the quadrants of the Fourier image so that the origin is at the image center
    int cx = ret.cols / 2;
    int cy = ret.rows / 2;
    cv::Mat q0(ret, cv::Rect(0, 0, cx, cy));   // Top-Left - create a ROI per quadrant
    cv::Mat q1(ret, cv::Rect(cx, 0, cx, cy));  // Top-Right
    cv::Mat q2(ret, cv::Rect(0, cy, cx, cy));  // Bottom-Left
    cv::Mat q3(ret, cv::Rect(cx, cy, cx, cy)); // Bottom-Right
    cv::Mat tmp;                               // swap quadrants (Top-Left with Bottom-Right)
    q0.copyTo(tmp);
    q3.copyTo(q0);
    tmp.copyTo(q3);
    q1.copyTo(tmp);                            // swap quadrants (Top-Right with Bottom-Left)
    q2.copyTo(q1);
    tmp.copyTo(q2);
    return ret;
}
// reverse the swapping of fftshift (-> reverse the quadrant swapping)
cv::Mat ifftshift(const cv::Mat& mat){
    // create a copy to not mess up the original matrix (ret is only a "window" over the provided matrix)
    cv::Mat cpy;
    mat.copyTo(cpy);
    // crop the spectrum, if it has an odd number of rows or columns
    cv::Mat ret = cpy(cv::Rect(0, 0, cpy.cols & -2, cpy.rows & -2));
    // rearrange the quadrants of the Fourier image so that the origin is at the image center
    int cx = ret.cols / 2;
    int cy = ret.rows / 2;
    cv::Mat q0(ret, cv::Rect(0, 0, cx, cy));   // Top-Left - create a ROI per quadrant
    cv::Mat q1(ret, cv::Rect(cx, 0, cx, cy));  // Top-Right
    cv::Mat q2(ret, cv::Rect(0, cy, cx, cy));  // Bottom-Left
    cv::Mat q3(ret, cv::Rect(cx, cy, cx, cy)); // Bottom-Right
    cv::Mat tmp;                               // swap quadrants (Bottom-Right with Top-Left)
    q3.copyTo(tmp);
    q0.copyTo(q3);
    tmp.copyTo(q0);
    q2.copyTo(tmp);                            // swap quadrants (Bottom-Left with Top-Right)
    q1.copyTo(q2);
    tmp.copyTo(q1);
    return ret;
}
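A quick round-trip sanity check (a sketch): for even-sized matrices the two functions above are exact inverses of each other.
cv::Mat m = (cv::Mat_<float>(2, 2) << 1, 2, 3, 4);
cv::Mat back = ifftshift(fftshift(m));
CV_Assert(cv::countNonZero(back != m) == 0); // the round trip restores the input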
There are no implementations in earlier answers that work correctly for odd-sized images.
fftshift moves the origin from the top-left to the center (at size/2).
ifftshift moves the origin from the center to the top-left.
These two actions are identical for even sizes, but differ for odd sizes.
For an odd size, fftshift swaps the first (size+1)/2 pixels with the remaining size/2 pixels, which moves the pixel at index 0 to size/2. ifftshift does the reverse, swapping the first size/2 pixels with the remaining (size+1)/2 pixels. This code is the most simple implementation of both these actions that I can come up with. (Note that (size+1)/2 == size/2 if size is even.)
bool forward = true; // true for fftshift, false for ifftshift
cv::Mat img = ...; // the image to process
// input sizes
int sx = img.cols;
int sy = img.rows;
// size of top-left quadrant
int cx = forward ? (sx + 1) / 2 : sx / 2;
int cy = forward ? (sy + 1) / 2 : sy / 2;
// split the quadrants
cv::Mat top_left(img, cv::Rect(0, 0, cx, cy));
cv::Mat top_right(img, cv::Rect(cx, 0, sx - cx, cy));
cv::Mat bottom_left(img, cv::Rect(0, cy, cx, sy - cy));
cv::Mat bottom_right(img, cv::Rect(cx, cy, sx - cx, sy - cy));
// merge the quadrants in right order
cv::Mat tmp1, tmp2;
cv::hconcat(bottom_right, bottom_left, tmp1);
cv::hconcat(top_right, top_left, tmp2);
cv::vconcat(tmp1, tmp2, img);
This code makes a copy of the full image twice, but it is easy and quick to implement. A more performant implementation would swap values in-place. This answer has correct code to do so on a single line, it would have to be applied to each column and each row of the image.
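For reference, one way to do the in-place rotation mentioned above is std::rotate applied per row, then per column; here is a sketch for a single-channel CV_32F image (the transpose trick keeps the code short rather than optimal):
#include <algorithm> // std::rotate
void fftshift_inplace(cv::Mat& img)
{
    CV_Assert(img.type() == CV_32F);
    for (int y = 0; y < img.rows; ++y) { // shift each row
        float* r = img.ptr<float>(y);
        std::rotate(r, r + (img.cols + 1) / 2, r + img.cols);
    }
    cv::transpose(img, img);             // shift the columns via transpose
    for (int y = 0; y < img.rows; ++y) {
        float* r = img.ptr<float>(y);
        std::rotate(r, r + (img.cols + 1) / 2, r + img.cols);
    }
    cv::transpose(img, img);
    // for ifftshift, use size / 2 instead of (size + 1) / 2 as the split point
}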
This is for future reference: it has been tested and is bit-accurate for 1D.
cv::Mat ff;
cv::dft(distanceF, ff, cv::DFT_ROWS | cv::DFT_COMPLEX_OUTPUT);
//Make place for both the complex and the real values
cv::Mat planes[] = {cv::Mat::zeros(distanceF.size(), 1, CV_32F), cv::Mat::zeros(distanceF.size(), 1, CV_32F)};
cv::split(ff, planes); // planes[0] = Re(DFT(I)), planes[1] = Im(DFT(I))
int m = planes[0].cols;
int pivot = ceil(m / 2);
//duplicate the FFT results with the complex conjugate in order to get exact Matlab results
for (int i = pivot + 1, k = pivot; i < planes[1].cols; i++, k--)
{
    planes[1].at<float>(i) = planes[1].at<float>(k) * -1;
    planes[0].at<float>(i) = planes[0].at<float>(k);
}
//TODO maybe we need to check what happens for even and odd sizes
float im = planes[1].at<float>(0);
float re = planes[0].at<float>(0);
for (int i = 0; i < pivot; i++)
{
    //Im
    planes[1].at<float>(i) = planes[1].at<float>(pivot + i + 1);
    planes[1].at<float>(pivot + i + 1) = planes[1].at<float>(i + 1);
    //Real
    planes[0].at<float>(i) = planes[0].at<float>(pivot + i + 1);
    planes[0].at<float>(pivot + i + 1) = planes[0].at<float>(i + 1);
}
planes[1].at<float>(pivot) = im;
planes[0].at<float>(pivot) = re;
In Matlab's implementation, the main code is these two lines:
idx{k} = [p+1:m 1:p];
y = x(idx{:});
The first one builds the correct index order from the original one; the second assigns the output array according to that index order. Therefore, if you want to re-write Matlab's implementation without data swapping, you need to allocate a new array and fill it according to that index order.
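In OpenCV terms, that allocate-and-assign approach could look like this sketch for the column dimension (single-channel input assumed):
// Sketch of the Matlab-style remap y = x([p+1:m 1:p]); p = ceil(m/2) as above.
cv::Mat fftshift1D(const cv::Mat& x)
{
    int m = x.cols;
    int p = (m + 1) / 2; // ceil(m/2)
    cv::Mat y(x.size(), x.type());
    x.colRange(p, m).copyTo(y.colRange(0, m - p));
    x.colRange(0, p).copyTo(y.colRange(m - p, m));
    return y;
}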