Deblurring an image by deconvolution using OpenCV - C++

I have two images, o1 and o2, and I have blurred both using the same Gaussian blurring kernel. Then I found the kernel k1 = DFT(b1) / DFT(o1), where b1 is the image obtained by blurring o1.
I have used this kernel (k1) to perform deconvolution on b2, where b2 is obtained by blurring o2.
But the deblurred output is not correct (the output image has no relation to the original). What is the problem in my code?
int main(int argc, char** argv)
{
    Mat orig1 = imread(argv[1], 0);
    Mat orig2 = imread(argv[2], 0);
    Mat blur1, blur2;
    GaussianBlur(orig1, blur1, Size(11, 11), 0, 0);
    GaussianBlur(orig2, blur2, Size(11, 11), 0, 0);
    imshow("or1", orig1);
    imshow("bl1", blur1);
    imshow("or2", orig2);
    imshow("bl2", blur2);
    waitKey(0);
    deconvolution(orig1, blur1, orig2, blur2);
    return 0;
}
void deconvolution(Mat& o1, Mat& b1, Mat& o2, Mat& b2)
{
    Mat o1f, o2f, b1f, b2f;
    Mat o1dft, o2dft, b1dft, b2dft;
    o1.convertTo(o1f, CV_32F);
    b1.convertTo(b1f, CV_32F);
    o2.convertTo(o2f, CV_32F);
    b2.convertTo(b2f, CV_32F);
    computeDFT(o1f, o1dft);
    computeDFT(b1f, b1dft);
    computeDFT(o2f, o2dft);
    computeDFT(b2f, b2dft);
    Mat k1, k2, b1d, b2d;
    divide(b1dft, o1dft, k1);
    Mat r1, r2;
    divide(b1dft, k1, r1);
    divide(b2dft, k1, r2);
    Mat idftr1, idftr2;
    computeIDFT(r1, idftr1);
    computeIDFT(r2, idftr2);
    Mat r1_8u, r2_8u;
    idftr1.convertTo(r1_8u, CV_8U);
    idftr2.convertTo(r2_8u, CV_8U);
    imshow("r1", r1_8u);
    imshow("r2", r2_8u);
    waitKey(0);
    destroyAllWindows();
}
Images o1, o2, b1, b2, r1 and r2 are given in order below:

The problem is most likely that your blurring kernel has vanishing coefficients at certain frequencies. For each coefficient of the transform of your signal (f) and blurring kernel (h), you currently calculate f/h. Wherever h is (close to) zero, this is effectively a division by zero, which produces the strong noise you observe.
A quick fix for this is pseudo-inverse filtering:
- use f/h only where |h| > epsilon,
- set the coefficient to 0 otherwise.
If this isn't smooth enough, you can get better results with Wiener filtering.
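As a concrete illustration, here is a minimal sketch of pseudo-inverse filtering applied to OpenCV's two-channel (complex) spectra; the function name and the epsilon value are mine, not taken from the question's code:
#include <opencv2/opencv.hpp>
using namespace cv;

// Pseudo-inverse deconvolution: F = B * conj(H) / |H|^2 where |H| > eps, 0 elsewhere.
// B is the spectrum of the blurred image, H the spectrum of the kernel (both 2-channel).
Mat pseudoInverseFilter(const Mat& B, const Mat& H, float eps = 1e-3f)
{
    Mat num;
    mulSpectrums(B, H, num, 0, true);                 // B * conj(H)

    Mat Hp[2];
    split(H, Hp);
    Mat mag2 = Hp[0].mul(Hp[0]) + Hp[1].mul(Hp[1]);   // |H|^2

    Mat bad = mag2 < eps * eps;                       // frequencies where |h| is tiny
    mag2.setTo(1.0f, bad);                            // avoid dividing by ~0
    // For Wiener filtering you would instead add a small noise-dependent constant to mag2.

    Mat Np[2];
    split(num, Np);
    divide(Np[0], mag2, Np[0]);
    divide(Np[1], mag2, Np[1]);
    Np[0].setTo(0.0f, bad);                           // zero out the suppressed coefficients
    Np[1].setTo(0.0f, bad);

    Mat F;
    merge(Np, 2, F);
    return F;                                         // feed this to idft / computeIDFT
}
This keeps only the well-conditioned frequencies and discards the rest, which is why the result is still noisier than a proper Wiener filter.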

Related

How to detect the blur rate of a face effectively in C++?

I am trying to detect the blur rate of face images with the code below.
cv::Mat greyMat;
cv::Mat laplacianImage;
cv::Mat imageClone = LapMat.clone();
cv::resize(imageClone, imageClone, cv::Size(150, 150), 0, 0, cv::INTER_CUBIC);
cv::cvtColor(imageClone, greyMat, CV_BGR2GRAY);
Laplacian(greyMat, laplacianImage, CV_64F);
cv::Scalar mean, stddev; // 0:1st channel, 1:2nd channel and 2:3rd channel
meanStdDev(laplacianImage, mean, stddev, cv::Mat());
double variance = stddev.val[0] * stddev.val[0];
cv::Mat M = (cv::Mat_<double>(3, 1) << -1, 2, -1);
cv::Mat G = cv::getGaussianKernel(3, -1, CV_64F);
cv::Mat Lx;
cv::sepFilter2D(LapMat, Lx, CV_64F, M, G);
cv::Mat Ly;
cv::sepFilter2D(LapMat, Ly, CV_64F, G, M);
cv::Mat FM = cv::abs(Lx) + cv::abs(Ly);
double focusMeasure = cv::mean(FM).val[0];
return focusMeasure;
It sometimes gives poor results, as in the attached picture.
Is there a best-practice way to detect blurry faces?
I attached an example image that gets a high score with the above code, which is wrong.
Best
I'm not sure how you are interpreting your results. To measure blur, you usually take the output of the blur detector (a number) and compare it against a threshold value, then decide whether the input is in fact blurry or not. I don't see such a comparison in your code.
There are several ways to measure "blurriness", or rather, sharpness. Let's take a look at one. It involves computing the variance of the Laplacian and then comparing it to an expected value. This is the code:
//read the image and convert it to grayscale:
cv::Mat inputImage = cv::imread( "dog.png" );
cv::Mat gray;
cv::cvtColor( inputImage, gray, cv::COLOR_BGR2GRAY ); // imread loads BGR
//Cool, let's compute the laplacian of the gray image:
cv::Mat laplacianImage;
cv::Laplacian( gray, laplacianImage, CV_64F );
//Prepare to compute the mean and standard deviation of the laplacian:
cv::Scalar mean, stddev;
cv::meanStdDev( laplacianImage, mean, stddev, cv::Mat() );
//Let’s compute the variance:
double variance = stddev.val[0] * stddev.val[0];
Up until this point, we've effectively calculated the variance of the Laplacian, but we still need to compare against a threshold:
double blurThreshold = 300;
if ( variance <= blurThreshold ) {
    std::cout << "Input image is blurry!" << std::endl;
} else {
    std::cout << "Input image is sharp" << std::endl;
}
Let’s check out the results. These are my test images. I've printed the variance value in the lower-left corner of the images. The threshold value is 300, blue text is within limits, red text is below.
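For reference, here is the same measurement wrapped as a small helper (my own packaging; the 150x150 resize comes from the question's code and the threshold has to be tuned on your own face crops):
#include <opencv2/opencv.hpp>

// Variance-of-Laplacian sharpness check. Returns true if the face crop looks blurry.
// The threshold is illustrative and should be tuned on your own data.
bool isBlurry(const cv::Mat& faceBgr, double blurThreshold = 300.0)
{
    cv::Mat face, gray, lap;
    cv::resize(faceBgr, face, cv::Size(150, 150), 0, 0, cv::INTER_CUBIC); // normalize scale
    cv::cvtColor(face, gray, cv::COLOR_BGR2GRAY);
    cv::Laplacian(gray, lap, CV_64F);

    cv::Scalar mean, stddev;
    cv::meanStdDev(lap, mean, stddev);
    double variance = stddev.val[0] * stddev.val[0];
    return variance <= blurThreshold;   // low variance -> few strong edges -> likely blurry
}
Resizing every crop to the same size matters because the variance of the Laplacian depends on resolution, so scores from differently sized faces are otherwise not comparable.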

Performing convolution in the frequency domain manually but getting a wrong output image in C++/OpenCV

I followed these steps:
1. Calculated the DFT of the image
2. Calculated the DFT of the kernel (after padding it to the size of the image)
3. Multiplied the real and imaginary parts of both DFTs individually
4. Calculated the inverse DFT
I tried to display the images at each intermediate step, but the final image comes out almost black except in the corners.
Image fourier transform output after multiplication and its inverse dft output
input image
#include <iostream>
#include <stdlib.h>
#include <opencv2/opencv.hpp>
#include <stdio.h>
int r=100;
#define SIGMA_CLIP 6.0f
using namespace cv;
using namespace std;
void updateResult(Mat complex)
{
Mat work;
idft(complex, work);
Mat planes[] = {Mat::zeros(complex.size(), CV_32F), Mat::zeros(complex.size(), CV_32F)};
split(work, planes); // planes[0] = Re(DFT(I)), planes[1] = Im(DFT(I))
magnitude(planes[0], planes[1], work); // === sqrt(Re(DFT(I))^2 + Im(DFT(I))^2)
normalize(work, work, 0, 1, NORM_MINMAX);
imshow("result", work);
}
void shift(Mat magI) {
// crop if it has an odd number of rows or columns
magI = magI(Rect(0, 0, magI.cols & -2, magI.rows & -2));
int cx = magI.cols/2;
int cy = magI.rows/2;
Mat q0(magI, Rect(0, 0, cx, cy)); // Top-Left - Create a ROI per quadrant
Mat q1(magI, Rect(cx, 0, cx, cy)); // Top-Right
Mat q2(magI, Rect(0, cy, cx, cy)); // Bottom-Left
Mat q3(magI, Rect(cx, cy, cx, cy)); // Bottom-Right
Mat tmp; // swap quadrants (Top-Left with Bottom-Right)
q0.copyTo(tmp);
q3.copyTo(q0);
tmp.copyTo(q3);
q1.copyTo(tmp); // swap quadrant (Top-Right with Bottom-Left)
q2.copyTo(q1);
tmp.copyTo(q2);
}
Mat updateMag(Mat complex )
{
Mat magI;
Mat planes[] = {Mat::zeros(complex.size(), CV_32F), Mat::zeros(complex.size(), CV_32F)};
split(complex, planes); // planes[0] = Re(DFT(I)), planes[1] = Im(DFT(I))
magnitude(planes[0], planes[1], magI); // sqrt(Re(DFT(I))^2 + Im(DFT(I))^2)
// switch to logarithmic scale: log(1 + magnitude)
magI += Scalar::all(1);
log(magI, magI);
shift(magI);
normalize(magI, magI, 1, 0, NORM_INF); // Transform the matrix with float values into a
return magI; // viewable image form (float between values 0 and 1).
//imshow("spectrum", magI);
}
Mat createGausFilterMask(Size imsize, int radius) {
// call openCV gaussian kernel generator
double sigma = (r/SIGMA_CLIP+0.5f);
Mat kernelX = getGaussianKernel(2*radius+1, sigma, CV_32F);
Mat kernelY = getGaussianKernel(2*radius+1, sigma, CV_32F);
// create 2d gaus
Mat kernel = kernelX * kernelY.t();
int w = imsize.width-kernel.cols;
int h = imsize.height-kernel.rows;
int r = w/2;
int l = imsize.width-kernel.cols -r;
int b = h/2;
int t = imsize.height-kernel.rows -b;
Mat ret;
copyMakeBorder(kernel,ret,t,b,l,r,BORDER_CONSTANT,Scalar::all(0));
return ret;
}
//code reference https://docs.opencv.org/2.4/doc/tutorials/core/discrete_fourier_transform/discrete_fourier_transform.html
int main( int argc, char** argv )
{
String file;
file = "lena.png";
Mat image = imread(file, CV_LOAD_IMAGE_GRAYSCALE);
Mat padded;
int m = getOptimalDFTSize( image.rows );
int n = getOptimalDFTSize( image.cols );
copyMakeBorder(image, padded, 0, m - image.rows, 0, n -image.cols, BORDER_CONSTANT, Scalar::all(0));//expand input image to optimal size , on the border add zero values
Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
Mat complexI;
merge(planes, 2, complexI);
dft(complexI, complexI); //computing dft
split(complexI, planes); //image converted to complex and real dft here
Mat mask = createGausFilterMask(padded.size(),r ); // Forming the gaussian filter
Mat mplane[] = {Mat_<float>(mask), Mat::zeros(mask.size(), CV_32F)};
Mat kernelcomplex;
merge(mplane, 2, kernelcomplex);
dft(kernelcomplex, kernelcomplex);
split(kernelcomplex, mplane);// splitting the dft of kernel to real and complex
mplane[1]=mplane[0]; //overwriting imaginary values with real values of kernel dft
Mat kernel_spec;
merge(mplane, 2, kernel_spec);
mulSpectrums(complexI, kernel_spec, complexI, DFT_ROWS);
Mat magI=updateMag(complexI);
namedWindow( "image fourier", CV_WINDOW_AUTOSIZE );
imshow("spectrum magnitude", magI);
updateResult(complexI); //converting to viewable form, computing idft
waitKey(0);
return 0;
}
Which step is going wrong? Or am I missing some concept?
Edit: I fixed the code with Cris's help and it now works perfectly.
There are two immediately apparent issues:
The Gaussian is real-valued and symmetric. Its Fourier transform should be too. If the DFT of your kernel has a non-zero imaginary component, you're doing something wrong.
Likely, what you are doing wrong is that your kernel has its origin in the middle of the image, rather than at the top-left sample. This is the same issue as in this other question. The solution is to use the equivalent of MATLAB's ifftshift, an implementation of which is shown in the OpenCV documentation ("step 6, Crop and rearrange").
To apply the convolution, you need to multiply the two DFTs together, not the real parts and imaginary parts of the DFTs. Multiplying two complex numbers a+ib and c+id results in (ac - bd) + i(ad + bc), not ac + ibd.
But since the DFT of your kernel should be real-valued only, you can simply multiply the real component of the kernel with both the real and imaginary components of the image: (a+ib)c = ac+ibc.
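As a small sketch of that last point (using the variable names from the question's code; untested):
// Apply the real-valued kernel spectrum to both planes of the image spectrum,
// i.e. (a+ib)*c = ac + ibc. Assumes the kernel was shifted as described in point 1,
// so its spectrum is (essentially) real. This replaces the mplane[1] = mplane[0] trick.
Mat imgPlanes[2], kerPlanes[2], filtered;
split(complexI, imgPlanes);        // complex DFT of the image
split(kernelcomplex, kerPlanes);   // kerPlanes[0] holds the (real) kernel spectrum
multiply(imgPlanes[0], kerPlanes[0], imgPlanes[0]);
multiply(imgPlanes[1], kerPlanes[0], imgPlanes[1]);
merge(imgPlanes, 2, filtered);     // inverse transform this instead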
It seems very roundabout what you are doing with the complex-valued images. Why not let OpenCV handle all of that for you? You can probably* just do something like this:
Mat image = imread(file, CV_LOAD_IMAGE_GRAYSCALE);
// Expand input image to optimal size, on the border add zero values
Mat padded;
int m = getOptimalDFTSize(image.rows);
int n = getOptimalDFTSize(image.cols);
copyMakeBorder(image, padded, 0, m - image.rows, 0, n -image.cols, BORDER_CONSTANT, Scalar::all(0));
// Computing DFT
Mat DFTimage;
dft(padded, DFTimage);
// Forming the Gaussian filter
Mat kernel = createGausFilterMask(padded.size(), r);
shift(kernel);
Mat DFTkernel;
dft(kernel, DFTkernel);
// Convolution
mulSpectrums(DFTimage, DFTkernel, DFTimage, DFT_ROWS);
// Display Fourier-domain result
Mat magI = updateMag(DFTimage);
imshow("spectrum magnitude", magI);
// IDFT
Mat work;
idft(DFTimage, work); // <- NOTE! Don't inverse transform the log-transformed magnitude image!
Note that the Fourier-domain result is actually a special representation of the complex-conjugate-symmetric DFT, intended to save space and computations. To compute the full complex output, add the DFT_COMPLEX_OUTPUT flag to the call to dft, and DFT_REAL_OUTPUT to the call to idft (the latter then assumes symmetry and produces a real-valued output, saving you the hassle of computing the magnitude).
* I say probably because I haven't compiled any of this... If there's something wrong, please let me know, or edit the answer and fix it.
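For instance (another untested sketch, reusing the names from the code above), the forward and inverse transforms with those flags would look like:
dft(padded, DFTimage, DFT_COMPLEX_OUTPUT);            // full complex spectrum of the image
dft(kernel, DFTkernel, DFT_COMPLEX_OUTPUT);           // same for the shifted kernel
mulSpectrums(DFTimage, DFTkernel, DFTimage, 0);
idft(DFTimage, work, DFT_SCALE | DFT_REAL_OUTPUT);    // real-valued spatial result directly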

OpenCV: Issue with stereo camera tracking using cv::triangulatePoints()

I'm trying to use the cv::triangulatePoints() function for stereo camera tracking of a checkerboard pattern, using two off-the-shelf webcams. I calibrated my setup using the Stereo Camera Calibration Toolbox from MATLAB, which I then used in my OpenCV code.
My issue is that the coordinates I get from cv::triangulatePoints() (after converting them into Euclidean space) do not form a plane of points when I plot them in 3D in MATLAB. I was wondering if there is a bug in my code that I have overlooked.
The code I'm using is listed below. Any insight would help greatly!
Mat cameraMat1 = (Mat_<double>(3,3) << 1411.3, 2.2527, 958.3516,
0, 1404.1, 566.6821,
0, 0, 1);
Mat distCoeff1 =(Mat_<double>(5,1) << 0.0522,
-0.1651,
0.0023,
0.0020,
0.0924);
Mat cameraMat2 = (Mat_<double>(3,3) << 1413.7, -1.2189, 968.5768,
0, 1408.1, 505.1645,
0, 0, 1);
Mat distCoeff2 =(Mat_<double>(5,1) << 0.0465,
-0.1948,
-0.0013,
-0.00016774,
0.1495);
Mat R = (Mat_<double>(3,3) << 0.9108, 0.0143, -0.4127, -0.0228, 0.9996, -0.0157, 0.4123, 0.0237, 0.9107);
Mat T = (Mat_<double>(3,1) << -209.4118, 0.2208, 49.1987);
Mat R1, R2, P1, P2, Q;
Size imSize = Size(1920,1080); //Pixel Resolution
Mat frame1, frame2;
vector<Point2f> foundCorners1;
vector<Point2f> foundCorners2;
Size chessSize(11,8);
//for undistort
vector<Point2f> ufoundCorners1;
vector<Point2f> ufoundCorners2;
Mat homopnts3D(4, foundCorners1.size(), CV_64F);
Mat pnts3D;
int main(int argc, char** argv){
//Read in checkerboard images
frame1 = imread(file1);
frame2 = imread(file2);
//get corners
bool found1 = findChessboardCorners(frame1, chessSize, foundCorners1);
bool found2 = findChessboardCorners(frame2, chessSize, foundCorners2);
stereoRectify(cameraMat1, distCoeff1, cameraMat2, distCoeff2, imSize, R, T, R1, R2, P1, P2, Q);
//Addition - Undistort those points
undistortPoints(foundCorners1, ufoundCorners1, cameraMat1, distCoeff1, R1, P1);
undistortPoints(foundCorners2, ufoundCorners2, cameraMat2, distCoeff2, R2, P2);
//StereoTriangulation
triangulatePoints(P1, P2, ufoundCorners1, ufoundCorners2, homopnts3D);
//convert to euclidean
convertPointsFromHomogeneous(homopnts3D.reshape(4,1), pnts3D);
}
The code looks correct. You should check whether your stereo rectification is correct with the remap function.
Mat rmap[2][2];
Mat rectifiedLeftImg;
Mat rectifiedRightImg;
initUndistortRectifyMap(cameraMat1, distCoeff1, R1, P1, imSize, CV_16SC2, rmap[0][0], rmap[0][1]);
initUndistortRectifyMap(cameraMat2, distCoeff2, R2, P2, imSize, CV_16SC2, rmap[1][0], rmap[1][1]);
cv::remap(frame1, rectifiedLeftImg, rmap[0][0], rmap[0][1], INTER_LINEAR, BORDER_CONSTANT, Scalar());
cv::remap(frame2, rectifiedRightImg, rmap[1][0], rmap[1][1], INTER_LINEAR, BORDER_CONSTANT, Scalar());
imshow("rectifiedLeft", rectifiedLeftImg);
imshow("rectifiedRight", rectifiedRightImg);

OpenCV real-time stereocam image processing problems (post-calibration)

I have built my own stereo camera from two USB cameras (with an annoying autofocus). I have calibrated them using opencv/samples/cpp/stereo_calib.cpp, which produced an extrinsics/intrinsics file with an RMS error of 0.4818 and an average reprojection error of 0.5426.
I am now trying to run real-time depth mapping using Ptr<StereoBM> bm in OpenCV. I am getting useless images, and I'm wondering if anyone can point me in the right direction of where to look. I'm also wondering whether the cameras' occasional autofocusing changes the focal length, and therefore changes the calibration and alters the results.
Code and pictures below.
Bonus if anyone can recommend any more robust methods than StereoBM for matching in OpenCV :)
Image of checkerboard calibration:
Left camera image:
Post StereoBM Processing:
Snippet of Code:
//Set up stereoBM
Ptr<StereoBM> bm = StereoBM::create(16,9);
//Read intrinsic parameters
string intrinsic_filepath = "/home/will/Desktop/repos3.0/stereo-vision/Stereo-Vision/intrinsics.yml";
FileStorage fs(intrinsic_filepath, FileStorage::READ);
if(!fs.isOpened())
{
printf("Failed to open intrinsics.yml");
return -1;
}
Mat M1, D1, M2, D2;
fs["M1"] >> M1;
fs["D1"] >> D1;
fs["M2"] >> M2;
fs["D2"] >> D2;
//Read Extrinsic Parameters
string extrinsic_filepath = "/home/will/Desktop/repos3.0/stereo-vision/Stereo-Vision/extrinsics.yml";
fs.open(extrinsic_filepath, FileStorage::READ);
if(!fs.isOpened())
{
printf("Failed to open extrinsics");
return -1;
}
Mat R, T, R1, P1, R2, P2;
fs["R"] >> R;
fs["T"] >> T;
Mat frame1,frame2, gray1, gray2;
int counter = 0;
capture >> frame1;
capture >> frame2;
Size img_size = frame1.size();
Rect roi1, roi2;
Mat Q;
stereoRectify( M1, D1, M2, D2, img_size, R, T, R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, -1, img_size, &roi1, &roi2 );
Mat map11, map12, map21, map22;
initUndistortRectifyMap(M1, D1, R1, P1, img_size, CV_16SC2, map11, map12);
initUndistortRectifyMap(M2, D2, R2, P2, img_size, CV_16SC2, map21, map22);
int numberOfDisparities = 16;
int SADWindowSize = 9;
bm->setROI1(roi1);
bm->setROI2(roi2);
bm->setPreFilterCap(31);
bm->setBlockSize(SADWindowSize);
bm->setMinDisparity(0);
bm->setNumDisparities(numberOfDisparities);
bm->setTextureThreshold(10);
bm->setUniquenessRatio(15);
bm->setSpeckleWindowSize(100);
bm->setSpeckleRange(32);
bm->setDisp12MaxDiff(1);
while(1){
capture >> frame1;
capture2 >> frame2;
imshow("Cam1", frame1);
imshow("Cam2", frame2);
/************************* STEREO ***********************/
cvtColor(frame1, gray1, CV_RGB2GRAY);
cvtColor(frame2, gray2, CV_RGB2GRAY);
int64 t = getTickCount();
Mat img1r, img2r;
remap(gray1, img1r, map11, map12, INTER_LINEAR);
remap(gray2, img2r, map21, map22, INTER_LINEAR);
Mat disp, disp8;
bm->compute(img1r, img2r, disp);
t = getTickCount() - t;
printf("Time elapsed: %fms\n", t*1000/getTickFrequency());
disp.convertTo(disp8, CV_8U, 255/(numberOfDisparities*16.));
imshow("disparity", disp8);
}
So, half a pixel of RMS error means your calibration is basically garbage.
In your calibration image I noticed that the target is not even flat. If I can see that, the camera can too, but the calibration model will still assume it's flat, and that will make the optimizer very sad.
I suggest you take a look at this answer about calibration.
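Regarding the bonus question about more robust matchers (my addition, not part of the original answer): OpenCV also ships StereoSGBM, a semi-global matcher that is usually less noisy than plain block matching. A minimal, untested sketch that could replace the StereoBM setup above:
// Illustrative StereoSGBM setup; parameter values are common starting points, not tuned.
int numberOfDisparities = 16 * 6;                  // must be divisible by 16
int SADWindowSize = 5;
Ptr<StereoSGBM> sgbm = StereoSGBM::create(0, numberOfDisparities, SADWindowSize);
sgbm->setP1(8 * SADWindowSize * SADWindowSize);    // smoothness penalties
sgbm->setP2(32 * SADWindowSize * SADWindowSize);
sgbm->setUniquenessRatio(10);
sgbm->setSpeckleWindowSize(100);
sgbm->setSpeckleRange(32);
sgbm->setDisp12MaxDiff(1);

Mat disp, disp8;
sgbm->compute(img1r, img2r, disp);                 // same rectified images as before
disp.convertTo(disp8, CV_8U, 255 / (numberOfDisparities * 16.0));
None of this will help much, though, until the calibration itself is fixed.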

Deblurring algorithm in the Fourier domain

I'm trying to implement a deconvolution algorithm in the Fourier domain. I have a working implementation of my own in MATLAB, and I want to translate it to C++ using the OpenCV library.
Basically what I do is extract the gradients from an input image, do some work in the transformed domain, and then come back to the spatial domain.
The problematic part for me is performing this (element-wise) division:
DFT(im) = ( conj(DFT(f)) * DFT(image) + L2 * conj(DFT(gradKernel-x)) * DFT(mux) + ... ) / ( norm(DFT(f))^2 + L2 * norm(DFT(gradKernel-x))^2 + ... )
f is a Gaussian kernel that is defined in the code.
DFT(gradKernel-x) is the FFT of the gradient kernel in the x direction, i.e. DFT([1,-1]) zero-padded to the size of the blurred image. mux is an auxiliary variable used to perform the deconvolution.
I decided to perform the division separately on magnitude and phase in the transformed domain before taking the inverse DFT.
I don't know where the error is in my code; maybe in the division, maybe in the forward/inverse transforms of my variables, maybe in the Gaussian kernel...
If somebody can help me, I'd be very grateful.
Here is the critical part of the code (note that I simplified it before posting, so don't expect a good deblurring result if you try it; basically what I expect from this is a visually pleasant output image):
imH00=imread("Cameraman256.png",0);
if(!imH00.data)
{
std::cout<< "Could not open or find the image" << std::endl ;
return -1;
}
imH00.convertTo(imH00,CV_32F,1.0/255.0,0.0);
// Gaussian Kernel
Mat ker1D=getGaussianKernel(ksize,sigma,CV_32F);
fkernel.create(imH00.size(),CV_32F);
// zero-padding
fkernel.setTo(Scalar::all(0));
temp=ker1D*ker1D.t();
temp.copyTo(fkernel(Rect(0,0,temp.rows,temp.cols)));
// Fourier transform
Mat planes[] = {Mat_<float>(fkernel), Mat::zeros(fkernel.size(), CV_32F)};
Mat ffkernel;
merge(planes, 2, ffkernel);
dft(ffkernel, ffkernel,DFT_COMPLEX_OUTPUT);
// Gradient filter in frequency domain, trying to do something similar to psf2otf([1;-1],size(imH00)); in Matlab.
dx=Mat::zeros(imH00.size(),CV_32F);
dx.at<float>(0,0)=1;
dx.at<float>(0,1)=-1;
Mat dxplanes[] = {Mat_<float>(dx), Mat::zeros(dx.size(), CV_32F)};
Mat fdx;
merge(dxplanes, 2, fdx);
dft(fdx, fdx,DFT_COMPLEX_OUTPUT);
dy=Mat::zeros(imH00.size(),CV_32F);
dy.at<float>(0,0)=1;
dy.at<float>(1,0)=-1;
Mat dyplanes[] = {Mat_<float>(dy), Mat::zeros(dy.size(), CV_32F)};
Mat fdy;
merge(dyplanes, 2, fdy);
dft(fdy, fdy,DFT_COMPLEX_OUTPUT);
// Denominators
Mat den1;
Mat den2;
Mat den21;
Mat den22;
// ||fdx||^2 and ||fdy||^2
mulSpectrums(fdx,fdx,den21,DFT_COMPLEX_OUTPUT,true);
mulSpectrums(fdy,fdy,den22,DFT_COMPLEX_OUTPUT,true);
add(den21,den22,den2);
mulSpectrums(ffkernel,ffkernel,den1,0,true);
imHk=imH00.clone();
mux=Mat::zeros(imH00.size(),CV_32F);
muy=Mat::zeros(imH00.size(),CV_32F);
// FFT imH00
Mat fHktplanes[] = {Mat_<float>(imH00), Mat::zeros(imH00.size(), CV_32F)};
Mat fHkt;
merge(fHktplanes, 2, fHkt);
dft(fHkt, fHkt,DFT_COMPLEX_OUTPUT);
std::cout<<"starting deconvolution"<<std::endl;
for (int j=0; j<4; j++)
{
// Deconvolution
// Gradients
Mat ddx(1,2,CV_32F,Scalar(0));
ddx.at<float>(0,0)=1;
ddx.at<float>(0,1)=-1;
filter2D(imHk,dHx,CV_32F,ddx);
Mat ddy(2,1,CV_32F,Scalar(0));
ddy.at<float>(0,0)=1;
ddy.at<float>(1,0)=-1;
filter2D(imHk,dHy,CV_32F,ddy);
mux=Scalar((float)-0.5*L1/L2);
add(mux,dHx,mux);
muy=Scalar((float)-0.5*L1/L2);
add(muy,dHy,muy);
// FFT mux, muy
Mat fmuxplanes[] = {Mat_<float>(mux), Mat::zeros(mux.size(), CV_32F)};
Mat fmux;
merge(fmuxplanes, 2, fmux);
dft(fmux, fmux,DFT_COMPLEX_OUTPUT);
Mat fmuyplanes[] = {Mat_<float>(muy), Mat::zeros(muy.size(), CV_32F)};
Mat fmuy;
merge(fmuyplanes, 2, fmuy);
dft(fmuy, fmuy,DFT_COMPLEX_OUTPUT);
Mat num1,num2,num3,num,den;
// Spectrums multiplication, complex conjugate
mulSpectrums(fHkt,ffkernel,num1,DFT_COMPLEX_OUTPUT,true);
mulSpectrums(fmux,fdx,num2,DFT_COMPLEX_OUTPUT,true);
mulSpectrums(fmuy,fdy,num3,DFT_COMPLEX_OUTPUT,true);
add(num2,num3,num2);
add(num1,L2*num2,num);
add(den1,L2*den2,den);
// Division in polar coordinates
Mat auxnum[2];
split(num,auxnum);
Mat auxden[2];
split(den,auxden);
Mat numMag,numPhase;
magnitude(auxnum[0],auxnum[1],numMag);
phase(auxnum[0],auxnum[1],numPhase);
Mat denMag,denPhase;
magnitude(auxden[0],auxden[1],denMag);
phase(auxden[0],auxden[1],denPhase);
Mat division[2];
divide(numMag,denMag,division[0]);
division[1]=numPhase-denPhase;
polarToCart(division[0],division[1],division[0],division[1]);
Mat fHk;
merge(division,2,fHk);
Mat imHkaux;
Mat planesfHk[2];
dft(fHk, imHkaux, DFT_INVERSE+DFT_SCALE);
split(imHkaux,planesfHk);
imHk=planesfHk[0]; // imHk is the Real part
}
imHk.convertTo(imHk,CV_8U,255);
imshow( "Deblurred image", imHk);
Thank you
The problem was in the Fourier transform of the filters. We need to shift the filter kernels before transforming. This is the same thing the psf2otf function does in MATLAB. If someone is interested, this simple code should perform the DFT of a Gaussian kernel that is not influenced by centering (like psf2otf):
float sigma=1.0;
short int ksize=13; // always odd
Mat ker1D=getGaussianKernel(ksize,sigma,CV_32F); //1D gaussian kernel
fkernel.create(myImage.size(),CV_32F); //
// zero-padding
fkernel.setTo(Scalar::all(0));
temp=ker1D*ker1D.t(); // 2D gaussian kernel, (Gaussian filter is separable)
int r=(ksize-1)/2; //radius
// shifting
temp(Rect(r,r,r+1,r+1)).copyTo(fkernel(Rect(0,0,r+1,r+1))); // q1
temp(Rect(r,0,r+1,r)).copyTo(fkernel(Rect(0,fkernel.cols-r,r+1,r))); // q2
temp(Rect(0,r,r,r+1)).copyTo(fkernel(Rect(fkernel.rows-r,0,r,r+1))); // q3
temp(Rect(0,0,r,r)).copyTo(fkernel(Rect(fkernel.rows-r,fkernel.cols-r,r,r))); // q4
// DFT
Mat planes[] = {Mat_<float>(fkernel), Mat::zeros(fkernel.size(), CV_32F)};
Mat ffkernel;
merge(planes, 2, ffkernel);
dft(ffkernel, ffkernel,DFT_COMPLEX_OUTPUT);
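For completeness, one possible way to use ffkernel afterwards (my own illustrative continuation, not from the original answer) is to multiply it with the image spectrum and take the inverse transform:
// Convolve myImage with the centered Gaussian via the shifted-kernel spectrum.
Mat imgF;
myImage.convertTo(imgF, CV_32F);
Mat imgPlanes[] = {imgF, Mat::zeros(imgF.size(), CV_32F)};
Mat fimg;
merge(imgPlanes, 2, fimg);
dft(fimg, fimg, DFT_COMPLEX_OUTPUT);

Mat fres, blurred;
mulSpectrums(fimg, ffkernel, fres, 0);              // multiplication in the frequency domain
idft(fres, blurred, DFT_SCALE | DFT_REAL_OUTPUT);   // real-valued result, no extra shifting needed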