OpenCV: Issue with stereo camera tracking using cv::triangulatePoints() - C++

I'm trying to use the cv::triangulatePoints() function for stereo camera tracking of a checkerboard pattern, using two off-the-shelf webcams. I calibrated my setup using the Stereo Camera Calibration Toolbox from MATLAB, and then used the resulting parameters in my OpenCV code.
My issue is that the coordinates I get from cv::triangulatePoints() (after converting them into Euclidean space) do not form a plane of points when I 3D-plot them in MATLAB. Is there a bug in my code that I have overlooked?
The code I'm using is listed below. Any insight would help greatly!
Mat cameraMat1 = (Mat_<double>(3,3) << 1411.3, 2.2527, 958.3516,
                                       0,      1404.1, 566.6821,
                                       0,      0,      1);
Mat distCoeff1 = (Mat_<double>(5,1) << 0.0522, -0.1651, 0.0023, 0.0020, 0.0924);
Mat cameraMat2 = (Mat_<double>(3,3) << 1413.7, -1.2189, 968.5768,
                                       0,      1408.1,  505.1645,
                                       0,      0,       1);
Mat distCoeff2 = (Mat_<double>(5,1) << 0.0465, -0.1948, -0.0013, -0.00016774, 0.1495);
Mat R = (Mat_<double>(3,3) << 0.9108, 0.0143, -0.4127,
                             -0.0228, 0.9996, -0.0157,
                              0.4123, 0.0237,  0.9107);
Mat T = (Mat_<double>(3,1) << -209.4118, 0.2208, 49.1987);
Mat R1, R2, P1, P2, Q;
Size imSize = Size(1920, 1080); // pixel resolution
Mat frame1, frame2;
vector<Point2f> foundCorners1;
vector<Point2f> foundCorners2;
Size chessSize(11, 8);
bool found1, found2;
// for the undistorted points
vector<Point2f> ufoundCorners1;
vector<Point2f> ufoundCorners2;
Mat homopnts3D(4, foundCorners1.size(), CV_64F); // note: triangulatePoints() reallocates this anyway
Mat pnts3D;

int main(int argc, char** argv)
{
    // Read in the checkerboard images (file1/file2 are the image paths, defined elsewhere)
    frame1 = imread(file1);
    frame2 = imread(file2);
    // Get corners
    found1 = findChessboardCorners(frame1, chessSize, foundCorners1);
    found2 = findChessboardCorners(frame2, chessSize, foundCorners2);
    stereoRectify(cameraMat1, distCoeff1, cameraMat2, distCoeff2, imSize, R, T, R1, R2, P1, P2, Q);
    // Addition - undistort those points
    undistortPoints(foundCorners1, ufoundCorners1, cameraMat1, distCoeff1, R1, P1);
    undistortPoints(foundCorners2, ufoundCorners2, cameraMat2, distCoeff2, R2, P2);
    // Stereo triangulation
    triangulatePoints(P1, P2, ufoundCorners1, ufoundCorners2, homopnts3D);
    // Convert to Euclidean
    convertPointsFromHomogeneous(homopnts3D.reshape(4, 1), pnts3D);
}

The code looks correct. You should check whether your stereo rectification is correct with the remap function:
Mat rmap[2][2];
Mat rectifiedLeftImg;
Mat rectifiedRightImg;
initUndistortRectifyMap(cameraMat1, distCoeff1, R1, P1, imSize, CV_16SC2, rmap[0][0], rmap[0][1]);
initUndistortRectifyMap(cameraMat2, distCoeff2, R2, P2, imSize, CV_16SC2, rmap[1][0], rmap[1][1]);
cv::remap(frame1, rectifiedLeftImg, rmap[0][0], rmap[0][1], INTER_LINEAR, BORDER_CONSTANT, Scalar());
cv::remap(frame2, rectifiedRightImg, rmap[1][0], rmap[1][1], INTER_LINEAR, BORDER_CONSTANT, Scalar());
imshow("rectifiedLeft", rectifiedLeftImg);
imshow("rectifiedRight", rectifiedRightImg);
waitKey(0); // needed for the windows to actually render

Related

Performing convolution in the frequency domain manually but getting a wrong output image in C++/OpenCV

I followed these steps:
1. Calculated the DFT of the image
2. Calculated the DFT of the kernel (but first padded it to the size of the image)
3. Multiplied the real and imaginary parts of both DFTs individually
4. Calculated the inverse DFT
I tried to display the images at each intermediate step, but the final image comes out almost black except in the corners.
[Image: Fourier transform output after multiplication, and its inverse DFT output]
[Image: input image]
#include <iostream>
#include <stdlib.h>
#include <opencv2/opencv.hpp>
#include <stdio.h>

int r = 100;
#define SIGMA_CLIP 6.0f
using namespace cv;
using namespace std;

void updateResult(Mat complex)
{
    Mat work;
    idft(complex, work);
    Mat planes[] = {Mat::zeros(complex.size(), CV_32F), Mat::zeros(complex.size(), CV_32F)};
    split(work, planes);                   // planes[0] = Re(DFT(I)), planes[1] = Im(DFT(I))
    magnitude(planes[0], planes[1], work); // = sqrt(Re(DFT(I))^2 + Im(DFT(I))^2)
    normalize(work, work, 0, 1, NORM_MINMAX);
    imshow("result", work);
}

void shift(Mat magI)
{
    // crop if it has an odd number of rows or columns
    magI = magI(Rect(0, 0, magI.cols & -2, magI.rows & -2));
    int cx = magI.cols / 2;
    int cy = magI.rows / 2;
    Mat q0(magI, Rect(0, 0, cx, cy));   // Top-Left - create a ROI per quadrant
    Mat q1(magI, Rect(cx, 0, cx, cy));  // Top-Right
    Mat q2(magI, Rect(0, cy, cx, cy));  // Bottom-Left
    Mat q3(magI, Rect(cx, cy, cx, cy)); // Bottom-Right
    Mat tmp;                            // swap quadrants (Top-Left with Bottom-Right)
    q0.copyTo(tmp);
    q3.copyTo(q0);
    tmp.copyTo(q3);
    q1.copyTo(tmp);                     // swap quadrants (Top-Right with Bottom-Left)
    q2.copyTo(q1);
    tmp.copyTo(q2);
}

Mat updateMag(Mat complex)
{
    Mat magI;
    Mat planes[] = {Mat::zeros(complex.size(), CV_32F), Mat::zeros(complex.size(), CV_32F)};
    split(complex, planes);                // planes[0] = Re(DFT(I)), planes[1] = Im(DFT(I))
    magnitude(planes[0], planes[1], magI); // sqrt(Re(DFT(I))^2 + Im(DFT(I))^2)
    // switch to logarithmic scale: log(1 + magnitude)
    magI += Scalar::all(1);
    log(magI, magI);
    shift(magI);
    normalize(magI, magI, 1, 0, NORM_INF); // transform the matrix with float values into a
    return magI;                           // viewable image form (floats between 0 and 1)
}

Mat createGausFilterMask(Size imsize, int radius)
{
    // call the OpenCV Gaussian kernel generator
    double sigma = (r / SIGMA_CLIP + 0.5f); // note: uses the global r, not the radius parameter
    Mat kernelX = getGaussianKernel(2 * radius + 1, sigma, CV_32F);
    Mat kernelY = getGaussianKernel(2 * radius + 1, sigma, CV_32F);
    // create the 2D Gaussian
    Mat kernel = kernelX * kernelY.t();
    int w = imsize.width - kernel.cols;
    int h = imsize.height - kernel.rows;
    int r = w / 2;                          // note: shadows the global r
    int l = imsize.width - kernel.cols - r;
    int b = h / 2;
    int t = imsize.height - kernel.rows - b;
    Mat ret;
    copyMakeBorder(kernel, ret, t, b, l, r, BORDER_CONSTANT, Scalar::all(0));
    return ret;
}

// code reference: https://docs.opencv.org/2.4/doc/tutorials/core/discrete_fourier_transform/discrete_fourier_transform.html
int main(int argc, char** argv)
{
    String file = "lena.png";
    Mat image = imread(file, CV_LOAD_IMAGE_GRAYSCALE);
    Mat padded;
    int m = getOptimalDFTSize(image.rows);
    int n = getOptimalDFTSize(image.cols);
    // expand the input image to optimal size; on the border add zero values
    copyMakeBorder(image, padded, 0, m - image.rows, 0, n - image.cols, BORDER_CONSTANT, Scalar::all(0));
    Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
    Mat complexI;
    merge(planes, 2, complexI);
    dft(complexI, complexI); // computing the DFT
    split(complexI, planes); // splitting into the real and imaginary parts of the DFT
    Mat mask = createGausFilterMask(padded.size(), r); // forming the Gaussian filter
    Mat mplane[] = {Mat_<float>(mask), Mat::zeros(mask.size(), CV_32F)};
    Mat kernelcomplex;
    merge(mplane, 2, kernelcomplex);
    dft(kernelcomplex, kernelcomplex);
    split(kernelcomplex, mplane); // splitting the DFT of the kernel into real and imaginary parts
    mplane[1] = mplane[0];        // overwriting the imaginary values with the real values of the kernel DFT
    Mat kernel_spec;
    merge(mplane, 2, kernel_spec);
    mulSpectrums(complexI, kernel_spec, complexI, DFT_ROWS);
    Mat magI = updateMag(complexI);
    namedWindow("image fourier", CV_WINDOW_AUTOSIZE);
    imshow("spectrum magnitude", magI);
    updateResult(complexI); // converting to viewable form, computing the IDFT
    waitKey(0);
    return 0;
}
Which step is going wrong? Or am I missing some concept?
Edit: I fixed the code with Cris's help and it now works perfectly.
There are two immediately apparent issues:
The Gaussian is real-valued and symmetric. Its Fourier transform should be too. If the DFT of your kernel has a non-zero imaginary component, you're doing something wrong.
Likely, what you are doing wrong is that your kernel has its origin in the middle of the image, rather than at the top-left sample. This is the same issue as in this other question. The solution is to use the equivalent of MATLAB's ifftshift, an implementation of which is shown in the OpenCV documentation ("step 6, Crop and rearrange").
To apply the convolution, you need to multiply the two DFTs together, not the real parts and imaginary parts of the DFTs separately. Multiplying two complex numbers a+ib and c+id results in (ac-bd) + i(ad+bc), not ac + ibd.
But since the DFT of your kernel should be real-valued only, you can simply multiply the real component of the kernel with both the real and imaginary components of the image: (a+ib)c = ac+ibc.
It seems very roundabout what you are doing with the complex-valued images. Why not let OpenCV handle all of that for you? You can probably* just do something like this:
Mat image = imread(file, CV_LOAD_IMAGE_GRAYSCALE);

// Expand input image to optimal size, on the border add zero values
Mat padded;
int m = getOptimalDFTSize(image.rows);
int n = getOptimalDFTSize(image.cols);
copyMakeBorder(image, padded, 0, m - image.rows, 0, n - image.cols, BORDER_CONSTANT, Scalar::all(0));
padded.convertTo(padded, CV_32F); // dft needs floating-point input

// Computing DFT
Mat DFTimage;
dft(padded, DFTimage);

// Forming the Gaussian filter
Mat kernel = createGausFilterMask(padded.size(), r);
shift(kernel);
Mat DFTkernel;
dft(kernel, DFTkernel);

// Convolution
mulSpectrums(DFTimage, DFTkernel, DFTimage, 0); // flags = 0: one 2D spectrum, not independent per-row spectra

// Display Fourier-domain result
Mat magI = updateMag(DFTimage);
imshow("spectrum magnitude", magI);

// IDFT
Mat work;
idft(DFTimage, work); // <- NOTE! Don't inverse transform the log-transformed magnitude image!
Note that the Fourier-domain result is actually a special representation of the complex-conjugate symmetric DFT, intended to save space and computation. To compute the full complex output, add the DFT_COMPLEX_OUTPUT flag to the call to dft, and DFT_REAL_OUTPUT to the call to idft (the latter then assumes symmetry and produces a real-valued output, saving you the hassle of computing the magnitude).
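To make that concrete, here is a minimal sketch of that flag combination (same caveat as the rest of this answer); DFT_SCALE applies the 1/(M*N) normalization that idft otherwise omits:
// Sketch: full-complex forward DFT, real-output inverse DFT.
Mat imgF;
padded.convertTo(imgF, CV_32F);
Mat spec;
dft(imgF, spec, DFT_COMPLEX_OUTPUT);           // 2-channel complex spectrum
// ... multiply 'spec' with a complex-valued filter here ...
Mat back;
idft(spec, back, DFT_REAL_OUTPUT | DFT_SCALE); // real result, scaled by 1/(M*N)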
* I say probably because I haven't compiled any of this... If there's something wrong, please let me know, or edit the answer and fix it.

OpenCV real-time stereocam image processing problems (post-calibration)

I have built my own stereo camera from two USB cameras (with an annoying autofocus). I have calibrated them using /opencv/samples/cpp/stereo_calib.cpp, which produced an extrinsics/intrinsics file with an RMS error of 0.4818 and an average reprojection error of 0.5426.
I am now trying to run real-time depth mapping using Ptr<StereoBM> bm in OpenCV. I am getting useless images, and I'm wondering if anyone can point me in the right direction of where to look. I'm also wondering whether the cameras autofocusing every so often can change the focal length and therefore invalidate the calibration, altering the results.
Code and pictures below.
Bonus if anyone can recommend any more robust matching methods than StereoBM in OpenCV :)
Image of checkerboard calibration:
Left camera image:
Post StereoBM Processing:
Snippet of Code:
//Set up StereoBM
Ptr<StereoBM> bm = StereoBM::create(16, 9);
//Read intrinsic parameters
string intrinsic_filepath = "/home/will/Desktop/repos3.0/stereo-vision/Stereo-Vision/intrinsics.yml";
FileStorage fs(intrinsic_filepath, FileStorage::READ);
if (!fs.isOpened())
{
    printf("Failed to open intrinsics.yml");
    return -1;
}
Mat M1, D1, M2, D2;
fs["M1"] >> M1;
fs["D1"] >> D1;
fs["M2"] >> M2;
fs["D2"] >> D2;
//Read extrinsic parameters
string extrinsic_filepath = "/home/will/Desktop/repos3.0/stereo-vision/Stereo-Vision/extrinsics.yml";
fs.open(extrinsic_filepath, FileStorage::READ);
if (!fs.isOpened())
{
    printf("Failed to open extrinsics");
    return -1;
}
Mat R, T, R1, P1, R2, P2;
fs["R"] >> R;
fs["T"] >> T;
Mat frame1, frame2, gray1, gray2;
int counter = 0;
capture >> frame1;  // capture/capture2 are VideoCapture objects opened elsewhere
capture2 >> frame2;
Size img_size = frame1.size();
Rect roi1, roi2;
Mat Q;
stereoRectify(M1, D1, M2, D2, img_size, R, T, R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, -1, img_size, &roi1, &roi2);
Mat map11, map12, map21, map22;
initUndistortRectifyMap(M1, D1, R1, P1, img_size, CV_16SC2, map11, map12);
initUndistortRectifyMap(M2, D2, R2, P2, img_size, CV_16SC2, map21, map22);
int numberOfDisparities = 16;
int SADWindowSize = 9;
bm->setROI1(roi1);
bm->setROI2(roi2);
bm->setPreFilterCap(31);
bm->setBlockSize(SADWindowSize);
bm->setMinDisparity(0);
bm->setNumDisparities(numberOfDisparities);
bm->setTextureThreshold(10);
bm->setUniquenessRatio(15);
bm->setSpeckleWindowSize(100);
bm->setSpeckleRange(32);
bm->setDisp12MaxDiff(1);
while (1)
{
    capture >> frame1;
    capture2 >> frame2;
    imshow("Cam1", frame1);
    imshow("Cam2", frame2);
    /************************* STEREO ***********************/
    cvtColor(frame1, gray1, CV_RGB2GRAY);
    cvtColor(frame2, gray2, CV_RGB2GRAY);
    int64 t = getTickCount();
    Mat img1r, img2r;
    remap(gray1, img1r, map11, map12, INTER_LINEAR);
    remap(gray2, img2r, map21, map22, INTER_LINEAR);
    Mat disp, disp8;
    bm->compute(img1r, img2r, disp);
    t = getTickCount() - t;
    printf("Time elapsed: %fms\n", t * 1000 / getTickFrequency());
    disp.convertTo(disp8, CV_8U, 255 / (numberOfDisparities * 16.));
    imshow("disparity", disp8);
    waitKey(1); // needed for the imshow windows to actually refresh
}
So, half a pixel of RMS error means your calibration is basically garbage.
In your calibration image I noticed that the target is not even flat. If I can see that, the camera can too, but the calibration model will still assume it's flat, and that will make the optimizer very sad.
I suggest you take a look at this answer about calibration.
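On the autofocus question: yes, refocusing changes the effective focal length, so it silently invalidates your calibration. If the driver exposes the control, you can try to lock focus before calibrating and capturing. A hedged sketch; whether these properties are honored depends on your camera and capture backend:
// Try to lock focus so the calibration stays valid; driver support varies.
VideoCapture capture(0);
capture.set(CAP_PROP_AUTOFOCUS, 0); // disable autofocus if supported
capture.set(CAP_PROP_FOCUS, 0);     // pin focus at a fixed (here arbitrary) value
Otherwise, disable autofocus at the OS level (e.g. with v4l2-ctl on Linux) or use webcams with fixed-focus lenses.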

deblurring image by deconvolution using opencv

I have two images o1 and o2, and I have blurred both using the same Gaussian blurring kernel. Then I found kernel k1 = DFT(b1) / DFT(o1), where b1 is the image obtained by blurring o1.
I have used this kernel (k1) to perform deconvolution on b2, where b2 is obtained by blurring o2.
But the deblurred output is not correct (the output image has no relation to the original). What is the problem in my code?
void deconvolution(Mat& o1, Mat& b1, Mat& o2, Mat& b2); // defined below

int main(int argc, char** argv)
{
    Mat orig1 = imread(argv[1], 0);
    Mat orig2 = imread(argv[2], 0);
    Mat blur1, blur2;
    GaussianBlur(orig1, blur1, Size(11, 11), 0, 0);
    GaussianBlur(orig2, blur2, Size(11, 11), 0, 0);
    imshow("or1", orig1);
    imshow("bl1", blur1);
    imshow("or2", orig2);
    imshow("bl2", blur2);
    waitKey(0);
    deconvolution(orig1, blur1, orig2, blur2);
    return 0;
}

void deconvolution(Mat& o1, Mat& b1, Mat& o2, Mat& b2)
{
    Mat o1f, o2f, b1f, b2f;
    Mat o1dft, o2dft, b1dft, b2dft;
    o1.convertTo(o1f, CV_32F);
    b1.convertTo(b1f, CV_32F);
    o2.convertTo(o2f, CV_32F);
    b2.convertTo(b2f, CV_32F);
    computeDFT(o1f, o1dft);
    computeDFT(b1f, b1dft);
    computeDFT(o2f, o2dft);
    computeDFT(b2f, b2dft);
    Mat k1, k2, b1d, b2d;
    divide(b1dft, o1dft, k1); // note: element-wise division, not complex division
    Mat r1, r2;
    divide(b1dft, k1, r1);
    divide(b2dft, k1, r2);
    Mat idftr1, idftr2;
    computeIDFT(r1, idftr1);
    computeIDFT(r2, idftr2);
    Mat r1_8u, r2_8u;
    idftr1.convertTo(r1_8u, CV_8U);
    idftr2.convertTo(r2_8u, CV_8U);
    imshow("r1", r1_8u);
    imshow("r2", r2_8u);
    waitKey(0);
    destroyAllWindows();
}
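(For context: computeDFT and computeIDFT are the asker's own helpers and are not shown. A typical implementation, my assumption following the OpenCV DFT tutorial, would be:)
// Hypothetical helpers matching the calls above (not shown in the question).
void computeDFT(const Mat& src, Mat& dst)
{
    Mat planes[] = { Mat_<float>(src), Mat::zeros(src.size(), CV_32F) };
    merge(planes, 2, dst); // two-channel (real, imaginary) image
    dft(dst, dst);         // full-complex forward DFT
}

void computeIDFT(const Mat& src, Mat& dst)
{
    idft(src, dst, DFT_SCALE | DFT_REAL_OUTPUT); // scaled, real-valued inverse
}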
Images o1, o2, b1, b2, r1 and r2 are given in order below:
The problem is most likely that your blurring kernel has vanishing coefficients at certain frequencies. For each coefficient of the transform of your signal (f) and blurring kernel (h), you currently calculate f/h. This is effectively a division by zero for those coefficients, resulting in the strong noise you observe.
A quick solution for this would be pseudo-inverse filtering:
use f/h only for |h| > epsilon;
set the coefficient to 0 otherwise.
If this isn't smooth enough, you can get better results with Wiener filtering.
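A minimal sketch of the pseudo-inverse filter, assuming both spectra are full-complex two-channel CV_32F mats (e.g. produced with dft(..., DFT_COMPLEX_OUTPUT)); the helper name is mine:
// Divide F by H where |H| is significant; zero out the rest.
void pseudoInverseFilter(const Mat& F, const Mat& H, Mat& out, float eps)
{
    CV_Assert(F.type() == CV_32FC2 && H.type() == CV_32FC2 && F.size() == H.size());
    out.create(F.size(), F.type());
    for (int y = 0; y < F.rows; ++y)
        for (int x = 0; x < F.cols; ++x)
        {
            Vec2f f = F.at<Vec2f>(y, x);
            Vec2f h = H.at<Vec2f>(y, x);
            float mag2 = h[0] * h[0] + h[1] * h[1]; // |h|^2
            if (mag2 > eps * eps)
                // complex division: f/h = f * conj(h) / |h|^2
                out.at<Vec2f>(y, x) = Vec2f((f[0] * h[0] + f[1] * h[1]) / mag2,
                                            (f[1] * h[0] - f[0] * h[1]) / mag2);
            else
                out.at<Vec2f>(y, x) = Vec2f(0.f, 0.f);
        }
}
Wiener filtering replaces this hard threshold with a soft one: multiply by conj(h) / (|h|^2 + K) for a small constant K instead.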

OpenCV run-time error while calibrating the camera at the calibrateCamera call

I wrote calibration code as below:
int numBoards = 20;
int numCornersHor = 6;
int numCornersVer = 9;
int numSquares = numCornersHor * numCornersVer;
cv::Size board_sz = cv::Size(numCornersHor, numCornersVer);
std::vector<std::vector<cv::Point3f> > object_points;
std::vector<std::vector<cv::Point2f> > image_points;
std::vector<cv::Point2f> corners;
std::vector<cv::Point3f> obj;
for (int j = 0; j < numSquares; j++)
    obj.push_back(cv::Point3f(j / numCornersHor, j % numCornersHor, 0.0f));
int successes=0;
After initializing these variables, I get frames from the webcam and store them in a buffer.
while (successes < numBoards)
{
    unsigned char* buffer = eyeCamera->getFrame();
    cv::Mat rawImg = cv::Mat(cv::Size(widthCam, heightCam), CV_8UC4, buffer, cv::Mat::AUTO_STEP);
    cv::Mat grayImg;
    cv::cvtColor(rawImg, grayImg, CV_BGR2GRAY);
    bool found = findChessboardCorners(rawImg, board_sz, corners,
                                       CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
    if (found)
    {
        cv::cornerSubPix(grayImg, corners, cv::Size(11, 11), cv::Size(-1, -1),
                         cv::TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 1.1));
        cv::drawChessboardCorners(grayImg, board_sz, corners, found);
    }
    cv::imshow("win2", grayImg);
Everything is OK so far. I show grayImg and the chessboard corners are painted.
    int key = cv::waitKey(1);
    if (key == 27)
        return;
    if (key == ' ' && found != 0)
    {
        image_points.push_back(corners);
        object_points.push_back(obj);
        successes++;
        if (successes >= numBoards)
            break;
    }
}
cv::Mat intrinsic = cv::Mat(3, 3, CV_64F);
cv::Mat distCoeffs = cv::Mat(8, 1, CV_64F);
std::vector<cv::Mat> rvecs;
std::vector<cv::Mat> tvecs;
intrinsic.at<double>(0, 0) = 1.0;
int widthCam = 640;
int heightCam = 480;
object_points and image_points are filled with 54 elements (9 x 6 corners) per view.
cv::calibrateCamera(object_points, image_points, cv::Size(widthCam,heightCam), intrinsic, distCoeffs, rvecs, tvecs);
I'm using Qt Creator. I always get a run-time error when calling the last line, calibrateCamera().
Edit: I tried the same code with cvCalibrateCamera2 and again I got the same error. Here is the OpenCV exception:
OpenCV Error: Bad argument (the output array of translation vectors must be 3-channel
1xn or nx1 array or 1-channel nx3 array, where n is the number of views) in
cvCalibrateCamera2, file F:\OpenCV\opencv\modules\calib3d\src\calibration.cpp, line 1506
terminate called after throwing an instance of 'cv::Exception'
I am using 10 snapshots, and my defined rvec and tvec are as follows:
CvMat* rvec = cvCreateMat(10, 3, CV_32FC1);
CvMat* tvec = cvCreateMat(10, 3, CV_32FC1);
Can anyone help me solve this error?
Thanks.
Okay, I tried this now locally on my system; it turns out that the definition of rVecs and tVecs was actually different from what I expected it to be.
CvMat* rVecs = cvCreateMat(1, 1, CV_32FC3);
CvMat* tVecs = cvCreateMat(1, 1, CV_32FC3);
did the job for me.
I wonder if
CvMat* tvec = cvCreateMat(3, 10, CV_32FC1);
will help. Also, try
CvMat* tvec = cvCreateMat(1, 10, CV_32FC3);
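For reference, the modern C++ API used earlier in the question avoids this sizing problem altogether: when you pass std::vector<cv::Mat> outputs, calibrateCamera allocates one 3x1 rotation and translation vector per view itself. A minimal sketch, assuming object_points and image_points are filled as above:
// Sketch: let calibrateCamera allocate the per-view vectors itself.
cv::Mat intrinsic = cv::Mat::eye(3, 3, CV_64F); // fx = fy = 1 as the initial guess
cv::Mat distCoeffs;
std::vector<cv::Mat> rvecs, tvecs; // one 3x1 rvec/tvec per view after the call
double rms = cv::calibrateCamera(object_points, image_points,
                                 cv::Size(640, 480), intrinsic, distCoeffs,
                                 rvecs, tvecs);
The returned value is the RMS reprojection error, a quick sanity check on calibration quality.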

DFT to spatial domain in OpenCV is not working

I have created the DFT of an image, and after some adjustment with filters I want to convert it back to the real image, but every time I do that I get a wrong result; it seems like it's not converting back.
ForierTransform and createGaussianHighPassFilter are my own functions; the rest of the code below is what I am using for the inversion back to the real image.
Mat fft = ForierTransform(HeightPadded, WidthPadded);
Mat ghpf = createGaussianHighPassFilter(Size(WidthPadded, HeightPadded), db);
Mat res;
cv::multiply(fft, ghpf, res);
imshow("fftXhighpass1", res);
idft(res, res, DFT_INVERSE, res.rows);
cv::Mat croped = res(cv::Rect(0, 0, img.cols, img.rows));
//res.convertTo(res,CV_32S);
imshow("fftXhighpass", res);
Even if I don't apply the filter, I am unable to reverse the DFT result.
Here is my DFT code; I could not find any sample showing how to reverse the DFT back to a normal image.
Mat ForierTransform(int M, int N)
{
    Mat img = imread("thumb1-small-test.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    Mat padded;
    copyMakeBorder(img, padded, 0, M - img.rows, 0, N - img.cols, BORDER_CONSTANT, Scalar::all(0));
    Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
    Mat complexImg;
    merge(planes, 2, complexImg);
    dft(complexImg, complexImg);
    split(complexImg, planes);
    magnitude(planes[0], planes[1], planes[0]);
    Mat mag = planes[0];
    mag += Scalar::all(1);
    log(mag, mag);
    // crop the spectrum, if it has an odd number of rows or columns
    mag = mag(Rect(0, 0, mag.cols & -2, mag.rows & -2));
    normalize(mag, mag, 0, 1, CV_MINMAX);
    return mag;
}
kindly help
[EDIT: After I found the solution with the help of mevatron, the correct code is below]
Mat ForierTransform(int M, int N)
{
    Mat img = imread("thumb1-small-test.jpg", CV_LOAD_IMAGE_GRAYSCALE);
    Mat padded;
    copyMakeBorder(img, padded, 0, M - img.rows, 0, N - img.cols, BORDER_CONSTANT, Scalar::all(0));
    Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
    Mat complexImg;
    merge(planes, 2, complexImg);
    dft(complexImg, complexImg);
    return complexImg;
}

Mat img = imread("thumb1-small-test.jpg", CV_LOAD_IMAGE_GRAYSCALE);
int WidthPadded = 0, HeightPadded = 0;
WidthPadded = img.cols * 2;
HeightPadded = img.rows * 2;
int M = getOptimalDFTSize(img.rows);
// Create a Gaussian high-pass filter 5% the height of the Fourier transform
double db = 0.05 * HeightPadded;
Mat fft = ForierTransform(HeightPadded, WidthPadded);
Mat ghpf = createGaussianHighPassFilter(Size(WidthPadded, HeightPadded), db);
Mat res;
cv::mulSpectrums(fft, ghpf, res, DFT_COMPLEX_OUTPUT);
idft(res, res, DFT_COMPLEX_OUTPUT, img.rows);
Mat padded;
copyMakeBorder(img, padded, 0, img.rows, 0, img.cols, BORDER_CONSTANT, Scalar::all(0));
Mat planes[] = {Mat_<float>(padded), Mat::zeros(padded.size(), CV_32F)};
split(res, planes);
magnitude(planes[0], planes[1], planes[0]);
Mat mag = planes[0];
mag += Scalar::all(1);
log(mag, mag);
// crop the spectrum, if it has an odd number of rows or columns
mag = mag(Rect(0, 0, mag.cols & -2, mag.rows & -2));
int cx = mag.cols / 2;
int cy = mag.rows / 2;
normalize(mag, mag, 1, 0, CV_MINMAX);
cv::Mat croped = mag(cv::Rect(cx, cy, img.cols, img.rows));
cv::threshold(croped, croped, 0.56, 1, cv::THRESH_BINARY);
imshow("fftPLUShpf", mag);
imshow("cropedBinary", croped);
It can now display the ridges and valleys of the finger, and it can be optimized further with respect to the threshold as well.
I see a few problems going on here.
First, you need to use the mulSpectrums function to convolve two FFTs, not multiply them element-wise.
Second, createGaussianHighPassFilter is only outputting a single-channel, non-complex filter. You'll probably need to just set the complex channel to Mat::zeros like you did for your input image.
Third, don't convert the output of the FFT to a log-magnitude spectrum. It will not combine correctly with the filter, and you won't get the same thing when performing the inverse. So, just return complexImg right after the DFT is executed. The log-magnitude spectrum is useful for a human looking at the data, but not for what you are trying to do.
Finally, make sure you pay attention to the difference between the full-complex output of dft and the Complex Conjugate Symmetric (CCS) packed output. Intel has a good page on how this data is formatted here. In your case, for simplicity, I would keep everything in full-complex mode to make your life easier.
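To make the second point concrete, here is a minimal sketch (mine, reusing the question's variable names) of giving the real-valued filter a zero imaginary channel before mulSpectrums:
// Pair the real filter with a zero imaginary channel, then convolve.
Mat fplanes[] = { Mat_<float>(ghpf), Mat::zeros(ghpf.size(), CV_32F) };
Mat ghpfComplex;
merge(fplanes, 2, ghpfComplex);
Mat filtered, spatial;
mulSpectrums(fft, ghpfComplex, filtered, 0);
idft(filtered, spatial, DFT_REAL_OUTPUT | DFT_SCALE); // straight back to a real image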
Hope that helps!