Spectral residual saliency detection in C++ with CImg

I'm trying to implement the spectral residual approach for saliency detection, described in this paper:
http://www.klab.caltech.edu/~xhou/papers/cvpr07.pdf
There is a reference implementation in Matlab, taken from their website:
http://www.klab.caltech.edu/~xhou/projects/spectralResidual/spectralresidual.html
clear
clc
%% Read image from file
inImg = im2double(rgb2gray(imread('yourImage.jpg')));
inImg = imresize(inImg, 64/size(inImg, 2));
%% Spectral Residual
myFFT = fft2(inImg);
myLogAmplitude = log(abs(myFFT));
myPhase = angle(myFFT);
mySpectralResidual = myLogAmplitude - imfilter(myLogAmplitude, fspecial('average', 3),'replicate');
saliencyMap = abs(ifft2(exp(mySpectralResidual + i*myPhase))).^2;
%% After Effect
saliencyMap = mat2gray(imfilter(saliencyMap, fspecial('gaussian', [10, 10], 2.5)));
imshow(saliencyMap);
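In equation form, these steps compute (notation roughly following the paper)
$$\mathcal{R}(f) = \log|\mathcal{F}(I)| - h_n * \log|\mathcal{F}(I)|, \qquad S(x) = g_\sigma * \Big|\mathcal{F}^{-1}\big[\exp\big(\mathcal{R}(f) + i\,\mathcal{P}(f)\big)\big]\Big|^{2}$$
where $\mathcal{P}(f)$ is the phase spectrum, $h_n$ the 3x3 mean filter and $g_\sigma$ the Gaussian used above.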
I've tried to translate it to C++ with CImg.
Where I fail is here:
myPhase = angle(myFFT);
and here
saliencyMap = abs(ifft2(exp(mySpectralResidual + i*myPhase))).^2;
Here's my code:
#include <CImg.h>
#include <iostream>
using namespace cimg_library;
int main() {
CImg<unsigned char> image("img2.jpg");
CImg<float> mask(3,3,1,1,1.0/9.0);
image.resize(64,64);
CImgList<float> myFFT = image.get_FFT();
const CImg<float> MyLogAmplitude = ((myFFT[0].get_pow(2) + myFFT[1].get_pow(2)).get_sqrt()).get_log(); //Magnitude
const CImg<float> MyPhase = myFFT[0].get_atan2(myFFT[1]);
const CImg<float> A = MyLogAmplitude.get_convolve(mask);
const CImg<float> MySpectralResidual = MyLogAmplitude-A;
CImgList<float> tmp = CImgList<float>(MySpectralResidual.get_exp(),MyPhase);
CImgList<float> MySaliencyMap = tmp.get_FFT(true);
CImgDisplay draw_disp0(MySaliencyMap,"Image");
while (!draw_disp0.is_closed()) {
draw_disp0.wait();
}
return 0;
}
Anybody seen an obvious mistake?

I think I can see two mistakes in your code:
First, the atan2() call for MyPhase has its arguments inverted. It should be written as
const CImg<float> MyPhase = myFFT[1].get_atan2(myFFT[0]);
(but this is probably not much of an issue here).
Second, and it is more serious, you are doing the inverse FFT on a pair of complex values coded as (amplitude, phase), which is not what CImg expects there: the FFT() function expects a (real, imaginary) pair of images as input. That probably makes a huge difference in the result.
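A minimal sketch of that second fix, reusing the variable names from the question (untested, just to show the (amplitude, phase) to (real, imaginary) conversion before the inverse FFT):
// rebuild the (real, imaginary) pair from exp(residual) and the phase,
// since CImg's FFT works on real/imaginary image pairs
const CImg<float> Amplitude = MySpectralResidual.get_exp();     // exp(residual)
const CImg<float> Real = Amplitude.get_mul(MyPhase.get_cos());  // A * cos(phi)
const CImg<float> Imag = Amplitude.get_mul(MyPhase.get_sin());  // A * sin(phi)
CImgList<float> tmp(Real, Imag);
CImgList<float> MySaliencyMap = tmp.get_FFT(true);              // inverse FFT
// the saliency map is the squared magnitude of the complex result
const CImg<float> Saliency = MySaliencyMap[0].get_pow(2) + MySaliencyMap[1].get_pow(2);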

I had the same problem. Here is the code that solved it for me; I also edited it to draw a rectangle around the salient object.
#include "highgui.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "cv.h"
#include <stdlib.h>
#include <stdio.h>
#include <string>
#include <iostream>
using namespace cv;
using namespace std;
// function Fourier transform
void fft2(IplImage *src, IplImage *dst);
int main()
{
string imagePath = "inputgambar/34.jpg";
//string imageSave = "saliency/42.jpg";
//string imageRectangular = "rectangular/42.jpg";
IplImage *ImageAsli, *ImageSaliency, *src, *ImageRe, *ImageIm, *Fourier, *Inverse, *LogAmplitude, *Sine, *Cosine;
IplImage *Saliency, *Residual;
IplImage *tmp1, *tmp2, *tmp3;
Mat gambarSave, threshold_output;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
double minNum = 0, maxNum = 0, scale, shift, rata2, nilaiThreshold, Lebar, gantiPixel;
// load the original image (ImageAsli)
ImageAsli = cvLoadImage(imagePath.c_str());
cvNamedWindow("ImageAsli", CV_WINDOW_NORMAL);
cvShowImage("ImageAsli", ImageAsli);
cvMoveWindow("ImageAsli",0,100);
// load the image as single-channel grayscale
//inputImage = cvLoadImage(imagePath.c_str());
src = cvLoadImage(imagePath.c_str(),0);
Lebar = src->width; // Lebar = source image width
gantiPixel = 64/Lebar; // scale factor for a 64 px width (only used in the commented-out resize below)
// Fourier image has 2 channels: real and imaginary
Fourier = cvCreateImage(cvGetSize(src), IPL_DEPTH_64F, 2);
Inverse = cvCreateImage(cvGetSize(src), IPL_DEPTH_64F, 2);
// real and imaginary parts of the spectrum
ImageRe = cvCreateImage (cvGetSize (src), IPL_DEPTH_64F, 1);
ImageIm = cvCreateImage (cvGetSize (src), IPL_DEPTH_64F, 1);
// log amplitude
LogAmplitude = cvCreateImage (cvGetSize (src), IPL_DEPTH_64F, 1);
// sine and cosine of the spectrum phase
Sine = cvCreateImage (cvGetSize (src), IPL_DEPTH_64F, 1);
Cosine = cvCreateImage(cvGetSize(src), IPL_DEPTH_64F, 1);
// spectral residual
Residual = cvCreateImage (cvGetSize (src), IPL_DEPTH_64F, 1);
// Saliency
Saliency = cvCreateImage (cvGetSize (src), src-> depth, src-> nChannels);
// Temporary space
tmp1 = cvCreateImage(cvGetSize(src), IPL_DEPTH_64F, 1);
tmp2 = cvCreateImage(cvGetSize(src), IPL_DEPTH_64F, 1);
tmp3 = cvCreateImage(cvGetSize(src), IPL_DEPTH_64F, 1);
//
scale = 1.0/255.0;
cvConvertScale (src, tmp1, 1, 0);
//
fft2 (tmp1, Fourier);
// put the real and imaginary parts into ImageRe and ImageIm
cvSplit (Fourier, ImageRe, ImageIm, 0, 0);
// Fourier magnitude/amplitude goes into tmp3
cvPow( ImageRe, tmp1, 2.0);
cvPow( ImageIm, tmp2, 2.0);
cvAdd( tmp1, tmp2, tmp3);
cvPow( tmp3, tmp3, 0.5 );
// log amplitude, sine, cosine
cvLog (tmp3, LogAmplitude);
cvDiv (ImageIm, tmp3, Sine);
cvDiv (ImageRe, tmp3, Cosine);
// smoothing: 3x3 mean filter ((1/9)*ones(3)) on LogAmplitude, result stored in tmp3
cvSmooth (LogAmplitude, tmp3, CV_BLUR, 3, 3);
// spectral residual = LogAmplitude - tmp3
cvSub (LogAmplitude, tmp3, Residual);
/************************************************************************
 Inverse Fourier transform of exp(Residual + i*Phase).
 Euler's formula:
   exp(r + i*T) = exp(r)*(cos(T) + i*sin(T)) = exp(r)*cos(T) + i*exp(r)*sin(T)
 with sin(T) = ImageIm / Amplitude and cos(T) = ImageRe / Amplitude (tmp3 above)
************************************************************************/
cvExp(Residual, Residual);
cvMul(Residual, Cosine, tmp1);
cvMul(Residual, Sine, tmp2);
// merge the real (tmp1) and imaginary (tmp2) parts back into the 2-channel Fourier image
cvMerge (tmp1, tmp2, 0, 0, Fourier);
// Inverse Fourier transform
cvDFT (Fourier, Inverse, CV_DXT_INV_SCALE);
cvSplit (Inverse, tmp1, tmp2, 0,0);
// tmp3 = tmp1^2 + tmp2^2 (squared magnitude)
cvPow (tmp1, tmp1, 2);
cvPow (tmp2, tmp2, 2);
cvAdd (tmp1, tmp2, tmp3);
// Gaussian filter 7x7 kernel
cvSmooth (tmp3, tmp3, CV_GAUSSIAN, 7, 7);
// rescale the saliency map to [0, 255]
cvMinMaxLoc (tmp3, &minNum, &maxNum, NULL, NULL);
scale = 255 / (maxNum - minNum);
shift = -minNum * scale;
// end of saliency computation
cvConvertScale(tmp3, Saliency, scale, shift);
// proto-object detection: threshold at 3 * mean saliency
CvScalar rataan = cvAvg(Saliency);
nilaiThreshold = 3 * (rataan.val[0]);
//cout << nilaiThreshold ;
gambarSave = Mat(Saliency);
//imwrite(imageSave.c_str(), gambarSave);
//resize(gambarSave, gambarSave, Size(), gantiPixel, gantiPixel, CV_INTER_AREA);
//ImageSaliency = cvCreateImage(cvSize(Saliency-> width * gantiPixel, Saliency-> height *gantiPixel), Saliency -> depth, Saliency -> nChannels);
//cvResize(Saliency, ImageSaliency, CV_INTER_AREA);
cvNamedWindow("Saliency", CV_WINDOW_NORMAL);
cvShowImage("Saliency", Saliency);
cvMoveWindow("Saliency",0,500);
/// Detect edges using Threshold
threshold( gambarSave, threshold_output, nilaiThreshold, 255, THRESH_BINARY );
/// Find contours
findContours( threshold_output, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) );
/// Find the rotated rectangles
vector<RotatedRect> minRect( contours.size() );
for( int i = 0; i < contours.size(); i++ )
{ minRect[i] = minAreaRect( Mat(contours[i]) );
}
/// Draw rotated rects
for( int i = 0; i< contours.size(); i++ )
{
// rotated rectangle
Point2f rect_points[4]; minRect[i].points( rect_points );
for( int j = 0; j < 4; j++ )
line( gambarSave, rect_points[j], rect_points[(j+1)%4], Scalar(100), 2, 8 );
}
//imwrite(imageRectangular.c_str(), gambarSave);
/// Show in a window
namedWindow( "Rectangular", CV_WINDOW_AUTOSIZE );
imshow( "Rectangular", gambarSave );
cvMoveWindow("Rectangular",480,100);
cvWaitKey(0);
//Release images
cvReleaseImage(&src);
cvReleaseImage(&ImageIm);
cvReleaseImage(&ImageRe);
cvReleaseImage(&Fourier);
cvReleaseImage(&Inverse);
cvReleaseImage(&LogAmplitude);
cvReleaseImage(&Sine);
cvReleaseImage(&Cosine);
cvReleaseImage(&Saliency);
cvReleaseImage(&Residual);
cvReleaseImage(&tmp1);
cvReleaseImage(&tmp2);
cvReleaseImage(&tmp3);
cvReleaseImage(&ImageAsli);
cvDestroyAllWindows();
return 0;
}
//Fourier transform
void fft2(IplImage *src, IplImage *dst)
{
IplImage *image_Re = 0, *image_Im = 0, *Fourier = 0;
//1 channel ImageRe, ImageIm
image_Re = cvCreateImage(cvGetSize(src), IPL_DEPTH_64F, 1);
image_Im = cvCreateImage(cvGetSize(src), IPL_DEPTH_64F, 1);
//2 channels (image_Re, image_Im)
Fourier = cvCreateImage(cvGetSize(src), IPL_DEPTH_64F, 2);
/************************************************* ***********************/
// fill image_Re with the source values
cvConvertScale(src, image_Re, 1, 0);
// initialize the imaginary part to 0
cvZero(image_Im);
// Join real and imaginary parts and stock them in Fourier image
cvMerge(image_Re, image_Im, 0, 0, Fourier);
// forward Fourier transform
cvDFT(Fourier, dst, CV_DXT_FORWARD);
cvReleaseImage(&image_Re);
cvReleaseImage(&image_Im);
cvReleaseImage(&Fourier);
}
Result: http://i60.tinypic.com/5xvmhi.png
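One difference from the Matlab reference in the question: that script downsizes the input to a 64-pixel width before the FFT, while the code above computes gantiPixel but leaves the resize commented out, so it runs on the full-resolution image. A sketch of that step with the same C API (srcSmall is a name introduced here):
// downscale the grayscale input to a 64 px width before fft2(), as in the Matlab reference
IplImage *srcSmall = cvCreateImage(cvSize(64, (int)(src->height * 64.0 / src->width)), src->depth, src->nChannels);
cvResize(src, srcSmall, CV_INTER_AREA);
// ... then run the rest of the pipeline on srcSmall instead of src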

Related

How to rotate a point cloud given 3 points?

I have a 3D depth camera placed above three moving belt lanes and I'm trying to rotate the depth image (or the point cloud) so that the three lanes match the camera's angle. I'm not experienced at all with point clouds, but after some research I've tried to do the following:
Acquire an XYZ cartesian image from the sensor, which I turn into a point cloud vector.
Define three points on the point cloud, one on each of the three lanes.
Fit a plane through them by finding the plane coefficients.
Find the cross product between the plane normal and the z normal, and then find the angle of rotation (a rough sketch of these two steps follows below).
Use the Eigen library to transform the PCL cloud and turn it back into an OpenCV Mat.
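Conceptually, that rotation step boils down to something like this (a rough sketch; alignPlaneToZ is just an illustrative name and does not appear in my actual code below):
#include <cmath>
#include <Eigen/Geometry>
// build the transform that rotates a plane normal onto the z-axis;
// plane_normal would come from the fitted plane coefficients
Eigen::Affine3f alignPlaneToZ(const Eigen::Vector3f &plane_normal)
{
    const Eigen::Vector3f z(0.0f, 0.0f, 1.0f);
    Eigen::Vector3f n = plane_normal.normalized();
    Eigen::Vector3f axis = n.cross(z);                 // rotation axis
    float angle = std::acos(n.dot(z));                 // rotation angle
    Eigen::Affine3f t = Eigen::Affine3f::Identity();
    if (axis.norm() > 1e-6f)                           // already aligned -> identity
        t.rotate(Eigen::AngleAxisf(angle, axis.normalized()));
    return t;                                          // pass to pcl::transformPointCloud
}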
For whatever reason, I always end up with a bad image with max-int values on one side and zeros on the other. I'm not certain anymore if there's something wrong with the code or if the method above is incorrect to start with.
My code so far:
// helper functions
pcl::PointCloud<pcl::PointXYZ>::Ptr MatToPcl(cv::Mat xyzMat);
cv::Mat PclToMat(pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud_ptr);
void colorize(cv::Mat& src, cv::Mat& dst);
void clip(cv::Mat& m, const uint16_t lowerBound, const uint16_t upperBound);
while(1)
{
// camera framegrabber object to capture an image
fg->SWTrigger();
if (!fg->WaitForFrame(im.get(), 2000))
{
throw;
}
// OpenCV Mat declarations
cv::Mat zDepth, zDepthColor;
cv::Mat xyz = im->XYZImage();
vector<cv::Mat> channels(3);
cv::split(xyz, channels);
zDepth = channels[0];
cv::imwrite("xyzMat.png", xyz);
cv::imwrite("depthImage.png", zDepth);
clip(zDepth, 1250, 1400);
colorize(zDepth, zDepthColor);
cv::imwrite("depthColored.png", zDepthColor);
// specify a 3D point on each lane
cv::Point3i p1, p2, p3;
p1.x = w / 4;
p1.y = 24;
p1.z = zDepth.at<uint16_t>(p1.x, p1.y);
p2.x = w / 2;
p2.y = 70;
p2.z = zDepth.at<uint16_t>(p2.x, p2.y);
p3.x = int(w * 0.75);
p3.y = 114;
p3.z = zDepth.at<uint16_t>(p3.x, p3.y);
auto cross = (p2 - p1).cross(p3 - p1);
// transform Mats to point clouds
pcl::PointCloud<pcl::PointXYZ>::Ptr floor_plane, xyzCentered;
floor_plane = MatToPcl(zDepth);
Eigen::Matrix<float, 1, 3> floor_plane_normal_vector, xy_plane_normal_vector, rotation_vector;
floor_plane_normal_vector[0] = cross.x;
floor_plane_normal_vector[1] = cross.y;
floor_plane_normal_vector[2] = cross.z;
// specify the z normal from the xy-plane
xy_plane_normal_vector[0] = 0.0;
xy_plane_normal_vector[1] = 0.0;
xy_plane_normal_vector[2] = 1.0;
// cross product and normalize vector
rotation_vector = xy_plane_normal_vector.cross(floor_plane_normal_vector);
rotation_vector.normalized();
// angle of rotation
float theta = -atan2(rotation_vector.norm(), xy_plane_normal_vector.dot(floor_plane_normal_vector));
// transform plane according to angle
Eigen::Affine3f transform_2 = Eigen::Affine3f::Identity();
transform_2.translation() << 0, 0, 30;
transform_2.rotate(Eigen::AngleAxisf(theta, rotation_vector));
pcl::transformPointCloud(*floor_plane, *xyzCentered, transform_2);
// Pointcloud to Mat again
cv::Mat xyzRot = PclToMat(xyzCentered);
// clipLow and clipHigh values obtained from trackbars
clip(xyzRot, clipLow, clipHigh);
cv::Mat xyzRotColor;
colorize(xyzRot, xyzRotColor);
cv::imshow("result", xyzRotColor);
cv::waitKey(1);
}
pcl::PointCloud<pcl::PointXYZ>::Ptr MatToPcl(cv::Mat xyzMat)
{
/*
* Function: Get from a Mat to pcl pointcloud datatype
* In: cv::Mat
* Out: pcl::PointCloud
*/
//char pr=100, pg=100, pb=100;
pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud_ptr(new pcl::PointCloud<pcl::PointXYZ>);
vector<cv::Mat> channels(3);
cv::split(xyzMat, channels);
for (int i = 0; i < xyzMat.rows; i++)
{
for (int j = 0; j < xyzMat.cols; j++)
{
pcl::PointXYZ point;
point.x = channels[0].at<short>(i,j);
point.y = channels[1].at<short>(i, j);
point.z = channels[2].at<short>(i, j);
// when color needs to be added:
//uint32_t rgb = (static_cast<uint32_t>(pr) << 16 | static_cast<uint32_t>(pg) << 8 | static_cast<uint32_t>(pb));
//point.rgb = *reinterpret_cast<float*>(&rgb);
point_cloud_ptr->points.push_back(point);
}
}
point_cloud_ptr->width = (int)point_cloud_ptr->points.size();
/*point_cloud_ptr->height = 1;*/
return point_cloud_ptr;
}
// convert PCL to cv::Mat, taking only the depth values at z.
cv::Mat PclToMat(pcl::PointCloud<pcl::PointXYZ>::Ptr point_cloud_ptr)
{
cv::Mat depth_image;
if (!depth_image.empty())
depth_image.release();
depth_image.create(132, 176, CV_32F);
int count = 0;
for (int i = 0; i < 132; i++)
{
for (int j = 0; j < 176; j++)
{
depth_image.at<float>(i, j) = point_cloud_ptr->points.at(count++).z;
}
}
depth_image.convertTo(depth_image, CV_16UC1);
return depth_image;
}
/*
* For display purposes with cv::imshow, will convert a 16bit depth image to 8bit 3 channel colored image
* thanks to fmw42 for the function at https://stackoverflow.com/a/67678634/13184944
*/
void colorize(cv::Mat& src, cv::Mat& dst)
{
// stretch the image by rescaling intensity within the output 8-bit range
double oldMin;
double oldMax;
cv::Point minLoc;
cv::Point maxLoc;
cv::minMaxLoc(src, &oldMin, &oldMax, &minLoc, &maxLoc);
double oldRange = oldMax - oldMin;
double newMin = 0.0;
double newMax = 255.0;
double newRange = newMax - newMin;
//cout << oldMin << ' ' << oldMax << ' ' << oldRange << '\n';
// clip the values of the image to the required range
clip(src, oldMin, oldMax);
//TODO: Look at difference between OpenCV normalization and skimage
normalize(src, dst, 0, 255, NORM_MINMAX, CV_8UC1);
//img = (img - cv::Scalar(oldMin)) / (cv::Scalar(oldRange));
//img = (img * cv::Scalar(newRange)) + cv::Scalar(newMin);
cv::Mat channels[3] = { dst, dst, dst };
cv::merge(channels, 3, dst);
cv::Mat C(1, 6, CV_8UC(3));
cv::Vec3b color1 = { 0, 0, 255 };
cv::Vec3b color2 = { 0, 165, 255 };
cv::Vec3b color3 = { 0, 255, 255 };
cv::Vec3b color4 = { 255, 255, 0 };
cv::Vec3b color5 = { 255, 0, 0 };
cv::Vec3b color6 = { 128, 64, 64 };
C.at<cv::Vec3b>(0, 0) = color1;
C.at<cv::Vec3b>(0, 1) = color2;
C.at<cv::Vec3b>(0, 2) = color3;
C.at<cv::Vec3b>(0, 3) = color4;
C.at<cv::Vec3b>(0, 4) = color5;
C.at<cv::Vec3b>(0, 5) = color6;
cv::Mat lut;
cv::resize(C, lut, cv::Size(256, 1), 0.0, 0.0, cv::INTER_LINEAR);
//cout << lut.size << '\n';
cv::LUT(dst, lut, dst);
return;
}
void clip(cv::Mat& m, const uint16_t lowerBound, const uint16_t upperBound)
{
m.setTo(lowerBound, m < lowerBound);
m.setTo(upperBound, m > upperBound);
return;
}
Apologies if this is really basic or something is obviously wrong, but I feel stuck here. I also tried segmentation with RANSAC, but it never aligns the plane in the way I wanted.
Thanks!
Edit: Updated the code to include additional steps and functions. Only the camera initialization is skipped.
The clip and colorize functions aid in displaying the 16-bit depth image. My end goal here is to be able to use trackbars with clip(zImg, low, high) so that the three lanes always stay vertically aligned (as in, they change color at the same rate) as I change the clip values.
download link with image files: link
Colorized depth image:

Wiener deconvolution using OpenCV

I have developed a way to estimate the point spread function (PSF) of a motion blur, and now I'd like to use the PSF to perform deconvolution. I decided to use the Wiener method.
cv::Mat deconvolution(cv::Mat input, cv::Mat kernel){
cv::Mat Fin, Fkern, padded_kern, Fdeblur,out;
cv::normalize(kernel,kernel);
cv::dft(input,Fin,cv::DFT_COMPLEX_OUTPUT);
cv::copyMakeBorder(kernel,padded_kern,0,Fin.rows-kernel.rows,0,Fin.cols-kernel.cols,cv::BORDER_CONSTANT,cv::Scalar::all(0));
cv::dft(padded_kern,Fkern,cv::DFT_COMPLEX_OUTPUT);
cv::mulSpectrums(Fin,Fkern,Fdeblur,0,true);
cv::dft(Fdeblur,out,cv::DFT_INVERSE|cv::DFT_REAL_OUTPUT);
cv::normalize(out,out,0, 1, CV_MINMAX);
return out;
}
However, even after setting the last option of cv::mulSpectrums(Fin,Fkern,Fdeblur,0,true); to true, I still seem to be performing a normal convolution. Shouldn't that last true option mean that I am multiplying by the conjugate and therefore dividing by the kernel?
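For reference, multiplying one spectrum by the conjugate of another gives cross-correlation, not deconvolution; Wiener deconvolution instead divides by the kernel spectrum with a regularization constant $k$:
$$\hat{F}(u,v) = \frac{\overline{H}(u,v)}{|H(u,v)|^{2} + k}\,G(u,v)$$
where $G$ is the spectrum of the blurred image and $H$ the spectrum of the PSF. This per-element division is what the wienerFilter() in the answer below implements.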
Just implemented this filter:
#include <windows.h>
#include <algorithm>
#include <complex>
#include <fstream>
#include <iostream>
#include <iterator>
#include <stdio.h>
#include <vector>
#include "opencv2/opencv.hpp"
using namespace std;
using namespace cv;
void Recomb(Mat &src, Mat &dst)
{
int cx = src.cols >> 1;
int cy = src.rows >> 1;
Mat tmp;
tmp.create(src.size(), src.type());
src(Rect(0, 0, cx, cy)).copyTo(tmp(Rect(cx, cy, cx, cy)));
src(Rect(cx, cy, cx, cy)).copyTo(tmp(Rect(0, 0, cx, cy)));
src(Rect(cx, 0, cx, cy)).copyTo(tmp(Rect(0, cy, cx, cy)));
src(Rect(0, cy, cx, cy)).copyTo(tmp(Rect(cx, 0, cx, cy)));
dst = tmp;
}
void convolveDFT(Mat& A, Mat& B, Mat& C)
{
// reallocate the output array if needed
C.create(abs(A.rows - B.rows) + 1, abs(A.cols - B.cols) + 1, A.type());
Size dftSize;
// compute the size of DFT transform
dftSize.width = getOptimalDFTSize(A.cols + B.cols - 1);
dftSize.height = getOptimalDFTSize(A.rows + B.rows - 1);
// allocate temporary buffers and initialize them with 0's
Mat tempA(dftSize, A.type(), Scalar::all(0));
Mat tempB(dftSize, B.type(), Scalar::all(0));
// copy A and B to the top-left corners of tempA and tempB, respectively
Mat roiA(tempA, Rect(0, 0, A.cols, A.rows));
A.copyTo(roiA);
Mat roiB(tempB, Rect(0, 0, B.cols, B.rows));
B.copyTo(roiB);
// now transform the padded A & B in-place;
// use "nonzeroRows" hint for faster processing
dft(tempA, tempA, 0, A.rows);
dft(tempB, tempB, 0, A.rows);
// multiply the spectrums;
// the function handles packed spectrum representations well
mulSpectrums(tempA, tempB, tempA, 0);
// transform the product back from the frequency domain.
// Even though all the result rows will be non-zero,
// you need only the first C.rows of them, and thus you
// pass nonzeroRows == C.rows
dft(tempA, tempA, DFT_INVERSE + DFT_SCALE);
// now copy the result back to C.
C = tempA(Rect((dftSize.width - A.cols) / 2, (dftSize.height - A.rows) / 2, A.cols, A.rows)).clone();
// all the temporary buffers will be deallocated automatically
}
//----------------------------------------------------------
// Compute Re and Im planes of FFT from Image
//----------------------------------------------------------
void ForwardFFT(Mat &Src, Mat *FImg)
{
int M = getOptimalDFTSize(Src.rows);
int N = getOptimalDFTSize(Src.cols);
Mat padded;
copyMakeBorder(Src, padded, 0, M - Src.rows, 0, N - Src.cols, BORDER_CONSTANT, Scalar::all(0));
Mat planes[] = { Mat_<double>(padded), Mat::zeros(padded.size(), CV_64FC1) };
Mat complexImg;
merge(planes, 2, complexImg);
dft(complexImg, complexImg);
split(complexImg, planes);
// crop result
planes[0] = planes[0](Rect(0, 0, Src.cols, Src.rows));
planes[1] = planes[1](Rect(0, 0, Src.cols, Src.rows));
FImg[0] = planes[0].clone();
FImg[1] = planes[1].clone();
}
//----------------------------------------------------------
// Compute image from Re and Im parts of FFT
//----------------------------------------------------------
void InverseFFT(Mat *FImg, Mat &Dst)
{
Mat complexImg;
merge(FImg, 2, complexImg);
dft(complexImg, complexImg, DFT_INVERSE + DFT_SCALE);
split(complexImg, FImg);
Dst = FImg[0];
}
//----------------------------------------------------------
// wiener Filter
//----------------------------------------------------------
void wienerFilter(Mat &src, Mat &dst, Mat &_h, double k)
{
//---------------------------------------------------
// Small epsilon to avoid division by zero
//---------------------------------------------------
const double eps = 1E-8;
//---------------------------------------------------
int ImgW = src.size().width;
int ImgH = src.size().height;
//--------------------------------------------------
Mat Yf[2];
ForwardFFT(src, Yf);
//--------------------------------------------------
Mat h = Mat::zeros(ImgH, ImgW, CV_64FC1);
int padx = h.cols - _h.cols;
int pady = h.rows - _h.rows;
copyMakeBorder(_h, h, pady / 2, pady - pady / 2, padx / 2, padx - padx / 2, BORDER_CONSTANT, Scalar::all(0));
Mat Hf[2];
ForwardFFT(h, Hf);
//--------------------------------------------------
Mat Fu[2];
Fu[0] = Mat::zeros(ImgH, ImgW, CV_64FC1);
Fu[1] = Mat::zeros(ImgH, ImgW, CV_64FC1);
complex<double> a;
complex<double> b;
complex<double> c;
double Hf_Re;
double Hf_Im;
double Phf;
double hfz;
double hz;
double A;
for (int i = 0; i < h.rows; i++)
{
for (int j = 0; j < h.cols; j++)
{
Hf_Re = Hf[0].at<double>(i, j);
Hf_Im = Hf[1].at<double>(i, j);
Phf = Hf_Re*Hf_Re + Hf_Im*Hf_Im;
hfz = (Phf < eps)*eps;
hz = (h.at<double>(i, j) > 0);
A = Phf / (Phf + hz + k);
a = complex<double>(Yf[0].at<double>(i, j), Yf[1].at<double>(i, j));
b = complex<double>(Hf_Re + hfz, Hf_Im + hfz);
c = a / b; // deconvolution; the hfz/eps terms above guard against division by zero
Fu[0].at<double>(i, j) = (c.real()*A);
Fu[1].at<double>(i, j) = (c.imag()*A);
}
}
InverseFFT(Fu, dst);
Recomb(dst, dst);
}
// ---------------------------------
//
// ---------------------------------
int main(int argc, char** argv)
{
namedWindow("Image");
namedWindow("Kernel");
namedWindow("Result");
Mat Img = imread("F:\\ImagesForTest\\lena.jpg", 0); // Source image
Img.convertTo(Img, CV_32FC1, 1.0 / 255.0);
Mat kernel = imread("F:\\ImagesForTest\\Point.jpg", 0); // PSF
//resize(kernel, kernel, Size(), 0.5, 0.5);
kernel.convertTo(kernel, CV_32FC1, 1.0 / 255.0);
float kernel_sum = cv::sum(kernel)[0];
kernel /= kernel_sum;
int width = Img.cols;
int height = Img.rows;
Mat resim;
convolveDFT(Img, kernel, resim);
Mat resim2;
kernel.convertTo(kernel, CV_64FC1);
// Apply filter
wienerFilter(resim, resim2, kernel, 0.01);
imshow("Результат фильтрации", resim2);
imshow("Kernel", kernel * 255);
imshow("Image", Img);
imshow("Result", resim);
cvWaitKey(0);
}
Results look like this (as you can see, it is not a 100% restoration):

How to detect squares in video with OpenCV?

So I combined squares.cpp with cvBoundingRect.cpp code to detect squares in video. I therefore had to convert from IplImage to Mat so that the findSquares and drawSquares methods could run (by using the cvarrToMat function). Unfortunately, after successful compilation, I get this error at runtime:
OpenCV Error: Assertion failed (j < nsrcs && src[j].depth() == depth) in mixChannels, file /Users/Desktop/opencv-3.0.0-rc1/modules/core/src/convert.cpp, line 1205
libc++abi.dylib: terminating with uncaught exception of type cv::Exception: /Users/Desktop/opencv-3.0.0-rc1/modules/core/src/convert.cpp:1205: error: (-215) j < nsrcs && src[j].depth() == depth in function mixChannels
Abort trap: 6
Here's the code:
#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <math.h>
#include <string.h>
using namespace cv;
using namespace std;
int thresh = 50, N = 11;
const char* wndname = "Square Detection Demo";
// finds a cosine of angle between vectors
// from pt0->pt1 and from pt0->pt2
static double angle( Point pt1, Point pt2, Point pt0 )
{
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1*dx2 + dy1*dy2)/sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}
// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
static void findSquares( const Mat& image, vector<vector<Point> >& squares )
{
squares.clear();
Mat pyr, timg, gray0(image.size(), CV_8U), gray;
// down-scale and upscale the image to filter out the noise
pyrDown(image, pyr, Size(image.cols/2, image.rows/2));
pyrUp(pyr, timg, image.size());
vector<vector<Point> > contours;
// find squares in every color plane of the image
for( int c = 0; c < 3; c++ )
{
int ch[] = {c, 0};
mixChannels(&timg, 1, &gray0, 1, ch, 1);
// try several threshold levels
for( int l = 0; l < N; l++ )
{
// hack: use Canny instead of zero threshold level.
// Canny helps to catch squares with gradient shading
if( l == 0 )
{
// apply Canny. Take the upper threshold from slider
// and set the lower to 0 (which forces edges merging)
Canny(gray0, gray, 0, thresh, 5);
// dilate canny output to remove potential
// holes between edge segments
dilate(gray, gray, Mat(), Point(-1,-1));
}
else
{
// apply threshold if l!=0:
// tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
gray = gray0 >= (l+1)*255/N;
}
// find contours and store them all as a list
findContours(gray, contours, RETR_LIST, CHAIN_APPROX_SIMPLE);
vector<Point> approx;
// test each contour
for( size_t i = 0; i < contours.size(); i++ )
{
// approximate contour with accuracy proportional
// to the contour perimeter
approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true)*0.02, true);
// square contours should have 4 vertices after approximation
// relatively large area (to filter out noisy contours)
// and be convex.
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
if( approx.size() == 4 &&
fabs(contourArea(Mat(approx))) > 1000 &&
isContourConvex(Mat(approx)) )
{
double maxCosine = 0;
for( int j = 2; j < 5; j++ )
{
// find the maximum cosine of the angle between joint edges
double cosine = fabs(angle(approx[j%4], approx[j-2], approx[j-1]));
maxCosine = MAX(maxCosine, cosine);
}
// if cosines of all angles are small
// (all angles are ~90 degrees) then write the quadrangle
// vertices to the resultant sequence
if( maxCosine < 0.3 )
squares.push_back(approx);
}
}
}
}
}
// the function draws all the squares in the image
static void drawSquares( Mat& image, const vector<vector<Point> >& squares )
{
for( size_t i = 0; i < squares.size(); i++ )
{
const Point* p = &squares[i][0];
int n = (int)squares[i].size();
polylines(image, &p, &n, 1, true, Scalar(255,0,0), 3, LINE_AA);
}
imshow(wndname, image);
}
CvRect rect;
CvSeq* contours = 0;
CvMemStorage* storage = NULL;
CvCapture *cam;
IplImage *currentFrame, *currentFrame_grey, *differenceImg, *oldFrame_grey;
bool first = true;
int main(int argc, char* argv[])
{
//Create a new movie capture object.
cam = cvCaptureFromCAM(0);
//create storage for contours
storage = cvCreateMemStorage(0);
//capture current frame from webcam
currentFrame = cvQueryFrame(cam);
//Size of the image.
CvSize imgSize;
imgSize.width = currentFrame->width;
imgSize.height = currentFrame->height;
//Images to use in the program.
currentFrame_grey = cvCreateImage( imgSize, IPL_DEPTH_8U, 1);
namedWindow( wndname, 1 );
vector<vector<Point> > squares;
while(1)
{
currentFrame = cvQueryFrame( cam );
if( !currentFrame ) break;
//Convert the image to grayscale.
cvCvtColor(currentFrame,currentFrame_grey,CV_RGB2GRAY);
if(first) //Capturing Background for the first time
{
differenceImg = cvCloneImage(currentFrame_grey);
oldFrame_grey = cvCloneImage(currentFrame_grey);
cvConvertScale(currentFrame_grey, oldFrame_grey, 1.0, 0.0);
first = false;
continue;
}
//Minus the current frame from the moving average.
cvAbsDiff(oldFrame_grey,currentFrame_grey,differenceImg);
//blurring the difference image
cvSmooth(differenceImg, differenceImg, CV_BLUR);
//apply threshold to discard small unwanted movements
cvThreshold(differenceImg, differenceImg, 25, 255, CV_THRESH_BINARY);
//find contours
cv::Mat diffImg = cv::cvarrToMat(differenceImg);
cv::Mat currFrame = cv::cvarrToMat(currentFrame);
findSquares(diffImg, squares);
//draw bounding box around each contour
drawSquares(currFrame, squares);
//display colour image with bounding box
cvShowImage("Output Image", currentFrame);
//display threshold image
cvShowImage("Difference image", differenceImg);
//New Background
cvConvertScale(currentFrame_grey, oldFrame_grey, 1.0, 0.0);
//clear memory and contours
cvClearMemStorage( storage );
contours = 0;
//press Esc to exit
char c = cvWaitKey(33);
if( c == 27 ) break;
}
// Destroy the image & movies objects
cvReleaseImage(&oldFrame_grey);
cvReleaseImage(&differenceImg);
cvReleaseImage(&currentFrame);
cvReleaseImage(&currentFrame_grey);
return 0;
}
As the error message says, your problem is in cv::mixChannels(). See documentation.
Or you could simply do something like
cv::Mat channels[3];
cv::split(multiChannelImage, channels);
and then access each channel using
cv::Mat currChannel = channels[channelNumber];
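Most likely the concrete trigger here is that findSquares() assumes a 3-channel input, but in the video loop it is called on the single-channel diffImg, so mixChannels() is asked for channels 1 and 2 that do not exist. A sketch of one way to guard against that inside findSquares():
// iterate only over the channels the input actually has, so a grayscale
// image no longer trips the mixChannels assertion
int nChannels = timg.channels();
for( int c = 0; c < nChannels; c++ )
{
    int ch[] = {c, 0};
    mixChannels(&timg, 1, &gray0, 1, ch, 1);
    // ... rest of the per-channel loop unchanged ...
}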

Local normalization in OpenCV

I'm trying to implement a local normalization algorithm in OpenCV to reduce the difference in illumination across an image. I found a MATLAB function and implemented it in OpenCV, but the result I get is different from the one given by the MATLAB function.
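For reference, the operation both code listings below compute can be written as
$$g(x,y) = \frac{f(x,y) - (f * G_{\sigma_1})(x,y)}{\sqrt{\big((f - f * G_{\sigma_1})^{2} * G_{\sigma_2}\big)(x,y)}}$$
where $f$ is the grayscale input, $G_{\sigma}$ a Gaussian kernel and $*$ denotes convolution.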
This is my code:
Mat localNorm(Mat image, float sigma1, float sigma2)
{
Mat floatGray, blurred1, blurred2, temp1, temp2, res;
image.convertTo(floatGray, CV_32FC1);
floatGray = floatGray/255.0;
int blur1 = 2*ceil(-NormInv(0.05, 0, sigma1))+1;
cv::GaussianBlur(floatGray, blurred1, cv::Size(blur1,blur1), sigma1);
temp1 = floatGray-blurred1;
cv::pow(temp1, 2.0, temp2);
int blur2 = 2*ceil(-NormInv(0.05, 0, sigma2))+1;
cv::GaussianBlur(temp2, blurred2, cv::Size(blur2,blur2), sigma2);
cv::pow(blurred2, 0.5, temp2);
floatGray = temp1/temp2;
floatGray = 255.0*floatGray;
floatGray.convertTo(res, CV_8UC1);
return res;
}
The function NormInv is the C++ implementation given by Euan Dean in this post.
The following shows the result that I am getting and the theoretical result, for the same values of sigma1 and sigma2 (2.0 and 20.0, respectively)
I have tried using different values for sigma1 and sigma2, but none of them seem to work. I have also tried doing blur1=0 and blur2=0 in the Gaussian function but it doesn't work either.
Any help would be appreciated. Thanks in advance.
You need to normalize the image to the range [0, 255] before converting it to CV_8UC1.
Here is my implementation (I am using sigma1=2, sigma2=20):
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
using namespace cv;
int main(int argc, char** argv)
{
Mat img, gray, float_gray, blur, num, den;
// Load color image
img = cv::imread("lena.png", 1);
if( !img.data ) {
return -1;
}
// convert to grayscale
cv::cvtColor(img, gray, CV_BGR2GRAY);
// convert to floating-point image
gray.convertTo(float_gray, CV_32F, 1.0/255.0);
// numerator = img - gauss_blur(img)
cv::GaussianBlur(float_gray, blur, Size(0,0), 2, 2);
num = float_gray - blur;
// denominator = sqrt(gauss_blur(img^2))
cv::GaussianBlur(num.mul(num), blur, Size(0,0), 20, 20);
cv::pow(blur, 0.5, den);
// output = numerator / denominator
gray = num / den;
// normalize output into [0,1]
cv::normalize(gray, gray, 0.0, 1.0, NORM_MINMAX, -1);
// Display
namedWindow("demo", CV_WINDOW_AUTOSIZE );
imshow("demo", gray);
waitKey(0);
return 0;
}
The result as expected:
Note that you can specify the kernel size as Size(0,0) and it will be computed from the sigma values.
Here is a Python implementation of the same algorithm:
import cv2
import numpy as np
img = cv2.imread('/home/anmol/Downloads/lena.png')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
float_gray = gray.astype(np.float32) / 255.0
blur = cv2.GaussianBlur(float_gray, (0, 0), sigmaX=2, sigmaY=2)
num = float_gray - blur
blur = cv2.GaussianBlur(num*num, (0, 0), sigmaX=20, sigmaY=20)
den = cv2.pow(blur, 0.5)
gray = num / den
gray = cv2.normalize(gray, dst=gray, alpha=0.0, beta=1.0, norm_type=cv2.NORM_MINMAX)
cv2.imwrite("./debug.png", gray * 255)
Output:

logic behind the code

This is from the OpenCV Hough lines sample. Can anyone explain why, after converting to Cartesian coordinates, they add 1000*(-b) and 1000*(a)?
#include <cv.h>
#include <highgui.h>
#include <math.h>
int main(int argc, char** argv)
{
IplImage* src;
if( argc == 2 && (src=cvLoadImage(argv[1], 0))!= 0)
{
IplImage* dst = cvCreateImage( cvGetSize(src), 8, 1 );
IplImage* color_dst = cvCreateImage( cvGetSize(src), 8, 3 );
CvMemStorage* storage = cvCreateMemStorage(0);
CvSeq* lines = 0;
int i;
cvCanny( src, dst, 50, 200, 3 );
cvCvtColor( dst, color_dst, CV_GRAY2BGR );
#if 1
lines = cvHoughLines2( dst,
storage,
CV_HOUGH_STANDARD,
1,
CV_PI/180,
100,
0,
0 );
for( i = 0; i < MIN(lines->total,100); i++ )
{
float* line = (float*)cvGetSeqElem(lines,i);
float rho = line[0];
float theta = line[1];
CvPoint pt1, pt2;
double a = cos(theta), b = sin(theta);
double x0 = a*rho, y0 = b*rho;
pt1.x = cvRound(x0 + 1000*(-b));
pt1.y = cvRound(y0 + 1000*(a));
pt2.x = cvRound(x0 - 1000*(-b));
pt2.y = cvRound(y0 - 1000*(a));
cvLine( color_dst, pt1, pt2, CV_RGB(255,0,0), 3, 8 );
}
Cos and sin go from -1 to +1, so the origin of the Hough accumulator space is at 0,0.
Assuming your display has a positive size, it's convenient to have the centre of the plot in the middle of the screen.
Perhaps they wanted to get corners of the bounding rectangle around a given center?
It is a hack.
Try this. Run the example as is. Remove the 4 instances of 1000. You will get points instead of lines.
Put in 750 instead of 1000. You get the same result as if you had put in 1000.
The 1000 is to make sure the lines get drawn across the image. You could also do the following, which is a little better:
Right after HoughLines(...) is called, add the following:
int h = src.rows;
int w = src.cols;
int factor = (int) (sqrt(h * h + w * w)); // diagonal length of the image, maximum line length
Then instead of 1000, multiply by factor. If your image is greater than 1000x1000, the original code won't work.
Roy
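A sketch of that change applied to the drawing code from the question (factor as computed above):
double a = cos(theta), b = sin(theta);
double x0 = a*rho, y0 = b*rho;
CvPoint pt1, pt2;
// scale by the image diagonal instead of the fixed 1000, so the
// endpoints always fall outside the image whatever its size
pt1.x = cvRound(x0 + factor*(-b));
pt1.y = cvRound(y0 + factor*(a));
pt2.x = cvRound(x0 - factor*(-b));
pt2.y = cvRound(y0 - factor*(a));
cvLine( color_dst, pt1, pt2, CV_RGB(255,0,0), 3, 8 );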