Logic behind the code - C++

This is from the OpenCV Hough lines sample. Can anyone explain to me why, after changing it to Cartesian coordinates, they add ±1000*(-b) and ±1000*(a) to x0 and y0?
#include <cv.h>
#include <highgui.h>
#include <math.h>

int main(int argc, char** argv)
{
    IplImage* src;
    if( argc == 2 && (src = cvLoadImage(argv[1], 0)) != 0 )
    {
        IplImage* dst = cvCreateImage( cvGetSize(src), 8, 1 );
        IplImage* color_dst = cvCreateImage( cvGetSize(src), 8, 3 );
        CvMemStorage* storage = cvCreateMemStorage(0);
        CvSeq* lines = 0;
        int i;
        cvCanny( src, dst, 50, 200, 3 );
        cvCvtColor( dst, color_dst, CV_GRAY2BGR );
#if 1
        lines = cvHoughLines2( dst, storage, CV_HOUGH_STANDARD,
                               1, CV_PI/180, 100, 0, 0 );
        for( i = 0; i < MIN(lines->total, 100); i++ )
        {
            float* line = (float*)cvGetSeqElem(lines, i);
            float rho = line[0];
            float theta = line[1];
            CvPoint pt1, pt2;
            double a = cos(theta), b = sin(theta);
            double x0 = a*rho, y0 = b*rho;
            pt1.x = cvRound(x0 + 1000*(-b));
            pt1.y = cvRound(y0 + 1000*(a));
            pt2.x = cvRound(x0 - 1000*(-b));
            pt2.y = cvRound(y0 - 1000*(a));
            cvLine( color_dst, pt1, pt2, CV_RGB(255,0,0), 3, 8 );
        }
#endif
        cvNamedWindow( "Hough", 1 );
        cvShowImage( "Hough", color_dst );
        cvWaitKey(0);
    }
    return 0;
}

cos and sin range from -1 to +1, so the origin of the Hough accumulator space is at (0,0). Assuming your display has a positive size, it's convenient to have the centre of the plot in the middle of the screen.
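To spell out the geometry: (x0, y0) = (rho*cos(theta), rho*sin(theta)) is the point on the line closest to the origin, and (-sin(theta), cos(theta)) is a unit vector pointing along the line, so stepping 1000 pixels from (x0, y0) in both directions yields two endpoints 2000 px apart. Rewriting the snippet with that reading (same variables as the question's code):

double a = cos(theta), b = sin(theta);   // (a, b) is the unit normal of the line
double x0 = a * rho, y0 = b * rho;       // foot of the perpendicular from the origin
// step 1000 px along the line in both directions
CvPoint pt1 = cvPoint(cvRound(x0 - 1000 * b), cvRound(y0 + 1000 * a));
CvPoint pt2 = cvPoint(cvRound(x0 + 1000 * b), cvRound(y0 - 1000 * a));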

Perhaps they wanted to get the corners of the bounding rectangle around a given center?

It is a hack.
Try this. Run the example as is. Remove the 4 instances of 1000. You will get points instead of lines.
Put in 750 instead of 1000. You get the same result as if you had put in 1000.
The 1000 is to make sure the lines get drawn across the image. You could also do the following, which is
a little better:
Right after HoughLines(...) is called, add the following:
int h = src.rows;
int w = src.cols;
int factor = (int) (sqrt(h * h + w * w)); // diagonal length of the image, maximum line length
Then instead of 1000, multiply by factor. If your image is greater than 1000x1000, the original
code won't work.
Roy
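A minimal sketch of Roy's change using the C++ API (the function name drawFullLine and the colour are ours, not from the thread):

#include <opencv2/imgproc.hpp>
#include <cmath>

// Draw a rho/theta line so it always spans the whole image, scaling the
// direction vector by the image diagonal instead of the hard-coded 1000.
static void drawFullLine(cv::Mat& img, float rho, float theta)
{
    double a = std::cos(theta), b = std::sin(theta);
    double x0 = a * rho, y0 = b * rho;               // closest point to the origin
    double factor = std::sqrt((double)img.rows * img.rows +
                              (double)img.cols * img.cols); // diagonal length
    cv::Point pt1(cvRound(x0 + factor * (-b)), cvRound(y0 + factor * a));
    cv::Point pt2(cvRound(x0 - factor * (-b)), cvRound(y0 - factor * a));
    cv::line(img, pt1, pt2, cv::Scalar(0, 0, 255), 3);
}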

Related

c++, opencv: HoughLines() doesn't receive the values passed by trackbars

I'm supposed to detect the two white lines of the road with the function HoughLines. I use three trackbars in order to find the best parameters to detect ONLY the two white lines of the road. I have tried the code below, but the problem is that even if I change the values of the trackbars, the image doesn't update; it stays at the first values. I'm using OpenCV with C++.
Without trackbars it works, but it's almost impossible to find good values without them, because I don't know how to tune the parameters and the image is pretty complex.
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/calib3d.hpp>
using namespace cv;
using namespace std;
const int kernel_size = 3;
Mat src, src_gray;
Mat dst, detected_edges;
Mat cdst;
int slider_value_one;
int slider_value_two;
int slider_value_three;
vector<Vec2f> lines; // will hold the results of the detection
static void Hough_transform(int, void*)
{
    // Standard Hough Line Transform
    HoughLines(detected_edges, lines, 1, CV_PI/180, 130, slider_value_one, slider_value_two); // runs the actual detection
    // Draw the lines
    for( size_t i = 0; i < lines.size(); i++ )
    {
        float rho = lines[i][0], theta = lines[i][1];
        Point pt1, pt2;
        double a = cos(theta), b = sin(theta);
        double x0 = a*rho, y0 = b*rho;
        pt1.x = cvRound(x0 + 1000*(-b));
        pt1.y = cvRound(y0 + 1000*(a));
        pt2.x = cvRound(x0 - 1000*(-b));
        pt2.y = cvRound(y0 - 1000*(a));
        line( cdst, pt1, pt2, Scalar(0,0,255), 3, CV_AA);
    }
    // printing
    imshow("standard Hough Line Transform", cdst);
} // Hough_transform
int main(int argc, const char * argv[]) {
    //----- Loads an image
    src = imread("/Users/massimilianolorenzin/Documents/Progetti\ XCode/lab4/lab4/lab4/images/road2.png");
    /// ---- CANNY DETECTOR
    /// Convert the image to grayscale
    cvtColor( src, src_gray, CV_BGR2GRAY);
    /// Reduce noise with a 3x3 kernel
    blur( src_gray, detected_edges, Size(3,3) );
    /// Canny detector
    Canny( detected_edges, detected_edges, 150, 450, kernel_size );
    // Copy edges to the image that will display the results in BGR
    cvtColor(detected_edges, cdst, COLOR_GRAY2BGR);
    /// ---- HOUGH LINE TRANSFORM
    namedWindow("standard Hough Line Transform"); // Create Window
    // first TrackBar
    createTrackbar( "First Par", "standard Hough Line Transform", &slider_value_one, 200, Hough_transform);
    Hough_transform(slider_value_one, 0 );
    // second TrackBar
    createTrackbar( "Second Par", "standard Hough Line Transform", &slider_value_two, 100, Hough_transform);
    Hough_transform(slider_value_two, 0 );
    // third TrackBar
    createTrackbar( "Third Par", "standard Hough Line Transform", &slider_value_three, 100, Hough_transform);
    Hough_transform( slider_value_three, 0 );
    // printing
    imshow("Input Image", src);
    imshow("edges", detected_edges);
    waitKey(0);
    return 0;
}
HoughLines() doesn't seem to respond to the values set with the trackbars. The window appears normally, and slider_value_one, slider_value_two and slider_value_three hold the right values (I printed them and checked), so I don't understand why HoughLines() doesn't take the passed values.
Above is the input image, while below is the final output. In this step I am asked just to create the two lines at the sides of the road; colouring the area between the two lines (like in the photo) is requested in the next step.
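One thing worth noting: in cv::HoughLines the fifth argument is the accumulator threshold, and in the code above it is hard-coded to 130 while the slider values land in the srn/stn refinement parameters, so moving the sliders cannot change the detection threshold. A hypothetical reworked callback (our sketch, not from the thread) that wires the first slider to the threshold and redraws on a fresh copy of cdst might look like this:

static void Hough_transform(int, void*)
{
    Mat display = cdst.clone();          // fresh canvas, so old lines don't accumulate
    vector<Vec2f> found;
    HoughLines(detected_edges, found, 1, CV_PI/180,
               max(slider_value_one, 1)); // accumulator threshold driven by the slider
    for( size_t i = 0; i < found.size(); i++ )
    {
        float rho = found[i][0], theta = found[i][1];
        double a = cos(theta), b = sin(theta);
        double x0 = a*rho, y0 = b*rho;
        Point pt1(cvRound(x0 + 1000*(-b)), cvRound(y0 + 1000*(a)));
        Point pt2(cvRound(x0 - 1000*(-b)), cvRound(y0 - 1000*(a)));
        line( display, pt1, pt2, Scalar(0,0,255), 3, CV_AA);
    }
    imshow("standard Hough Line Transform", display);
}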

OpenCV with HoughLines (Visual Studio C++): _CrtIsValidHeapPointer(pUserData) Breakpoint Error

I am still pretty new to OpenCV and I've just recently come across the HoughLinesP function. First and foremost, my goal is to write code that will detect rectangles in a webcam feed. Currently, the code I have below only detects lines in general. However, I still have problems while debugging the program. Here is my code:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
using namespace cv;
using namespace std;
int main() {
    int erosion_size = 0;
    VideoCapture cam(0);
    if (!cam.isOpened()) {
        cout << "cannot open camera";
        return -1; // bail out instead of reading from a closed camera
    }
    while (true) {
        Mat frame;
        cam.read(frame);
        Mat gray, edge, draw, die;
        cvtColor(frame, gray, CV_BGR2GRAY);
        Canny(gray, edge, 100, 150, 3);
        edge.convertTo(draw, CV_8U);
        dilate(draw, die, Mat(), Point(-1, -1), 2, 1, 1);
        erode(die, die, Mat(), Point(-1, -1), 1, 1, 1);
#if 0
        vector<Vec2f> lines;
        HoughLines(die, lines, 1, CV_PI / 180, 100, 0, 0);
        for (size_t i = 0; i < lines.size(); i++)
        {
            float rho = lines[i][0], theta = lines[i][1];
            Point pt1, pt2;
            double a = cos(theta), b = sin(theta);
            double x0 = a*rho, y0 = b*rho;
            pt1.x = cvRound(x0 + 1000 * (-b));
            pt1.y = cvRound(y0 + 1000 * (a));
            pt2.x = cvRound(x0 - 1000 * (-b));
            pt2.y = cvRound(y0 - 1000 * (a));
            line(frame, pt1, pt2, Scalar(0, 0, 255), 3, CV_AA);
        }
#else
        vector<Vec4i> lines;
        HoughLinesP(die, lines, 1, CV_PI / 180, 200, 50, 10);
        for (size_t i = 0; i < lines.size(); i++)
        {
            Vec4i l = lines[i];
            line(frame, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0, 0, 255), 3, CV_AA);
        }
#endif
        imshow("Canny", die);
        imshow("original", frame);
        if (waitKey(30) >= 0)
            break;
    }
    return 0;
}
When I debug the program, the webcam window pops up okay, but when I show it a rectangular object with lines (a piece of paper) it stops the program with a breakpoint error. I concluded that my program stops every time it finds a line. When I choose to continue instead of breaking, it gives me this error:
Debug Assertion failed!
Program: ....Studio
2013/Projects/TrialRectangle/Debug/TrialRectangle.exe
File: f:/dd/vctools/crt/crtw32/misc/dbgheap.c
Line: 1332
Expression: _CrtIsValidHeapPointer(pUserData)
I played around with the HoughLinesP function and found that a high threshold parameter (e.g. 500) seems to make my program run fine, BUT it does not show any Hough lines at all in my webcam feed. If someone could explain why that is, that would be helpful as well!
Does anyone have any ideas as to how to solve this breakpoint error?
I ran into this Debug Assertion too.
In my case, it was because my project was compiled with the static runtime, yet I used OpenCV dynamically via its DLLs. I changed my project to use the dynamic runtime, which solved the problem.
It happens because the OpenCV object is allocated on a different heap, and when this object is destructed, the current runtime can't find that heap; this is why the assertion is hit.

OpenCV Drawn Lines on Contour (c++)

I want to draw lines on the following picture, so that I can calculate the length of each line. My problem is that when I try it with the following code my image gets completely white.
std::vector<cv::Vec2f> lines;
cv::HoughLines(drawing_small, lines, 1, CV_PI/180, 50, 0, 0 );
for( size_t i = 0; i < lines.size(); i++ )
{
    float rho = lines[i][0], theta = lines[i][1];
    cv::Point pt1, pt2;
    double a = cos(theta), b = sin(theta);
    double x0 = a*rho, y0 = b*rho;
    pt1.x = cvRound(x0 + 1000*(-b));
    pt1.y = cvRound(y0 + 1000*(a));
    pt2.x = cvRound(x0 - 1000*(-b));
    pt2.y = cvRound(y0 - 1000*(a));
    cv::line( drawing_small, pt1, pt2, cv::Scalar(0,100,0), 3, CV_AA);
}
Something like that:
I would be very happy if anyone could tell me what I can do.
Update
This is what I do before:
cv::findContours(dst, contours_small, hierarchy_small, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, cv::Point(0, 0) );
// Detecting contours
std::vector<cv::Point2f> ContCenter_small(contours_small.size());
cv::Mat drawing_small = cv::Mat::zeros( dst.size(), CV_8UC3 );
for( int i = 0; i < contours_small.size(); i++ )
{
    ContArea_small[i] = moments(contours_small[i], false);
    ContCenter_small[i] = cv::Point2f(ContArea_small[i].m10/ContArea_small[i].m00, ContArea_small[i].m01/ContArea_small[i].m00);
    cv::Scalar color_small = cv::Scalar(0,255,0);
    if(ContArea_small[i].m00 > 2000)
    {
        drawContours( drawing_small, contours_small, i, color_small, CV_FILLED, 8, hierarchy_small, 1, cv::Point() );
    }
}
cv::imwrite("contour.jpg", drawing_small);
cv::dilate(drawing_small, drawing_small, C, cv::Point(-1,-1), 1, 1, 20);
cv::threshold(drawing_small, drawing_small, 100, 255, cv::THRESH_BINARY_INV);
cv::GaussianBlur(drawing_small, drawing_small, cv::Size(9,9), 11);
This probably means that the Hough transform didn't manage to find any lines in your picture. In this case you should pre-filter your image first. For example, you can try Otsu's thresholding and Gaussian blur. And if I were you, I would first try passing different parameters to cv::HoughLines (especially threshold, the minimum number of intersections needed to "detect" a line).
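A minimal sketch of that suggestion (the kernel size and threshold value are illustrative choices, assuming drawing_small is still the 8-bit BGR image built above): blur, convert to a single channel, binarise with Otsu, then run HoughLines on the binary image.

cv::Mat pre;
cv::GaussianBlur(drawing_small, pre, cv::Size(5, 5), 0);
cv::cvtColor(pre, pre, CV_BGR2GRAY); // HoughLines expects an 8-bit single-channel image
cv::threshold(pre, pre, 0, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
std::vector<cv::Vec2f> lines;
cv::HoughLines(pre, lines, 1, CV_PI/180, 150); // raise or lower 150 to taste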
Make sure you are drawing the lines on, and outputting, the source image instead of some processed one. Can you show us more code of what you did exactly?

detecting 2 lines opencv

I have an image on which I run a dilation, which works fine. Now I want to detect the two thick lines in it.
When I run this part of the code on it:
cv::Canny(dilationResult, canny, 50, 200, 3);
cv::cvtColor(dilationResult, dilationResult, CV_BGR2GRAY);
cv::HoughLines(canny, lines, 30, CV_PI/180, 500, 0);
cv::cvtColor(mask, mask, CV_GRAY2BGR);
if(lines.size() != 0){
    std::cout << " line Size " << lines.size() << std::endl;
    for( size_t i = 0; i < lines.size(); i++ )
    {
        float rho = lines[i][0], theta = lines[i][1];
        cv::Point pt1, pt2;
        double a = cos(theta), b = sin(theta);
        double x0 = a*rho, y0 = b*rho;
        pt1.x = cvRound(x0 + 1000*(-b));
        pt1.y = cvRound(y0 + 1000*(a));
        pt2.x = cvRound(x0 - 1000*(-b));
        pt2.y = cvRound(y0 - 1000*(a));
        angle = atan2f((pt2.y-pt1.y), (pt2.x-pt1.x))*180.0/CV_PI;
        std::cout << "angle " << angle << std::endl;
        line( mask, pt1, pt2, cv::Scalar(0,0,255), 3, CV_AA);
    }
}
cv::imshow("mask", mask);
cv::imshow("mask " ,mask);
Here's the result:
What I would like to get is something like this:
getting only the 2 lines that have the same width. By the way, I don't want to use the findContours function.
Any idea how I can do this?
I didn't get it to work with the standard Hough transform, but it works with the probabilistic version, cv::HoughLinesP.
With lineDetection_Input.jpg being your linked image:
#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
    cv::Mat color = cv::imread("../lineDetection_Input.jpg");
    cv::Mat gray;
    cv::cvtColor(color, gray, CV_RGB2GRAY);
    std::vector<cv::Vec4i> lines;
    cv::HoughLinesP( gray, lines, 1, 2*CV_PI/180, 100, 100, 50 );
    for( size_t i = 0; i < lines.size(); i++ )
    {
        cv::line( color, cv::Point(lines[i][0], lines[i][1]),
                  cv::Point(lines[i][2], lines[i][3]), cv::Scalar(0,0,255), 1);
    }
    cv::imwrite("lineDetection_Output.jpg", color);
    cv::namedWindow("output"); cv::imshow("output", color); cv::waitKey(-1);
    return 0;
}
lineDetection_Output.jpg:
For a rotated image:
And for a different intersection angle:
There you can see some lines detected with a slightly wrong angle, starting in the top right and ending near the intersection (just past it), but these could easily be filtered by length or similar.
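For instance, a minimal length filter over the HoughLinesP output (the 150 px cut-off is an illustrative value, not from the answer):

double minLen = 150.0; // minimum accepted segment length, tune per image
std::vector<cv::Vec4i> kept;
for( size_t i = 0; i < lines.size(); i++ )
{
    double dx = lines[i][2] - lines[i][0];
    double dy = lines[i][3] - lines[i][1];
    if( std::sqrt(dx*dx + dy*dy) >= minLen ) // needs <cmath>
        kept.push_back(lines[i]);
}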

spectral residual saliency detection in C++ with CImg

I'm trying to implement the spectral residual approach for saliency detection, described in this paper:
http://www.klab.caltech.edu/~xhou/papers/cvpr07.pdf
There is a reference implementation in Matlab Code, taken from their website:
http://www.klab.caltech.edu/~xhou/projects/spectralResidual/spectralresidual.html
clear
clc
%% Read image from file
inImg = im2double(rgb2gray(imread('yourImage.jpg')));
inImg = imresize(inImg, 64/size(inImg, 2));
%% Spectral Residual
myFFT = fft2(inImg);
myLogAmplitude = log(abs(myFFT));
myPhase = angle(myFFT);
mySpectralResidual = myLogAmplitude - imfilter(myLogAmplitude, fspecial('average', 3),'replicate');
saliencyMap = abs(ifft2(exp(mySpectralResidual + i*myPhase))).^2;
%% After Effect
saliencyMap = mat2gray(imfilter(saliencyMap, fspecial('gaussian', [10, 10], 2.5)));
imshow(saliencyMap);
I've tried to translate it to C++ with CImg.
Where I fail is here:
myPhase = angle(myFFT);
and here
saliencyMap = abs(ifft2(exp(mySpectralResidual + i*myPhase))).^2;
Here's my code:
#include <CImg.h>
#include <iostream>
using namespace cimg_library;
int main() {
    CImg<unsigned char> image("img2.jpg");
    CImg<float> mask(3,3,1,1,1.0/9.0);
    image.resize(64,64);
    CImgList<float> myFFT = image.get_FFT();
    const CImg<float> MyLogAmplitude = ((myFFT[0].get_pow(2) + myFFT[1].get_pow(2)).get_sqrt()).get_log(); // magnitude
    const CImg<float> MyPhase = myFFT[0].get_atan2(myFFT[1]);
    const CImg<float> A = MyLogAmplitude.get_convolve(mask);
    const CImg<float> MySpectralResidual = MyLogAmplitude - A;
    CImgList<float> tmp = CImgList<float>(MySpectralResidual.get_exp(), MyPhase);
    CImgList<float> MySaliencyMap = tmp.get_FFT(true);
    CImgDisplay draw_disp0(MySaliencyMap, "Image");
    while (!draw_disp0.is_closed()) {
        draw_disp0.wait();
    }
    return 0;
}
Anybody seen an obvious mistake?
I think I can see two mistakes in your code:
First, the atan2() call for MyPhase has its arguments inverted. It should be written as
const CImg<float> MyPhase = myFFT[1].get_atan2(myFFT[0]);
(but this is probably not much of an issue here).
Second, and it is more serious, you are doing the inverse FFT on a pair of complex values coded as (amplitude,phase), which is not what CImg expects there, as the FFT() function supposes you input a (real,imaginary) pair of images. That probably makes a huge difference in the result.
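A minimal sketch of that conversion, reusing the question's variable names (our illustration, not the answerer's code; get_mul is CImg's element-wise multiplication): build the (real, imaginary) pair from exp(residual) and the phase before calling the inverse FFT.

const CImg<float> Amp = MySpectralResidual.get_exp();        // back from log scale
CImgList<float> complexPair(Amp.get_mul(MyPhase.get_cos()),  // real part
                            Amp.get_mul(MyPhase.get_sin())); // imaginary part
CImgList<float> inv = complexPair.get_FFT(true);             // inverse FFT
const CImg<float> MySaliencyMap = inv[0].get_pow(2) + inv[1].get_pow(2); // |.|^2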
Actually I had the same problem. Here is the code that solved it for me.
I edited it a bit to draw a rectangle around the salient object.
This code works for me.
#include "highgui.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "cv.h"
#include <stdlib.h>
#include <stdio.h>
#include <string>
#include <iostream>
using namespace cv;
using namespace std;
// Fourier transform function (defined at the bottom)
void fft2(IplImage *src, IplImage *dst);
int main()
{
    string imagePath = "inputgambar/34.jpg";
    //string imageSave = "saliency/42.jpg";
    //string imageRectangular = "rectangular/42.jpg";
    IplImage *ImageAsli, *ImageSaliency, *src, *ImageRe, *ImageIm, *Fourier, *Inverse, *LogAmplitude, *Sine, *Cosine;
    IplImage *Saliency, *Residual;
    IplImage *tmp1, *tmp2, *tmp3;
    Mat gambarSave, threshold_output;
    vector<vector<Point> > contours;
    vector<Vec4i> hierarchy;
    double minNum = 0, maxNum = 0, scale, shift, rata2, nilaiThreshold, Lebar, gantiPixel;
    // load the original image
    ImageAsli = cvLoadImage(imagePath.c_str());
    cvNamedWindow("ImageAsli", CV_WINDOW_NORMAL);
    cvShowImage("ImageAsli", ImageAsli);
    cvMoveWindow("ImageAsli", 0, 100);
    // load the image again as a single-channel/gray image
    //inputImage = cvLoadImage(imagePath.c_str());
    src = cvLoadImage(imagePath.c_str(), 0);
    Lebar = src->width;
    gantiPixel = 64/Lebar;
    // Fourier image has 2 channels: real and imaginary
    Fourier = cvCreateImage(cvGetSize(src), IPL_DEPTH_64F, 2);
    Inverse = cvCreateImage(cvGetSize(src), IPL_DEPTH_64F, 2);
    // real and imaginary spectra
    ImageRe = cvCreateImage(cvGetSize(src), IPL_DEPTH_64F, 1);
    ImageIm = cvCreateImage(cvGetSize(src), IPL_DEPTH_64F, 1);
    // log amplitude
    LogAmplitude = cvCreateImage(cvGetSize(src), IPL_DEPTH_64F, 1);
    // sine and cosine spectra
    Sine = cvCreateImage(cvGetSize(src), IPL_DEPTH_64F, 1);
    Cosine = cvCreateImage(cvGetSize(src), IPL_DEPTH_64F, 1);
    // spectral residual
    Residual = cvCreateImage(cvGetSize(src), IPL_DEPTH_64F, 1);
    // saliency
    Saliency = cvCreateImage(cvGetSize(src), src->depth, src->nChannels);
    // temporary space
    tmp1 = cvCreateImage(cvGetSize(src), IPL_DEPTH_64F, 1);
    tmp2 = cvCreateImage(cvGetSize(src), IPL_DEPTH_64F, 1);
    tmp3 = cvCreateImage(cvGetSize(src), IPL_DEPTH_64F, 1);
    //
    scale = 1.0/255.0;
    cvConvertScale(src, tmp1, 1, 0);
    //
    fft2(tmp1, Fourier);
    // put the real and imaginary parts into ImageRe, ImageIm
    cvSplit(Fourier, ImageRe, ImageIm, 0, 0);
    // Fourier magnitude/amplitude in tmp3
    cvPow(ImageRe, tmp1, 2.0);
    cvPow(ImageIm, tmp2, 2.0);
    cvAdd(tmp1, tmp2, tmp3);
    cvPow(tmp3, tmp3, 0.5);
    // log amplitude, sine, cosine
    cvLog(tmp3, LogAmplitude);
    cvDiv(ImageIm, tmp3, Sine);
    cvDiv(ImageRe, tmp3, Cosine);
    // smoothing: (1/(3x3)) * ones(3) mean filter on the log amplitude, result in tmp3
    cvSmooth(LogAmplitude, tmp3, CV_BLUR, 3, 3);
    // spectral residual = LogAmplitude - tmp3
    cvSub(LogAmplitude, tmp3, Residual);
    /*
       inverse Fourier transform --> exp(Residual + i * Phase)
       Euler's formula:
       exp(r + i*T) = exp(r) * (cos(T) + i * sin(T)) = exp(r) * cos(T) + i * exp(r) * sin(T)
       sin(T) = ImageIm / Amplitude; cos(T) = ImageRe / Amplitude;
    */
    cvExp(Residual, Residual);
    cvMul(Residual, Cosine, tmp1);
    cvMul(Residual, Sine, tmp2);
    // merge the two 1-channel images => 2-channel Fourier image
    cvMerge(tmp1, tmp2, 0, 0, Fourier);
    // inverse Fourier transform
    cvDFT(Fourier, Inverse, CV_DXT_INV_SCALE);
    cvSplit(Inverse, tmp1, tmp2, 0, 0);
    // tmp3 = tmp1^2 + tmp2^2 (squared magnitude)
    cvPow(tmp1, tmp1, 2);
    cvPow(tmp2, tmp2, 2);
    cvAdd(tmp1, tmp2, tmp3);
    // Gaussian filter with a 7x7 kernel
    cvSmooth(tmp3, tmp3, CV_GAUSSIAN, 7, 7);
    // find the global min/max (cvMinMaxLoc)
    cvMinMaxLoc(tmp3, &minNum, &maxNum, NULL, NULL);
    scale = 255 / (maxNum - minNum);
    shift = -minNum * scale;
    // end of saliency computation
    cvConvertScale(tmp3, Saliency, scale, shift);
    // proto-object detection
    CvScalar rataan = cvAvg(Saliency);
    nilaiThreshold = 3 * (rataan.val[0]);
    //cout << nilaiThreshold;
    gambarSave = Mat(Saliency);
    //imwrite(imageSave.c_str(), gambarSave);
    //resize(gambarSave, gambarSave, Size(), gantiPixel, gantiPixel, CV_INTER_AREA);
    //ImageSaliency = cvCreateImage(cvSize(Saliency->width * gantiPixel, Saliency->height * gantiPixel), Saliency->depth, Saliency->nChannels);
    //cvResize(Saliency, ImageSaliency, CV_INTER_AREA);
    cvNamedWindow("Saliency", CV_WINDOW_NORMAL);
    cvShowImage("Saliency", Saliency);
    cvMoveWindow("Saliency", 0, 500);
    /// Detect edges using threshold
    threshold(gambarSave, threshold_output, nilaiThreshold, 255, THRESH_BINARY);
    /// Find contours
    findContours(threshold_output, contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
    /// Find the rotated rectangles
    vector<RotatedRect> minRect(contours.size());
    for( int i = 0; i < contours.size(); i++ )
    {
        minRect[i] = minAreaRect(Mat(contours[i]));
    }
    /// Draw rotated rects
    for( int i = 0; i < contours.size(); i++ )
    {
        // rotated rectangle
        Point2f rect_points[4];
        minRect[i].points(rect_points);
        for( int j = 0; j < 4; j++ )
            line(gambarSave, rect_points[j], rect_points[(j+1)%4], Scalar(100), 2, 8);
    }
    //imwrite(imageRectangular.c_str(), gambarSave);
    /// Show in a window
    namedWindow("Rectangular", CV_WINDOW_AUTOSIZE);
    imshow("Rectangular", gambarSave);
    cvMoveWindow("Rectangular", 480, 100);
    cvWaitKey(0);
    // release images
    cvReleaseImage(&src);
    cvReleaseImage(&ImageIm);
    cvReleaseImage(&ImageRe);
    cvReleaseImage(&Fourier);
    cvReleaseImage(&Inverse);
    cvReleaseImage(&LogAmplitude);
    cvReleaseImage(&Sine);
    cvReleaseImage(&Cosine);
    cvReleaseImage(&Saliency);
    cvReleaseImage(&Residual);
    cvReleaseImage(&tmp1);
    cvReleaseImage(&tmp2);
    cvReleaseImage(&tmp3);
    cvReleaseImage(&ImageAsli);
    cvDestroyAllWindows();
    return 0;
}
// Fourier transform
void fft2(IplImage *src, IplImage *dst)
{
    IplImage *image_Re = 0, *image_Im = 0, *Fourier = 0;
    // 1-channel real and imaginary images
    image_Re = cvCreateImage(cvGetSize(src), IPL_DEPTH_64F, 1);
    image_Im = cvCreateImage(cvGetSize(src), IPL_DEPTH_64F, 1);
    // 2 channels (image_Re, image_Im)
    Fourier = cvCreateImage(cvGetSize(src), IPL_DEPTH_64F, 2);
    // fill image_Re with the source values
    cvConvertScale(src, image_Re, 1, 0);
    // set the initial imaginary values to 0
    cvZero(image_Im);
    // join real and imaginary parts and stock them in the Fourier image
    cvMerge(image_Re, image_Im, 0, 0, Fourier);
    // forward Fourier transform
    cvDFT(Fourier, dst, CV_DXT_FORWARD);
    cvReleaseImage(&image_Re);
    cvReleaseImage(&image_Im);
    cvReleaseImage(&Fourier);
}
http://i60.tinypic.com/5xvmhi.png