My code seems to have a bug somewhere, but I just can't catch it. I'm passing a 2D array to three sequential functions: the first populates it, the second converts the values to 1s and 0s, and the third counts the 1s and 0s. I can access the array easily inside the first two functions, but I get an access violation on the first iteration of the third one.
Main
text_image_data = new int*[img_height];
for (i = 0; i < img_height; i++) {
text_image_data[i] = new int[img_width];
}
cav_length = new int[numb_of_files];
// Start processing - load each image and find max cavity length
for (proc = 0; proc < numb_of_files; proc++)
{
readImage(filles[proc], text_image_data, img_height, img_width);
threshold = makeBinary(text_image_data, img_height, img_width);
cav_length[proc] = measureCavity(bullet[0], img_width, bullet[1], img_height, text_image_data);
}
Functions
int makeBinary(int** img, int height, int width)
{
int threshold = 0;
unsigned long int sum = 0;
for (int k = 0; k < width; k++)
{
sum = sum + img[1][k] + img[2][k] + img[3][k] + img[4][k] + img[5][k];
}
threshold = sum / (width * 5);
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
img[i][j] = img[i][j] > threshold ? 1 : 0;
}
}
return threshold;
}
// Count pixels - find length of cavity here
int measureCavity(int &x, int& width, int &y, int &height, int **img)
{
double mean = 1.;
int maxcount = 0;
int pxcount = 0;
int i = x - 1;
int j;
int pxsum = 0;
for (j = 0; j < height - 2; j++)
{
while (mean > 0.0)
{
for (int ii = i; ii > i - 4; ii--)
{
pxsum = pxsum + img[ii][j] + img[ii][j + 1];
}
mean = pxsum / 4.;
pxcount += 2;
i += 2;
pxsum = 0;
}
maxcount = std::max(maxcount, pxcount);
pxcount = 0;
j++;
}
return maxcount;
}
I keep getting an access violation in the measureCavity() function. I'm passing and accessing the array text_image_data the same way as in makeBinary() and readImage(), and it works just fine for those functions. The size is [550][70], and I get the error when trying to access [327][0].
Is there a better, more reliable way to pass this array between the functions?
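For reference, a minimal sketch of one alternative way to pass the array, assuming C++11 and the same [550][70] dimensions (this is not the author's code; countOnes is a hypothetical stand-in for the third function):
#include <cstddef>
#include <vector>
// A vector of vectors carries its own dimensions, so the functions need no
// separate height/width parameters, and .at() turns an out-of-range index
// into a std::out_of_range exception instead of an access violation.
using Image = std::vector<std::vector<int>>;
int countOnes(const Image& img)   // hypothetical stand-in for measureCavity
{
    int ones = 0;
    for (std::size_t i = 0; i < img.size(); i++)
        for (std::size_t j = 0; j < img.at(i).size(); j++)
            ones += img.at(i).at(j);   // bounds-checked access
    return ones;
}
int main()
{
    Image text_image_data(550, std::vector<int>(70, 0));   // [550][70]
    return countOnes(text_image_data);
}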
I am trying to implement an alpha-trimmed mean filter using the OpenCV library. My code is not working properly and the resulting image does not look like a properly filtered image.
The filter should work in the following way.
Choosing a window of pixels; in my example it is a 3x3 window of 9 pixels.
Sorting them in increasing order.
Trimming the sorted 'array' by alpha/2 elements on both sides.
Calculating the arithmetic mean of the remaining pixels and inserting it at the proper place (a stand-alone sketch of this per-window step follows below).
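A hypothetical stand-alone sketch of that per-window step for a single grayscale 3x3 window (not the author's code; it assumes an even alpha so that alpha/2 pixels are trimmed from each end):
#include <algorithm>
#include <numeric>
// Sketch: alpha-trimmed mean of one window of 9 gray values.
// Sort ascending, drop alpha/2 values from each end, average the rest.
unsigned char alphaTrimmedMean(unsigned char window[9], int alpha)
{
    std::sort(window, window + 9);
    const int start = alpha / 2;
    const int end = 9 - alpha / 2;
    const int sum = std::accumulate(window + start, window + end, 0);
    return static_cast<unsigned char>(sum / (9 - alpha));
}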
int alphatrimmed(Mat img, int alpha)
{
Mat img9 = img.clone();
const int start = alpha/2 ;
const int end = 9 - (alpha/2);
//going through whole image
for (int i = 1; i < img.rows - 1; i++)
{
for (int j = 1; j < img.cols - 1; j++)
{
uchar element[9];
Vec3b element3[9];
int k = 0;
int a = 0;
//selecting elements for window 3x3
for (int m = i -1; m < i + 2; m++)
{
for (int n = j - 1; n < j + 2; n++)
{
element3[a] = img.at<Vec3b>(m*img.cols + n);
a++;
for (int c = 0; c < img.channels(); c++)
{
element[k] += img.at<Vec3b>(m*img.cols + n)[c];
}
k++;
}
}
//comparing and sorting elements in window (uchar element [9])
for (int b = 0; b < end; b++)
{
int min = b;
for (int d = b + 1; d < 9; d++)
{
if (element[d] < element[min])
{
min = d;
const uchar temp = element[b];
element[b] = element[min];
element[min] = temp;
const Vec3b temporary = element3[b];
element3[b] = element3[min];
element3[min] = temporary;
}
}
}
// index in resultant image( after alpha-trimmed filter)
int result = (i - 1) * (img.cols - 2) + j - 1;
for (int l = start ; l < end; l++)
img9.at<Vec3b>(result) += element3[l];
img9.at<Vec3b>(result) /= (9 - alpha);
}
}
namedWindow("AlphaTrimmed Filter", WINDOW_AUTOSIZE);
imshow("AlphaTrimmed Filter", img9);
return 0;
}
Without actual data it's somewhat of a guess, but a uchar can't hold the sum of 3 channels; it works modulo 256 (at least on any platform OpenCV supports).
The proper solution is std::sort with a suitable comparator for your Vec3b:
bool L1(Vec3b a, Vec3b b) { return a[0]+a[1]+a[2] < b[0]+b[1]+b[2]; }
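Applied inside the question's inner loop, the sorting and averaging might then look roughly like this (a sketch reusing element3, start, end, alpha, img9, i and j from the code above; it needs <algorithm>, and the per-channel int accumulator avoids the uchar wrap-around):
// Sort the 3x3 window of Vec3b pixels by their channel sum.
std::sort(element3, element3 + 9, L1);
// Average the kept elements per channel in an int accumulator.
int acc[3] = { 0, 0, 0 };
for (int l = start; l < end; l++)
    for (int c = 0; c < 3; c++)
        acc[c] += element3[l][c];
// Written back at (i, j) here for simplicity.
img9.at<Vec3b>(i, j) = Vec3b(acc[0] / (9 - alpha),
                             acc[1] / (9 - alpha),
                             acc[2] / (9 - alpha));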
Using the OpenCV library, I want to threshold an image like this:
threshold(image, thresh, 220, 255, THRESH_BINARY_INV)
But I want to find the threshold value (220) automatically.
I use Otsu to estimate the threshold, but it doesn't work in my case.
Therefore, I should use the histogram peak technique: I want to find the two peaks in the histogram corresponding to the background and the object of the image, and have the threshold value set automatically halfway between the two peaks.
I use this book (pages 117 and 496-505): "Image Processing in C" by Dwayne Phillips (http://homepages.inf.ed.ac.uk/rbf/BOOKS/PHILLIPS/), and I use its source code to find the two peaks in the histogram corresponding to the background and the object of the image. This is my image:
This is my C++ code:
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <stdio.h>
#include <fstream>
using namespace std;
using namespace cv;
int main()
{
Mat image0 = imread("C:/Users/Alireza/Desktop/contrast950318/2.bmp");
imshow("image0", image0);
Mat image, thresh, Tafrigh;
cvtColor(image0, image, CV_RGB2GRAY);
int N = image.rows*image.cols;
int histogram[256];
for (int i = 0; i < 256; i++) {
histogram[i] = 0;
}
//create histo
for (int i = 0; i < image.rows; i++){
for (int j = 0; j < image.cols; j++){
histogram[((int)image.at<uchar>(i, j))]++;
}
}
int peak1, peak2;
#define PEAKS 30
int distance[PEAKS], peaks[PEAKS][2];
int i, j = 0, max = 0, max_place = 0;
for (int i = 0; i<PEAKS; i++){
distance[i] = 0;
peaks[i][0] = -1;
peaks[i][1] = -1;
}
for (i = 0; i <= 255; i++){
max = histogram[i];
max_place = i;
//insert_into_peaks(peaks, max, max_place);
//int max, max_place, peaks[PEAKS][2];
//int i, j;
/* first case */
if (max > peaks[0][0]){
for (i = PEAKS - 1; i > 0; i--){
peaks[i][0] = peaks[i - 1][0];
peaks[i][1] = peaks[i - 1][1];
}
peaks[0][0] = max;
peaks[0][1] = max_place;
} /* ends if */
/* middle cases */
for (j = 0; j < PEAKS - 3; j++){
if (max < peaks[j][0] && max > peaks[j + 1][0]){
for (i = PEAKS - 1; i > j + 1; i--){
peaks[i][0] = peaks[i - 1][0];
peaks[i][1] = peaks[i - 1][1];
}
peaks[j + 1][0] = max;
peaks[j + 1][1] = max_place;
} /* ends if */
} /* ends loop over j */
/* last case */
if (max < peaks[PEAKS - 2][0] &&
max > peaks[PEAKS - 1][0]){
peaks[PEAKS - 1][0] = max;
peaks[PEAKS - 1][1] = max_place;
} /* ends if */
}/* ends loop over i */
for (int i = 1; i<PEAKS; i++){
distance[i] = peaks[0][1] - peaks[i][1];
if (distance[i] < 0)
distance[i] = distance[i] * (-1);
}
peak1 = peaks[0][1];
cout << " peak1= " << peak1;
for (int i = PEAKS - 1; i > 0; i--){
if (distance[i] > 1)
peak2 = peaks[i][1];
}
cout << " peak2= " << peak2;
int mid_point;
//int peak1, peak2;
short hi, low;
unsigned long sum1 = 0, sum2 = 0;
if (peak1 > peak2)
mid_point = ((peak1 - peak2) / 2) + peak2;
if (peak1 < peak2)
mid_point = ((peak2 - peak1) / 2) + peak1;
for (int i = 0; i<mid_point; i++)
sum1 = sum1 + histogram[i];
for (int i = mid_point; i <= 255; i++)
sum2 = sum2 + histogram[i];
if (sum1 >= sum2){
low = mid_point;
hi = 255;
}
else{
low = 0;
hi = mid_point;
}
cout << " low= " << low << " hi= " << hi;
double threshnum = 0.5* (low + hi);
threshold(image, thresh, threshnum, hi, THRESH_BINARY_INV);
waitKey(0);
return 0;
}
But I don't know whether this code is correct. If it is correct, why is the threshold value 202?
What ideas would you suggest for solving this task? Or what resources on the internet could help me?
You can also use the Max Entropy method. In some cases, using only the high part of the entropy could work better.
int maxentropie(const cv::Mat1b& src)
{
// Histogram
cv::Mat1d hist(1, 256, 0.0);
for (int r=0; r<src.rows; ++r)
for (int c=0; c<src.cols; ++c)
hist(src(r,c))++;
// Normalize
hist /= double(src.rows * src.cols);
// Cumulative histogram
cv::Mat1d cumhist(1, 256, 0.0);
float sum = 0;
for (int i = 0; i < 256; ++i)
{
sum += hist(i);
cumhist(i) = sum;
}
cv::Mat1d hl(1, 256, 0.0);
cv::Mat1d hh(1, 256, 0.0);
for (int t = 0; t < 256; ++t)
{
// low range entropy
double cl = cumhist(t);
if (cl > 0)
{
for (int i = 0; i <= t; ++i)
{
if (hist(i) > 0)
{
hl(t) = hl(t) - (hist(i) / cl) * log(hist(i) / cl);
}
}
}
// high range entropy
double ch = 1.0 - cl; // constraint cl + ch = 1
if (ch > 0)
{
for (int i = t+1; i < 256; ++i)
{
if (hist(i) > 0)
{
hh(t) = hh(t) - (hist(i) / ch) * log(hist(i) / ch);
}
}
}
}
// choose best threshold
cv::Mat1d entropie(1, 256, 0.0);
double h_max = hl(0) + hh(0);
int threshold = 0;
entropie(0) = h_max;
for (int t = 1; t < 256; ++t)
{
entropie(t) = hl(t) + hh(t);
if (entropie(t) > h_max)
{
h_max = entropie(t);
threshold = uchar(t);
}
}
if(threshold==0) threshold=255;
return threshold;
}
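For example, it could replace the hard-coded 220 from the question roughly like this (a sketch, assuming image is the single-channel 8-bit Mat produced by cvtColor above):
// Sketch: use the max-entropy estimate as the threshold value.
int t = maxentropie(image);
cv::threshold(image, thresh, t, 255, cv::THRESH_BINARY_INV);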
Here is my code:
double calculateImageEntropy(Mat image, int sizeReduction)
{
// Manually calculating the histogram.
long double imageHistogram[256];
fill_n(imageHistogram, 256, 0);
int tempPixelValue = 0;
long double grayTotal = ((image.cols - sizeReduction) * (image.rows - sizeReduction));
for (int a = 0 + sizeReduction; a < image.cols - sizeReduction; a++)
{
for (int b = 0 + sizeReduction; b < image.rows - sizeReduction; b++)
{
tempPixelValue = image.at<uchar>(b, a);
imageHistogram[tempPixelValue] = (imageHistogram[tempPixelValue] + (1 / grayTotal));
}
}
/*long double normHist[256];
fill_n(normHist, 255, 0);
normalize(imageHistogram, normHist, 0, image.rows, NORM_MINMAX, -1, Mat());
*/
long double total = 0;
for (int i = 0; i < 255; i++)
{
total = total + imageHistogram[i];
}
cout << total << "\n\n";
long double tempValue = 0;
long double tempEntropy = 0;
long double cumulativeEntropy = 0;
long double finalEntropy = 0;
for (int y = 0; y < 255; y++)
{
tempValue = imageHistogram[y];
if (tempValue != 0)
{
tempEntropy = tempValue * log2(tempValue);
cumulativeEntropy = cumulativeEntropy + tempEntropy;
}
else;
}
finalEntropy = cumulativeEntropy * (-1);
return finalEntropy;
}
I'm using OpenCV to load images and I'm trying to calculate the entropy of an image. I want to normalize the histogram so that bigger images won't have a bigger entropy just because of their size.
But I can't figure out why my normalized histogram doesn't sum to 1.
Does anyone have any ideas?
Thanks
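For what it's worth, a minimal stand-alone sketch of a size-independent entropy computation (not the author's code; it builds the normalized histogram over all 256 bins of a single-channel 8-bit image and prints its sum, which should be about 1):
#include <opencv2/opencv.hpp>
#include <cmath>
#include <iostream>
// Sketch: normalized histogram (sums to ~1 regardless of image size)
// and the Shannon entropy computed from it.
double imageEntropySketch(const cv::Mat& gray)
{
    double hist[256] = { 0.0 };
    const double total = static_cast<double>(gray.rows) * gray.cols;
    for (int r = 0; r < gray.rows; r++)
        for (int c = 0; c < gray.cols; c++)
            hist[gray.at<uchar>(r, c)] += 1.0 / total;
    double sum = 0.0, entropy = 0.0;
    for (int i = 0; i < 256; i++)          // all 256 bins, including 255
    {
        sum += hist[i];
        if (hist[i] > 0.0)
            entropy -= hist[i] * std::log2(hist[i]);
    }
    std::cout << "histogram sum = " << sum << "\n";
    return entropy;
}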
I am new to C++ and I have no real idea why my program crashes, only guesses.
The following program suddenly started to crash on line 49, at the void saveSig(cv::Mat *frame) line itself,
without even stepping into the function.
It ran fine before.
The program is supposed to track a person in a video under certain circumstances, which I will not go over since they haven't been implemented yet.
I can only guess that I have run out of stack, and I'm not sure why; it might be a leak that I missed, or maybe I just ran out of stack space, or maybe it's something else entirely and very stupid.
PS: sorry if the code is not "pretty"; I'm really new to C++ and OpenCV and I would appreciate any comments about bad coding practices.
#include "myCVFunctions.h"
#include <vector>
#define LOADING_VIDEO_ERROR -1
#define LOADING_BACKGROUND_IMAGE_ERROR -2
#define FRAME_BUFFER_SIZE 10
#define SIG_BUFFER_SIZE 6
const cv::string g_c_videoFilePath = "res/tmp.mp4";
const cv::string g_c_bgFilePath = "res/bg.jpg";
const cv::Mat g_c_bg = cv::imread(g_c_bgFilePath);
const cv::Rect g_c_entranceROIRect(869, 999, 345, 80);
const cv::Rect g_c_largeEntranceROIRect(869, 340, 345, 740);
const cv::Rect g_c_sigROI(869,539,345,541);
cv::Mat g_currFrameBackup;
cv::Point g_clickCoords(-1,-1);
cv::Rect g_markedROI;
bool g_trace = false;
bool g_personInside = false;
bool g_useSig = false;
char g_sigCount = 0;
double g_sig[SIG_BUFFER_SIZE];
double g_newSig[SIG_BUFFER_SIZE];
cv::Point g_inSigHeadCoords[SIG_BUFFER_SIZE];
cv::Point g_inNewSigHeadCoords[SIG_BUFFER_SIZE];
long double av1 = 0;
long double av2 = 0;
double minDiff = 9999999999.999999;
void onMouse(int event, int x, int y, int flags, void* userdata){
if(event == CV_EVENT_LBUTTONDOWN){
g_clickCoords.x = x;
g_clickCoords.y = y;
}
if(event == CV_EVENT_MOUSEMOVE && g_clickCoords.x>=0){
g_markedROI = cv::Rect(g_clickCoords, cv::Point(x,y));
g_currFrameBackup.copyTo(*((cv::Mat*)userdata));
cv::rectangle(*((cv::Mat*)userdata), g_markedROI, cv::Scalar(0,255,0));
}
if(event == CV_EVENT_LBUTTONUP){
g_trace = true;
g_useSig = true;
g_clickCoords = cv::Point(-1,-1);
}
}
void saveSig(cv::Mat *frame){ //the crash occurs here
double fftData[512*512];
cv::Mat sigROI, sigHSV, resized;
sigROI = (*frame)(g_c_sigROI);
cv::cvtColor(sigROI, sigHSV, CV_BGR2HSV);
resized = my_cv::resize_zeros(sigHSV, cv::Size(512,512));
cv::MatIterator_<cv::Vec3b> m_it = resized.begin<cv::Vec3b>();
for(int i=0; m_it!=resized.end<cv::Vec3b>(); m_it++, i++){
fftData[i] = (*m_it)[2];
}
my_cv::FFTR fft = my_cv::createFFTR<double>(fftData, 512, 512, FFT_TYPE_2D);
//cv::flip(sigHSV, sigHSV, -1);
//cv::transpose(sigHSV, sigHSV);
//cv::flip(sigHSV, sigHSV, 0);
//cv::imshow("1", sigROI);
//cv::imshow("", sigHSV);
//cv::waitKey();
//resized = my_cv::resize_zeros(sigHSV, cv::Size(512,512));
//m_it = resized.begin<cv::Vec3b>();
//for(int i=0; m_it!=resized.end<cv::Vec3b>(); m_it++, i++){
// fftData[i] = (*m_it)[2];
//}
//my_cv::FFTR fft180 = my_cv::createFFTR<double>(fftData, 512, 512, FFT_TYPE_2D);
my_cv::FFTR multFFT = my_cv::multFFT(fft, fft);
my_cv::m_reverseFFTR(multFFT, FFT_TYPE_2D);
if(g_useSig){
g_newSig[g_sigCount] = my_cv::getFFTAverege(multFFT);
}else{
g_sig[g_sigCount] = my_cv::getFFTAverege(multFFT);
}
g_sigCount++;
if(g_sigCount>=SIG_BUFFER_SIZE&&g_useSig){
av1 = ((g_sig[0]+g_sig[1]+g_sig[2]+g_sig[3]+g_sig[4]+g_sig[5])/6)/1000000.0;
av2 = ((g_newSig[0]+g_newSig[1]+g_newSig[2]+g_newSig[3]+g_newSig[4]+g_newSig[5])/6)/1000000.0;
/*for(int i=0; i<SIG_BUFFER_SIZE; i++){
for(int j=0; j<SIG_BUFFER_SIZE; j++){
double diff = abs(g_newSig[i]-g_sig[j]);
minDiff = (diff<minDiff ? diff : minDiff);
}
}*/
my_cv::deleteFFTR(fft);
//my_cv::deleteFFTR(fft180);
my_cv::deleteFFTR(multFFT);
}
}
void proccesFrame(cv::Mat *frame){
cv::Mat grayFrame, negativeFrame, bwFrame, entranceROI;
negativeFrame = g_c_bg - *frame;
cv::cvtColor(negativeFrame, grayFrame, CV_BGR2GRAY);
cv::threshold(grayFrame, bwFrame, 30, 255, cv::THRESH_BINARY);
cv::Mat erode = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(7,7));
cv::Mat dilate = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(10,10));
cv::erode(bwFrame, bwFrame, erode);
cv::dilate(bwFrame, bwFrame, dilate);
entranceROI = bwFrame(g_c_largeEntranceROIRect);
cv::MatIterator_<uchar> m_it = entranceROI.begin<uchar>();
for(g_personInside = false; m_it!=entranceROI.end<uchar>(); m_it++){
if(*m_it==255){
g_personInside = true;
break;
}
}
if(!g_personInside){
g_trace = false;
g_sigCount = 0;
av1 = 0;
av2 = 0;
minDiff = 9999999999.999999;
}else{
if(g_sigCount<SIG_BUFFER_SIZE){
cv::Mat ROI = bwFrame(g_c_entranceROIRect);
cv::MatIterator_<uchar> bw_it = bwFrame.begin<uchar>();
if(!g_useSig){
for(int i=0; bw_it!=bwFrame.end<uchar>(); bw_it++, i++){
if(*bw_it==255){
g_inSigHeadCoords[g_sigCount] = cv::Point(i%bwFrame.cols, i/bwFrame.cols);
break;
}
}
}else{
for(int i=0; bw_it!=bwFrame.end<uchar>(); bw_it++, i++){
if(*bw_it==255){
g_inNewSigHeadCoords[g_sigCount] = cv::Point(i%bwFrame.cols, i/bwFrame.cols);
break;
}
}
}
saveSig(frame);
}
cv::putText(*frame, "Person inside", cv::Point(20,120), CV_FONT_HERSHEY_PLAIN, 3.0, cv::Scalar(0,255,0), 2);
if(g_useSig&&g_sigCount>=SIG_BUFFER_SIZE){
g_sig;
g_newSig;
g_sigCount++;
//g_trace = true;
}
if(g_trace){
std::vector<std::vector<cv::Point>> contours;
std::vector<cv::Vec4i> hierarchy;
findContours(bwFrame, contours, hierarchy, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
std::vector<std::vector<cv::Point>>::iterator o_it = contours.begin();
for(; o_it!=contours.end(); o_it++){
std::vector<cv::Point>::iterator i_it = (*o_it).begin();
for(; i_it!=(*o_it).end()-1; i_it++){
cv::line(*frame, *i_it, *(i_it+1), cv::Scalar(0,255,0) , 3);
}
}
}
}
}
int main(int argc, char* argv[]){
//init//////////////////////////////////////////////////////////////////////
cv::VideoCapture videoBuffer(g_c_videoFilePath);
if(!videoBuffer.isOpened()){
std::cerr << "Can't load video please check the paths\n";
return LOADING_VIDEO_ERROR;
}
if(!g_c_bg.data){
std::cerr << "Can't load background image please check the paths\n";
return LOADING_BACKGROUND_IMAGE_ERROR;
}
std::vector<cv::Mat> frameBuffer;
frameBuffer.resize(FRAME_BUFFER_SIZE);
const std::vector<cv::Mat>::iterator currFrame = frameBuffer.begin();
const cv::string mainWindow = "Object Tracker";
cv::namedWindow(mainWindow, CV_WINDOW_AUTOSIZE);
cv::setMouseCallback(mainWindow, onMouse, (void*)&(*currFrame));
//init end/////////////////////////////////////////////////////////////////////////////
//video loop///////////////////////////////////////////////////////////////////////////
for(char paused = 0;;){
paused = (cv::waitKey(20)==' ' ? 1 : 0);
while(paused){
cv::resize(*currFrame, *currFrame, cv::Size(900, 540));
cv::imshow(mainWindow, *currFrame);
paused = (cv::waitKey(20)==' ' ? 0 : 1);
}
cv::Mat frame;
videoBuffer.read(frame);
frame.copyTo(g_currFrameBackup);
frameBuffer.pop_back();
frameBuffer.insert(frameBuffer.begin(), frame);
std::stringstream ss;
ss << "Frame: " << videoBuffer.get(CV_CAP_PROP_POS_FRAMES);
cv::putText(*currFrame, ss.str().c_str(), cv::Point(20,70), CV_FONT_HERSHEY_PLAIN, 3.0, cv::Scalar(0,255,0), 2);
proccesFrame(&(*currFrame));
/*if(g_personInside){
cv::resize(*currFrame, *currFrame, cv::Size(900, 540));
while(cv::waitKey(40)!=' ')
cv::imshow(mainWindow, *currFrame);
}*/
cv::resize(*currFrame, *currFrame, cv::Size(900, 540));
cv::imshow(mainWindow, *currFrame);
}
//video loop end///////////////////////////////////////////////////////////////////////
return 0;
}
and the "myCVFunctions.h" file:
#pragma once
#include "opencv\cv.h"
#include "opencv\highgui.h"
#include "fftw3.h"
#define FFT_TYPE_1D 1
#define FFT_TYPE_2D 2
namespace my_cv{
struct myComplex{
double real;
double imag;
};
struct FFTR{
myComplex** data;
int cols;
int rows;
};
struct ENTROPR{
double** data;
int cols;
int rows;
};
void printFFTR(FFTR fft);
FFTR createFFTR(cv::Mat mGrey, int type){
FFTR result;
result.rows = mGrey.rows, result.cols = mGrey.cols;
result.data = new myComplex*[result.cols];
for(int i = 0; i<result.cols; i++)
result.data[i] = new myComplex[result.rows];
fftw_complex *in, *out;
fftw_plan p;
in = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * result.rows * result.cols);
out = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * result.rows * result.cols);
switch(type){
case FFT_TYPE_1D:
p = fftw_plan_dft_1d(result.rows*result.cols, in, out, FFTW_FORWARD, FFTW_ESTIMATE);
break;
case FFT_TYPE_2D:
p = fftw_plan_dft_2d(result.rows, result.cols, in, out, FFTW_FORWARD, FFTW_ESTIMATE);
break;
}
cv::MatIterator_<uchar> mGrey_it = mGrey.begin<uchar>();
for(int i=0; mGrey_it != mGrey.end<uchar>(); mGrey_it++, i++){
in[i][0] = *mGrey_it;
in[i][1] = 0;
}
fftw_execute(p);
for(int i=0; i<result.rows*result.cols; i++){
int x = i%result.cols, y = i/result.cols;
result.data[x][y].real = out[i][0];
result.data[x][y].imag = out[i][1];
}
fftw_destroy_plan(p);
fftw_free(in);
fftw_free(out);
return result;
}
template<class T> FFTR createFFTR(const T* const mat, int cols, int rows, int type){
FFTR result;
result.rows = rows, result.cols = cols;
result.data = new myComplex*[result.cols];
for(int i = 0; i<result.cols; i++)
result.data[i] = new myComplex[result.rows];
fftw_complex *in, *out;
fftw_plan p;
in = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * result.rows * result.cols);
out = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * result.rows * result.cols);
switch(type){
case FFT_TYPE_1D:
p = fftw_plan_dft_1d(result.rows*result.cols, in, out, FFTW_FORWARD, FFTW_ESTIMATE);
break;
case FFT_TYPE_2D:
p = fftw_plan_dft_2d(result.rows, result.cols, in, out, FFTW_FORWARD, FFTW_ESTIMATE);
break;
}
for(int i=0; i<cols*rows; i++){
in[i][0] = mat[i];
in[i][1] = 0;
}
fftw_execute(p);
for(int i=0; i<result.rows*result.cols; i++){
int x = i%result.cols, y = i/result.cols;
result.data[x][y].real = out[i][0];
result.data[x][y].imag = out[i][1];
}
fftw_destroy_plan(p);
fftw_free(in);
fftw_free(out);
return result;
}
void m_reverseFFTR(FFTR fft, int type){
fftw_complex *in, *out;
fftw_plan p;
int scaleFactor = fft.cols*fft.rows;
in = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * fft.rows * fft.cols);
out = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * fft.rows * fft.cols);
switch(type){
case FFT_TYPE_1D:
p = fftw_plan_dft_1d(fft.rows*fft.cols, in, out, FFTW_BACKWARD, FFTW_ESTIMATE);
break;
case FFT_TYPE_2D:
p = fftw_plan_dft_2d(fft.rows, fft.cols, in, out, FFTW_BACKWARD, FFTW_ESTIMATE);
break;
}
for(int j=0; j<fft.rows; j++)
for(int i=0; i<fft.cols; i++){
int index = j*fft.cols+i;
in[index][0] = fft.data[i][j].real;
in[index][1] = fft.data[i][j].imag;
}
fftw_execute(p);
for(int i=0; i<fft.rows*fft.cols; i++){
int x = i%fft.cols, y = i/fft.cols;
fft.data[x][y].real = out[i][0]/scaleFactor;
fft.data[x][y].imag = out[i][1]/scaleFactor;
}
fftw_destroy_plan(p);
fftw_free(in);
fftw_free(out);
}
FFTR multFFT(const FFTR fft1, const FFTR fft2){
FFTR result;
result.cols = fft1.cols;
result.rows = fft1.rows;
result.data = new myComplex*[result.cols];
for(int i=0; i<result.cols; i++)
result.data[i] = new myComplex[result.rows];
for(int i=0; i<result.cols; i++){
for(int j=0; j<result.rows; j++){
result.data[i][j].real = (fft1.data[i][j].real*fft2.data[i][j].real)-(fft1.data[i][j].imag*fft2.data[i][j].imag);
result.data[i][j].imag = (fft1.data[i][j].real*fft2.data[i][j].imag)+(fft1.data[i][j].imag*fft2.data[i][j].real);
}
}
return result;
}
long double getFFTAverege(FFTR fft){
long double result = 0;
for(int i=0; i<fft.cols; i++){
long double sum=0;
for(int j=0; j<fft.rows; j++){
sum += fft.data[i][j].real;
}
result += sum/fft.rows;
}
return result/fft.rows;
}
void deleteFFTR(FFTR fftr){
for(int i=0; i<fftr.cols; i++)
if(fftr.data[i]) delete [] fftr.data[i];
if(fftr.data) delete [] fftr.data;
}
void printFFTR(FFTR fft){
for(int j=0; j<fft.rows; j++){
for(int i=0; i<fft.cols; i++){
printf("%f%si%f\n", fft.data[i][j].real, (fft.data[i][j].imag<0 ? "-" : "+"), abs(fft.data[i][j].imag));
}
}
}
cv::Mat resize_zeros(const cv::Mat src, cv::Size newSize){
cv::Mat srcROI, result, resultROI;
result.create(newSize, src.type());
srcROI = src(cv::Rect(0,0,(src.cols>result.cols ? result.cols : src.cols), (src.rows>result.rows ? result.rows : src.rows)));
result = 0;
resultROI = result(cv::Rect(0,0, srcROI.cols, srcROI.rows));
srcROI.copyTo(resultROI);
return result;
}
//otsu's threshhold
template<class T> T getThreshold(cv::Mat mGrey){
uchar* image = mGrey.data;
int columns = mGrey.cols;
int rows = mGrey.rows;
const T SIGMA = 0.000001;
const int num_bins = 257;
int counts[num_bins] = {0};
T p[num_bins] = {0};
T mu[num_bins] = {0};
T omega[num_bins] = {0};
T sigma_b_squared[num_bins] = {0};
int sumC;
// calculate histogram
for(int i = 0; i < rows*columns; i++)
counts[image[i]]++;
sumC = 0;
for(int i = 0; i < num_bins; i++)
sumC += counts[i];
for(int i = 0; i < num_bins; i++)
p[i] = ((T)counts[i])/sumC;
mu[0] = omega[0] = p[0];
for(int i = 1; i < num_bins; i++){
omega[i] = omega[i-1] + p[i];
mu[i] = mu[i-1] + p[i]*(i+1);
}
T mu_t = mu[num_bins-1];
T maxval = -1.0;
for(int i = 0; i < num_bins; i++){
T v = mu_t * omega[i] - mu[i];
if (omega[i] > SIGMA && abs(1.0-omega[i]) > SIGMA){
sigma_b_squared[i] = v*v/(omega[i]* (1.0 - omega[i]));
maxval = std::max(maxval,sigma_b_squared[i]);
}
}
// Find the location of the maximum value of sigma_b_squared.
// The maximum may extend over several bins, so average together the
// locations.
// If maxval == -1, sigma_b_squared is not defined, then return 0.
T level = 0;
if (maxval > 0){
T idx = 0;
int maxNumbers = 0;
for(int i = 0; i < num_bins; i++){
if (sigma_b_squared[i] == maxval){
idx += i;
maxNumbers++;
}
}
if (maxNumbers >= 0){
idx /= maxNumbers;
// Normalize the threshold to the range [0, 1].
// level = (idx - 1) / (num_bins - 1);
level = idx / (num_bins - 1);
}
}
return level;
}
}
double fftData[512*512];
That's 2 MB of data (512 × 512 × 8 bytes), which is (probably) too big to fit on the stack. The simplest fix is to use a dynamically allocated array instead:
std::vector<double> fftData(512*512);
Alternatively, if dynamic allocation is too expensive, you could use a static or global array. This is usually a bad idea, since it makes the function non-reentrant and awkward to use in a multi-threaded program; however, you already have so many globals that one more probably won't hurt.
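In saveSig() that change might look roughly like this (a sketch; <vector> is already included in the file, the rest of the function is unchanged, and the pointer-based createFFTR template overload from myCVFunctions.h is reached via .data()):
void saveSig(cv::Mat *frame){
    // Heap-allocated buffer instead of a 2 MB array on the stack.
    std::vector<double> fftData(512*512);
    cv::Mat sigROI, sigHSV, resized;
    sigROI = (*frame)(g_c_sigROI);
    cv::cvtColor(sigROI, sigHSV, CV_BGR2HSV);
    resized = my_cv::resize_zeros(sigHSV, cv::Size(512,512));
    cv::MatIterator_<cv::Vec3b> m_it = resized.begin<cv::Vec3b>();
    for(int i=0; m_it!=resized.end<cv::Vec3b>(); m_it++, i++){
        fftData[i] = (*m_it)[2];
    }
    my_cv::FFTR fft = my_cv::createFFTR<double>(fftData.data(), 512, 512, FFT_TYPE_2D);
    // ... rest of the function as before ...
}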