Apologies if the answer is already somewhere on the site (I couldn't find it). I'm a hobbyist trying to load a WAV file, get its magnitude and phase data (for modification), generate a spectrogram, and then save it back as a new WAV file.
I use C++ (Qt) and the FFTW library.
My problem is that the resulting WAV differs from the original even when no modifications are made. If the FFT operations are performed on the whole sample sequence at once, the result sounds just like the original. But I have to use STFTs with overlapping windows, and in that case I get distortions: periodic crackling sounds, and the waveform of the audio is significantly changed.
This can be seen in the following examples (viewed in Audacity):
Original / processed in one chunk:
[image: original]
Processed (windowSize=2048, hopSize=1024, no window function):
[image: processed ws=2048, hs=1024, wf=none]
I can't post more examples with my reputation, but applying a Hamming window after the ISTFT (not before the STFT), together with the method I use to combine the resulting windowed samples, gives good sound. The waveform is still quite different, though; mainly a significant loss in the peaks is observed.
I think the way I combine the ISTFT results into the new sample sequence is the problem. What is the proper way to do this? An example in C++ would be really appreciated.
EDIT
As correctly pointed out by SleuthEye, I made a mistake in the code.
The code is adjusted. The waveform and sound now seem to be perfect, even without applying a window function. Still, is this method correct for such an operation?
Here's the relevant source:
// getSampleNormalized(n) returns sample n of 1 channel in -1.0 to 1.0 range
// getSampleCount() returns sample count of 1 channel
// quint32 is just unsigned int
quint32 windowSize = 2048;
quint32 windowSizeHalf = windowSize / 2 + 1;
quint32 slideWindowBy = 1024; // hopSize
quint32 windowCount = getSampleCount() / slideWindowBy;
if ( (windowCount * slideWindowBy) < getSampleCount()){
windowCount += 1;
}
quint32 newSampleCount = windowCount * slideWindowBy + ( windowSize - slideWindowBy );
double *window = new double[windowSize];
fftw_complex *fftResult = new fftw_complex[windowSizeHalf];
fftw_complex *fftWindow = new fftw_complex[windowSizeHalf];
double *result = new double[windowSize];
double **magnitudes = new double*[windowCount];
double **phases = new double*[windowCount];
double **signalWindows = new double*[windowCount];
for (int i = 0; i < windowCount; ++i){
magnitudes[i] = new double[windowSizeHalf];
phases[i] = new double[windowSizeHalf];
signalWindows[i] = new double[windowSize];
}
double *sampleSignals = new double[newSampleCount];
fftw_plan fftPlan = fftw_plan_dft_r2c_1d( windowSize, window, fftResult, FFTW_ESTIMATE );
fftw_plan ifftPlan = fftw_plan_dft_c2r_1d( windowSize, fftWindow, result, FFTW_ESTIMATE );
// STFT
for ( int currentWindow = 0; currentWindow < windowCount; ++currentWindow ){
for (int i = 0; i < windowSize; ++i){
quint32 currentSample = currentWindow * slideWindowBy + i;
if ( ( currentSample ) < getSampleCount() ){
window[i] = getSampleNormalized( currentSample ); // * ( windowHamming( i, windowSize ) );
}
else{
window[i] = 0.0;
}
}
fftw_execute(fftPlan);
for (int i = 0; i < windowSizeHalf; ++i){
magnitudes[currentWindow][i] = sqrt( fftResult[i][0]*fftResult[i][0] + fftResult[i][1]*fftResult[i][1] );
phases[currentWindow][i] = atan2( fftResult[i][1], fftResult[i][0] );
}
}
// INVERSE STFT
for ( int currentWindow = 0; currentWindow < windowCount; ++currentWindow ){
for ( int i = 0; i < windowSizeHalf; ++i ){
fftWindow[i][0] = magnitudes[currentWindow][i] * cos( phases[currentWindow][i] ); // Real
fftWindow[i][1] = magnitudes[currentWindow][i] * sin( phases[currentWindow][i] ); // Imaginary
}
fftw_execute(ifftPlan);
for ( int i = 0; i < windowSize; ++i ){
signalWindows[currentWindow][i] = result[i] / windowSize; // getting normalized result
//signalWindows[currentWindow][i] *= (windowHamming( i, windowSize )); // applying Hamming window function
}
}
quint32 pos;
// HERE WE COMBINE RESULTED WINDOWS
// COMBINE AND AVERAGE
// 1st window should be full replace
for ( int i = 0; i < windowSize; ++i ){
sampleSignals[i] = signalWindows[0][i];
}
// 2nd window and onwards: combine with previous ones
for ( int currentWindow = 1; currentWindow < windowCount; ++currentWindow ){
// combine and average with data from previous window
for ( int i = 0; i < (windowSize - slideWindowBy); ++i ){
pos = currentWindow * slideWindowBy + i;
sampleSignals[pos] = (sampleSignals[pos] + signalWindows[currentWindow][i]) * 0.5;
}
// simply replace for the rest
for ( int i = (windowSize - slideWindowBy); i < windowSize; ++i ){
pos = currentWindow * slideWindowBy + i;
sampleSignals[pos] = signalWindows[currentWindow][i];
}
}
// then just save the wav file...
Related
I am attempting to write a naive implementation of the Short-Time Fourier Transform using consecutive FFT frames in time, calculated using the FFTW library, but I am getting a Segmentation fault and cannot work out why.
My code is as below:
// load in audio
AudioFile<double> audioFile;
audioFile.load ("assets/example-audio/file_example_WAV_1MG.wav");
int N = audioFile.getNumSamplesPerChannel();
// make stereo audio mono
double fileDataMono[N];
if (audioFile.isStereo())
for (int i = 0; i < N; i++)
fileDataMono[i] = ( audioFile.samples[0][i] + audioFile.samples[1][i] ) / 2;
// setup stft
// (test transform, presently unoptimized)
int stepSize = 512;
int M = 2048; // fft size
int noOfFrames = (N-(M-stepSize))/stepSize;
// create Hamming window vector
double w[M];
for (int m = 0; m < M; m++) {
w[m] = 0.53836 - 0.46164 * cos( 2*M_PI*m / M );
}
double* input;
// (pads input array if necessary)
if ( (N-(M-stepSize))%stepSize != 0) {
noOfFrames += 1;
int amountOfZeroPadding = stepSize - (N-(M-stepSize))%stepSize;
double ipt[N + amountOfZeroPadding];
for (int i = 0; i < N; i++) // copy values from fileDataMono into input
ipt[i] = fileDataMono[i];
for (int i = 0; i < amountOfZeroPadding; i++)
ipt[N + i] = 0;
input = ipt;
} else {
input = fileDataMono;
}
// compute stft
fftw_complex* stft[noOfFrames];
double frames[noOfFrames][M];
fftw_plan fftPlan;
for (int i = 0; i < noOfFrames; i++) {
stft[i] = (fftw_complex*)fftw_malloc(sizeof(fftw_complex) * M);
for (int m = 0; m < M; m++)
frames[i][m] = input[i*stepSize + m] * w[m];
fftPlan = fftw_plan_dft_r2c_1d(M, frames[i], stft[i], FFTW_ESTIMATE);
fftw_execute(fftPlan);
}
// compute istft
double* outputFrames[noOfFrames];
double output[N];
for (int i = 0; i < noOfFrames; i++) {
outputFrames[i] = (double*)fftw_malloc(sizeof(double) * M);
fftPlan = fftw_plan_dft_c2r_1d(M, stft[i], outputFrames[i], FFTW_ESTIMATE);
fftw_execute(fftPlan);
for (int m = 0; i < M; m++) {
output[i*stepSize + m] += outputFrames[i][m];
}
}
fftw_destroy_plan(fftPlan);
for (int i = 0; i < noOfFrames; i++) {
fftw_free(stft[i]);
fftw_free(outputFrames[i]);
}
// output audio
AudioFile<double>::AudioBuffer outputBuffer;
outputBuffer.resize (1);
outputBuffer[0].resize(N);
outputBuffer[0].assign(output, output+N);
bool ok = audioFile.setAudioBuffer(outputBuffer);
audioFile.setAudioBufferSize (1, N);
audioFile.setBitDepth (16);
audioFile.setSampleRate (8000);
audioFile.save ("out/audioOutput.wav");
The segfault seems to be raised by the first fftw_malloc when computing the forward STFT.
Thanks in advance!
The relevant bit of code is:
double* input;
if ( (N-(M-stepSize))%stepSize != 0) {
double ipt[N + amountOfZeroPadding];
//...
input = ipt;
}
//...
input[i*stepSize + m];
Your input pointer points at memory that exists only inside the if statement. The closing brace denotes the end of the lifetime of the ipt array. When dereferencing the pointer later, you are addressing memory that no longer exists.
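One possible fix (a minimal sketch reusing the question's variable names, intended to replace the padding block inside the same function) is to give the buffer a lifetime that outlives the branch, for example with a std::vector:
#include <vector>

// Give the (possibly padded) input a lifetime that covers the whole STFT,
// instead of a local array that dies at the closing brace of the if block.
std::vector<double> inputBuf(fileDataMono, fileDataMono + N);
if ( (N-(M-stepSize))%stepSize != 0 ) {
    noOfFrames += 1;
    int amountOfZeroPadding = stepSize - (N-(M-stepSize))%stepSize;
    inputBuf.resize(N + amountOfZeroPadding, 0.0); // zero-pad the tail
}
double* input = inputBuf.data(); // stays valid for as long as inputBuf exists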
Hi everyone, I am trying to implement pattern matching with the FFT, but I am not sure what the result should be (I think I am missing something, even though I have read a lot about the problem and tried many different implementations; this one is the best so far). Here is my FFT correlation function.
void fft2d(fftw_complex**& a, int rows, int cols, bool forward = true)
{
fftw_plan p;
for (int i = 0; i < rows; ++i)
{
p = fftw_plan_dft_1d(cols, a[i], a[i], forward ? FFTW_FORWARD : FFTW_BACKWARD, FFTW_ESTIMATE);
fftw_execute(p);
}
fftw_complex* t = (fftw_complex*)fftw_malloc(rows * sizeof(fftw_complex));
for (int j = 0; j < cols; ++j)
{
for (int i = 0; i < rows; ++i)
{
t[i][0] = a[i][j][0];
t[i][1] = a[i][j][1];
}
p = fftw_plan_dft_1d(rows, t, t, forward ? FFTW_FORWARD : FFTW_BACKWARD, FFTW_ESTIMATE);
fftw_execute(p);
for (int i = 0; i < rows; ++i)
{
a[i][j][0] = t[i][0];
a[i][j][1] = t[i][1];
}
}
fftw_free(t);
}
int findCorrelation(int argc, char* argv[])
{
BMP bigImage;
BMP keyImage;
BMP result;
RGBApixel blackPixel = { 0, 0, 0, 1 };
const bool swapQuadrants = (argc == 4);
if (argc < 3 || argc > 4) {
cout << "correlation img1.bmp img2.bmp" << endl;
return 1;
}
if (!keyImage.ReadFromFile(argv[1])) {
return 1;
}
if (!bigImage.ReadFromFile(argv[2])) {
return 1;
}
//Preparations
const int maxWidth = std::max(bigImage.TellWidth(), keyImage.TellWidth());
const int maxHeight = std::max(bigImage.TellHeight(), keyImage.TellHeight());
const int rowsCount = maxHeight;
const int colsCount = maxWidth;
BMP bigTemp = bigImage;
BMP keyTemp = keyImage;
keyImage.SetSize(maxWidth, maxHeight);
bigImage.SetSize(maxWidth, maxHeight);
for (int i = 0; i < rowsCount; ++i)
for (int j = 0; j < colsCount; ++j) {
RGBApixel p1;
if (i < bigTemp.TellHeight() && j < bigTemp.TellWidth()) {
p1 = bigTemp.GetPixel(j, i);
} else {
p1 = blackPixel;
}
bigImage.SetPixel(j, i, p1);
RGBApixel p2;
if (i < keyTemp.TellHeight() && j < keyTemp.TellWidth()) {
p2 = keyTemp.GetPixel(j, i);
} else {
p2 = blackPixel;
}
keyImage.SetPixel(j, i, p2);
}
//Here is where the transforms begin
fftw_complex **a = (fftw_complex**)fftw_malloc(rowsCount * sizeof(fftw_complex*));
fftw_complex **b = (fftw_complex**)fftw_malloc(rowsCount * sizeof(fftw_complex*));
fftw_complex **c = (fftw_complex**)fftw_malloc(rowsCount * sizeof(fftw_complex*));
for (int i = 0; i < rowsCount; ++i) {
a[i] = (fftw_complex*)fftw_malloc(colsCount * sizeof(fftw_complex));
b[i] = (fftw_complex*)fftw_malloc(colsCount * sizeof(fftw_complex));
c[i] = (fftw_complex*)fftw_malloc(colsCount * sizeof(fftw_complex));
for (int j = 0; j < colsCount; ++j) {
RGBApixel p1;
p1 = bigImage.GetPixel(j, i);
a[i][j][0] = (0.299*p1.Red + 0.587*p1.Green + 0.114*p1.Blue);
a[i][j][1] = 0.0;
RGBApixel p2;
p2 = keyImage.GetPixel(j, i);
b[i][j][0] = (0.299*p2.Red + 0.587*p2.Green + 0.114*p2.Blue);
b[i][j][1] = 0.0;
}
}
fft2d(a, rowsCount, colsCount);
fft2d(b, rowsCount, colsCount);
result.SetSize(maxWidth, maxHeight);
for (int i = 0; i < rowsCount; ++i)
for (int j = 0; j < colsCount; ++j) {
fftw_complex& y = a[i][j];
fftw_complex& x = b[i][j];
double u = x[0], v = x[1];
double m = y[0], n = y[1];
c[i][j][0] = u*m + n*v;
c[i][j][1] = v*m - u*n;
int fx = j;
if (fx>(colsCount / 2)) fx -= colsCount;
int fy = i;
if (fy>(rowsCount / 2)) fy -= rowsCount;
float r2 = (fx*fx + fy*fy);
const double cuttoffCoef = (maxWidth * maxHeight) / 37992.;
if (r2<128 * 128 * cuttoffCoef)
c[i][j][0] = c[i][j][1] = 0;
}
fft2d(c, rowsCount, colsCount, false);
const int halfCols = colsCount / 2;
const int halfRows = rowsCount / 2;
if (swapQuadrants) {
for (int i = 0; i < halfRows; ++i)
for (int j = 0; j < halfCols; ++j) {
std::swap(c[i][j][0], c[i + halfRows][j + halfCols][0]);
std::swap(c[i][j][1], c[i + halfRows][j + halfCols][1]);
}
for (int i = halfRows; i < rowsCount; ++i)
for (int j = 0; j < halfCols; ++j) {
std::swap(c[i][j][0], c[i - halfRows][j + halfCols][0]);
std::swap(c[i][j][1], c[i - halfRows][j + halfCols][1]);
}
}
for (int i = 0; i < rowsCount; ++i)
for (int j = 0; j < colsCount; ++j) {
const double& g = c[i][j][0];
RGBApixel pixel;
pixel.Alpha = 0;
int gInt = 255 - static_cast<int>(std::floor(g + 0.5));
pixel.Red = gInt;
pixel.Green = gInt;
pixel.Blue = gInt;
result.SetPixel(j, i, pixel);
}
BMP res;
res.SetSize(maxWidth, maxHeight);
result.WriteToFile("result.bmp");
return 0;
}
[image: sample output]
This question would probably be more appropriately posted on another site like Cross Validated (metaoptimize.com used to be a good one too, but it appears to be gone).
That said:
There are two similar operations you can perform with the FFT: convolution and correlation. Convolution is used for determining how two signals interact with each other, whereas correlation can be used to express how similar two signals are to each other. Make sure you're doing the right operation, as they're both commonly implemented through a DFT.
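For reference, the only frequency-domain difference between the two is a complex conjugate on one of the spectra. A minimal sketch using FFTW-style fftw_complex values (the helper name pointwiseProduct is mine, not part of FFTW):
#include <fftw3.h>

// Element-wise product of spectra A and B of length n, written into C.
// conjugate = false gives convolution, conjugate = true gives correlation.
void pointwiseProduct(const fftw_complex* A, const fftw_complex* B,
                      fftw_complex* C, int n, bool conjugate)
{
    for (int k = 0; k < n; ++k) {
        double br = B[k][0];
        double bi = conjugate ? -B[k][1] : B[k][1]; // conj(B) for correlation
        C[k][0] = A[k][0] * br - A[k][1] * bi;      // real part
        C[k][1] = A[k][0] * bi + A[k][1] * br;      // imaginary part
    }
}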
For this type of application of DFTs you usually wouldn't extract any useful information from the Fourier spectrum itself, unless you were looking for frequencies common to both data sources (e.g., if you were comparing two bridges to see whether their supports are spaced similarly).
Your third image looks a lot like the power domain; normally I see the correlation output entirely grey except where overlap occurred. Your code definitely appears to be computing the inverse DFT, so unless I'm missing something, the only other explanation I've come up with for the fuzzy look is some of the "fudge factor" code in there, like:
if (r2<128 * 128 * cuttoffCoef)
c[i][j][0] = c[i][j][1] = 0;
As for what you should expect: wherever there are common elements between the two images you'll see a peak. The larger the peak, the more similar the two images are near that region.
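To make that concrete, here is a minimal sketch of locating the strongest peak in the real part of the inverse transform, reusing the c array layout and dimensions from the question (the function name findPeak is mine):
#include <fftw3.h>

// Scan the inverse-transformed correlation surface for its maximum;
// the returned (row, col) is the offset where the pattern matches best.
void findPeak(fftw_complex** c, int rowsCount, int colsCount,
              int& peakRow, int& peakCol)
{
    double peakValue = c[0][0][0];
    peakRow = 0;
    peakCol = 0;
    for (int i = 0; i < rowsCount; ++i)
        for (int j = 0; j < colsCount; ++j)
            if (c[i][j][0] > peakValue) {
                peakValue = c[i][j][0];
                peakRow = i;
                peakCol = j;
            }
}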
Some comments and/or recommended changes:
1) Convolution & correlation are not scale invariant operations. In other words, the size of your pattern image can make a significant difference in your output.
2) Normalize your images before correlation.
When you get the image data ready for the forward DFT pass:
a[i][j][0] = (0.299*p1.Red + 0.587*p1.Green + 0.114*p1.Blue);
a[i][j][1] = 0.0;
/* ... */
How you grayscale the image is your business (though I would've picked something like sqrt( r*r + b*b + g*g )). However, I don't see you doing anything to normalize the image.
The word "normalize" can take on a few different meanings in this context. Two common types:
normalize the range of values between 0.0 and 1.0
normalize the "whiteness" of the images
3) Run your pattern image through an edge enhancement filter. I've personally made use of Canny, Sobel, and I think I messed with a few others. As I recall, Canny was "quick 'n' dirty", Sobel was more expensive, but I got comparable results when it came time to do the correlation. See chapter 24 of the "DSP guide" book that's freely available online. The whole book is worth your time, but if you're low on time then, at a minimum, chapter 24 will help a lot.
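As one concrete possibility (a minimal sketch of a Sobel gradient-magnitude pass over a row-major grayscale buffer; this is only an illustration of the idea, not something taken from the book):
#include <cmath>

// Gradient magnitude via the 3x3 Sobel kernels; src and dst are row-major
// grayscale buffers of size width*height. Border pixels are not written,
// so zero-initialize dst before calling.
void sobelMagnitude(const double* src, double* dst, int width, int height)
{
    for (int y = 1; y < height - 1; ++y) {
        for (int x = 1; x < width - 1; ++x) {
            double gx = -src[(y-1)*width + (x-1)] + src[(y-1)*width + (x+1)]
                        - 2*src[y*width + (x-1)] + 2*src[y*width + (x+1)]
                        - src[(y+1)*width + (x-1)] + src[(y+1)*width + (x+1)];
            double gy = -src[(y-1)*width + (x-1)] - 2*src[(y-1)*width + x] - src[(y-1)*width + (x+1)]
                        + src[(y+1)*width + (x-1)] + 2*src[(y+1)*width + x] + src[(y+1)*width + (x+1)];
            dst[y*width + x] = std::sqrt(gx*gx + gy*gy);
        }
    }
}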
4) Re-scale the output image between [0, 255]; if you want to implement thresholds, do it after this step because the thresholding step is lossy.
My memory on this one is hazy, but as I recall (edited for clarity):
You can scale the final image pixels (before rescaling) between [-1.0, 1.0] by dividing off the largest power spectrum value from the entire power spectrum
The largest power spectrum value is, conveniently enough, the center-most value in the power spectrum (corresponding to the lowest frequency)
If you divide it off the power spectrum, you'll end up doing twice the work; since FFTs are linear, you can delay the division until after the inverse DFT pass to when you're re-scaling the pixels between [0..255].
If after rescaling most of your values end up so black you can't see them, you can use a solution to the ODE y' = y(1 - y) (one example is the sigmoid f(x) = 1 / (1 + exp(-c*x) ), for some scaling factor c that gives better gradations). This has more to do with improving your ability to interpret the results visually than anything you might use to programmatically find peaks.
Edit: I said [0, 255] above. I suggest you rescale to [128, 255] or some other lower bound that is gray rather than black.
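A minimal sketch of that last suggestion, remapping an already-normalized correlation value into an 8-bit range with a gray floor via the sigmoid (the scaling constant c = 6.0 is just an arbitrary starting point to tune):
#include <cmath>

// Map a correlation value (already scaled to roughly [-1.0, 1.0]) into
// [lowBound, 255] through a sigmoid so that small values remain visible.
int toPixel(double value, double c = 6.0, int lowBound = 128)
{
    double s = 1.0 / (1.0 + std::exp(-c * value)); // sigmoid in (0, 1)
    return lowBound + static_cast<int>(s * (255 - lowBound) + 0.5);
}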
I'm learning some basic image processing and working through some algorithms with a grayscale BMP file, but I'd like to convert my code to output color BMP files instead of grayscale. I'm using the EasyBMP library and have the following code to read in and write out my BMP file:
bool Image::readFromBMPFile(const std::string & inputFileName){
bool success = true;
// use BMP object to read image
BMP inputImage;
success = inputImage.ReadFromFile(inputFileName.c_str() );
if( success ){
// allocate memory for image (deleting old, if exists)
m_numRows = inputImage.TellHeight();
m_numCols = inputImage.TellWidth();
if( m_pixels != NULL ){
// deallocate old memory
delete [] m_pixels;
}
m_pixels = new double[m_numRows * m_numCols];
// copy pixels
for( int r = 0; r < m_numRows; ++r ){
for( int c = 0; c < m_numCols; ++c ){
RGBApixel pixelVal = inputImage.GetPixel(c, r);
double val = (double) pixelVal.Blue + (double) pixelVal.Green + (double) pixelVal.Red;
val = (val / 3.0 + 0.5);
m_pixels[r * m_numCols + c] = val;
}
}
}
return success;
}
bool Image::writeToBMPFile(const std::string & outputFileName){
bool success = true;
if( m_pixels != NULL ){
// create bitmap image
BMP outputImage;
outputImage.SetSize(m_numCols, m_numRows);
outputImage.SetBitDepth( 24 );
double maxVal = m_pixels[0];
double minVal = m_pixels[0];
// Maximum and minimum values
for( int i = 1; i < m_numRows * m_numCols; ++i ){
if( m_pixels[i] > maxVal ){
maxVal = m_pixels[i];
}
if( m_pixels[i] <= minVal ){
minVal = m_pixels[i];
}
}
for( int r = 0; r < m_numRows; ++r ){
for( int c = 0; c < m_numCols; ++c ){
// get pixel value and clamp between 0 and 255
double val = 255.0 * (m_pixels[r * m_numCols + c] - minVal) / (maxVal - minVal);
if( val < 0 ){
val = 0;
}
if( val > 255 ){
val = 255;
}
// set output color based on mapping
RGBApixel pixelVal;
pixelVal.Blue = (int)val;
pixelVal.Green = (int)val;
pixelVal.Red = (int)val;
outputImage.SetPixel(c, r, pixelVal);
}
}
// write to file
success = outputImage.WriteToFile( outputFileName.c_str() );
} else {
success = false;
}
return success;
}
What kind of steps should I take to make my program compatible with RGB images?
I'm trying to mix some audio samples with the following algorithm:
short* FilterGenerator::mixSources(std::vector<RawData>rawsources, int numframes)
{
short* output = new short[numframes * 2]; // multiply 2 for channels
for (int sample = 0; sample < numframes * 2; ++sample)
{
for (int sourceCount = 0; sourceCount < rawsources.size(); ++sourceCount)
{
if (sample <= rawsources.at(sourceCount).frames * 2)
{
short outputSample = rawsources.at(sourceCount).data[sample];
output[sample] += outputSample;
}
}
}
// post mixing volume compression
for (int sample = 0; sample < numframes; ++sample)
{
output[sample] /= (float)rawsources.size();
}
return output;
}
I get the output I want, except that when one of the sources is done, the other sources start playing louder. I know why this happens, but I don't know how to solve it properly.
Also, here is a screenshot from Audacity of the audio I output:
As you can see, there's definitely something wrong: the audio is no longer centered on zero, and it gets louder once one of the sources has finished playing.
Most of all I'd like to fix the volume problem but any other tweaks I can do are very appreciated!
Some extra info: I know that this code doesn't allow mono sources but that's ok. I'm only going to use stereo interleaved audio samples.
Mixing usually doesn't divide by the number of sources; doing so means that mixing a normal track with a silent track halves its amplitude. If you want, you can normalize the mixed track afterwards so that it stays within range.
The code is not tested; there may be errors:
#include <algorithm> // for std::max
#include <cmath> // for std::fabs
short* FilterGenerator::mixSources(std::vector<RawData>rawsources, int numframes)
{
// We can't accumulate directly into shorts because the sum can overflow
// Using floats also avoids distortion during the renormalization step
float *outputFloating = new float [numframes * 2];
// The maximum of the absolute value of the signal
float maximumOutput = 0;
for (int sample = 0; sample < numframes * 2; ++sample)
{
// make sure each output sample starts at zero
outputFloating[sample] = 0;
for (int sourceCount = 0; sourceCount < rawsources.size(); ++sourceCount)
{
// I think that should be a '<'
if (sample < rawsources.at(sourceCount).frames * 2)
outputFloating[sample] += rawsources.at(sourceCount).data[sample];
}
// Calculates the maximum
maximumOutput = std::max (maximumOutput, std::fabs(outputFloating[sample]));
}
// A short buffer
short* output = new short [numframes * 2]; // multiply 2 for channels
float multiplier = maximumOutput > 32767 ? 32767 / maximumOutput : 1;
// Renormalize the track
for (int sample = 0; sample < numframes * 2; ++sample)
output[sample] = (short) (outputFloating[sample] * multiplier);
delete[] outputFloating;
return output;
}
Since you're adding everything up into a short before you divide, you're probably getting overflow. You need to accumulate into a larger intermediate type. Also, the final scaling shouldn't depend on the number of sources; it should be a constant, determined before you call your function.
short* FilterGenerator::mixSources(std::vector<RawData>rawsources, int numframes, double gain = 0.5)
{
short* output = new short[numframes * 2]; // multiply 2 for channels
for (int sample = 0; sample < numframes * 2; ++sample)
{
long newSample = 0;
for (int sourceCount = 0; sourceCount < rawsources.size(); ++sourceCount)
{
if (sample < rawsources.at(sourceCount).frames * 2) // '<' so we never read one past the end of a source
{
short outputSample = rawsources.at(sourceCount).data[sample];
newSample += outputSample;
}
}
output[sample] = (short)(newSample * gain);
}
return output;
}
You don't really have to do the "post mixing volume compression". Simply add up all the sources and don't allow the sum to overflow. This should work:
short* FilterGenerator::mixSources(std::vector<RawData>rawsources, int numframes)
{
short* output = new short[numframes * 2]; // multiply 2 for channels
for (int sample = 0; sample < numframes * 2; ++sample)
{
long sum = 0;
for (int sourceCount = 0; sourceCount < rawsources.size(); ++sourceCount)
{
if (sample < rawsources.at(sourceCount).frames * 2)
{
short outputSample = rawsources.at(sourceCount).data[sample];
sum += outputSample;
}
}
// clamp the summed sample to the 16-bit range before storing it
if (sum > 32767) sum = 32767;
if (sum < -32768) sum = -32768;
output[sample] = (short)sum;
}
return output;
}
I have points in an image and I need to detect the most collinear ones. The fastest method is to use the Hough transform, but I have to modify the OpenCV implementation. Specifically, I need the semi-collinear points to be returned together with the detected line, which is why I modified the polar line struct. A tolerance threshold is also needed to detect nearly collinear points, as shown in the image. Can someone help with how to tune this threshold?
I need at least four semi-collinear points to detect the line to which they belong.
The points in the first image were detected by 6 overlapping lines.
The points in the middle image were not detected by any line.
The points in the third image were detected by three lines.
What is the best way to get rid of the overlapping lines? Or how do I tune the tolerance threshold so that the semi-collinear points are detected by only one line?
This is my own function call:
vector<CvLinePolar2> lines;
CvMat c_image = source1; // loaded image
HoughLinesStandard(&c_image,1,CV_PI/180,4,&lines,INT_MAX);
typedef struct CvLinePolar2
{
float rho;
float angle;
vector<CvPoint> points;
};
void HoughLinesStandard( const CvMat* img, float rho, float theta,
int threshold, vector<CvLinePolar2> *lines, int linesMax= INT_MAX )
{
cv::AutoBuffer<int> _accum, _sort_buf;
cv::AutoBuffer<float> _tabSin, _tabCos;
const uchar* image;
int step, width, height;
int numangle, numrho;
int total = 0;
int i, j;
float irho = 1 / rho;
double scale;
vector<vector<CvPoint>> lpoints;
CV_Assert( CV_IS_MAT(img) && CV_MAT_TYPE(img->type) == CV_8UC1 );
image = img->data.ptr;
step = img->step;
width = img->cols;
height = img->rows;
numangle = cvRound(CV_PI / theta);
numrho = cvRound(((width + height) * 2 + 1) / rho);
_accum.allocate((numangle+2) * (numrho+2));
_sort_buf.allocate(numangle * numrho);
_tabSin.allocate(numangle);
_tabCos.allocate(numangle);
int *accum = _accum, *sort_buf = _sort_buf;
float *tabSin = _tabSin, *tabCos = _tabCos;
memset( accum, 0, sizeof(accum[0]) * (numangle+2) * (numrho+2) );
//memset( lpoints, 0, sizeof(lpoints) );
lpoints.resize(sizeof(accum[0]) * (numangle+2) * (numrho+2));
float ang = 0;
for(int n = 0; n < numangle; ang += theta, n++ )
{
tabSin[n] = (float)(sin(ang) * irho);
tabCos[n] = (float)(cos(ang) * irho);
}
// stage 1. fill accumulator
for( i = 0; i < height; i++ )
for( j = 0; j < width; j++ )
{
if( image[i * step + j] != 0 )
{
CvPoint pt;
pt.x = j; pt.y = i;
for(int n = 0; n < numangle; n++ )
{
int r = cvRound( j * tabCos[n] + i * tabSin[n] );
r += (numrho - 1) / 2;
int ind = (n+1) * (numrho+2) + r+1;
int s = accum[ind];
accum[ind]++;
lpoints[ind].push_back(pt);
}
}
}
// stage 2. find local maximums
for(int r = 0; r < numrho; r++ )
for(int n = 0; n < numangle; n++ )
{
int base = (n+1) * (numrho+2) + r+1;
if( accum[base] > threshold &&
accum[base] > accum[base - 1] && accum[base] >= accum[base + 1] &&
accum[base] > accum[base - numrho - 2] && accum[base] >= accum[base + numrho + 2] )
sort_buf[total++] = base;
}
// stage 3. sort the detected lines by accumulator value
icvHoughSortDescent32s( sort_buf, total, accum );
// stage 4. store the first min(total,linesMax) lines to the output buffer
linesMax = MIN(linesMax, total);
scale = 1./(numrho+2);
for( i = 0; i < linesMax; i++ )
{
CvLinePolar2 line;
int idx = sort_buf[i];
int n = cvFloor(idx*scale) - 1;
int r = idx - (n+1)*(numrho+2) - 1;
line.rho = (r - (numrho - 1)*0.5f) * rho;
line.angle = n * theta;
line.points = lpoints[idx];
lines->push_back(line);
}
}
One approach is non-maximum suppression to thin out the candidate set of potential lines. Once you've identified the thinned set, you could then average together the remaining lines that fall within some angular or spatial difference threshold.
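A minimal sketch of that idea, keeping only the strongest line within some (rho, angle) neighbourhood (it assumes the CvLinePolar2 struct from the question and that the input is already sorted by accumulator value, strongest first; the tolerance values are up to you):
#include <cmath>
#include <vector>

// Greedily keep the strongest lines and drop any later line that is too
// close to an already-kept one in (rho, angle) space.
std::vector<CvLinePolar2> suppressSimilarLines(const std::vector<CvLinePolar2>& sorted,
                                               float rhoTol, float angleTol)
{
    std::vector<CvLinePolar2> kept;
    for (const CvLinePolar2& cand : sorted)
    {
        bool tooClose = false;
        for (const CvLinePolar2& k : kept)
        {
            if (std::fabs(cand.rho - k.rho) < rhoTol &&
                std::fabs(cand.angle - k.angle) < angleTol)
            {
                tooClose = true;
                break;
            }
        }
        if (!tooClose)
            kept.push_back(cand);
    }
    return kept;
}
Note that this sketch does not handle angle wrap-around; a fuller version would also treat (rho, theta) and (-rho, theta ± π) as the same line.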
Try HoughLinesP (see the OpenCV reference).
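A minimal usage sketch with the C++ API (the input name binaryImage is hypothetical, and the numeric parameters are only starting points to tune, not values from the reference):
#include <opencv2/imgproc.hpp>
#include <vector>

// Probabilistic Hough transform on a binary edge image: it returns line
// segments directly, which often avoids the pile of overlapping infinite
// lines produced by the standard transform.
std::vector<cv::Vec4i> detectSegments(const cv::Mat& binaryImage)
{
    std::vector<cv::Vec4i> segments;
    cv::HoughLinesP(binaryImage, segments,
                    1,            // rho resolution in pixels
                    CV_PI / 180,  // theta resolution in radians
                    4,            // accumulator threshold (votes), as in the question
                    10,           // minimum accepted segment length in pixels
                    5);           // maximum allowed gap between points on a line
    return segments;
}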