MATLAB to C++/OpenCV normalization function - C++

Here is my MATLAB code:
imageData = imageData ./ toolbox.c3d.p.tprctile(imageData(xy),99.2);
imageData(imageData>1) = 1;
Here is my OpenCV/C++ code; the matrix dst is an OpenCV matrix:
cv::Mat dst;
std::vector<float> result;
for (std::vector<int>::iterator it = index.begin() ; it != index.end(); ++it)
{
int ind = *it;
float temp = dst.at<float>(ind - 1); // ind is a 1-based MATLAB-style index, hence the -1
result.push_back(temp);
}
float divider = tprctile(result,99.2);
dst = dst/ divider;
Utility function for the percentile:
float Utils::tprctile(std::vector<float> channel, double pt)
{
std::sort(channel.begin(),channel.end());
int ptInd = Utilities::MatlabRound(pt / 100 * channel.size());
return channel[ptInd - 1]; // MATLAB's ptInd is 1-based, so shift down by one here
// Matlab code
// function val = tprctile(data, pt)
// data = sort(data);
// ptInd = round( pt/100 * length(data) );
// val = data(ptInd);
}
My question is regarding imageData(imageData>1) = 1.
What is the most efficient way to implement this operation? I can of course iterate through dst as I did above, but is there a better way?

What you want is to truncate the image with cv::threshold.
The following should do what you require:
cv::threshold(dst, dst, 1, 1, CV_THRESH_TRUNC);
This truncates all values above 1 and stores the result in dst.
http://docs.opencv.org/modules/imgproc/doc/miscellaneous_transformations.html?highlight=threshold#threshold
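Putting it together with the division step, a minimal sketch (assuming dst is a CV_32F matrix and tprctile/result are as defined above) of the two MATLAB lines would be:
float divider = tprctile(result, 99.2);              // imageData ./ tprctile(imageData(xy), 99.2)
dst = dst / divider;
cv::threshold(dst, dst, 1.0, 1.0, CV_THRESH_TRUNC);  // imageData(imageData > 1) = 1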

This is what I'm doing at the moment:
int matrixSize = dst.rows * dst.cols;
for (int i = 0; i < matrixSize; ++i)
{
if (dst.at<float>(i) > 1.0f)
{
dst.at<float>(i) = 1.0f;
}
}
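As an alternative sketch, the MatExpr overload of cv::min can clamp every element in one call:
dst = cv::min(dst, 1.0f); // every value above 1 becomes 1, like imageData(imageData>1) = 1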

Related

Spectral clustering in DLIB C++

I would like to cluster a black-and-white image using the k-means clustering algorithm that I found in the DLIB library, and later the spectral clustering algorithm.
The result I have at the moment is very strange (at least to me) and I would be grateful for any help.
Original image:
The current result is:
cv::Mat inputImage = cv::imread("lixo.png");
cv::cvtColor(inputImage, inputImage, CV_BGR2GRAY); // cv::imread loads BGR, not RGB
cv::imshow("Display Image", inputImage);
cv::waitKey(0);
// from cv::mat to dlib vector of points
dlib::matrix<double,2,1> sample_point;
std::vector<dlib::matrix<double,2,1>> samples;
for( long i = 0; i < inputImage.rows; ++i) {
for( long j = 0; j < inputImage.cols; ++j ) {
if (inputImage.at<uchar>(i,j) == (uchar)255) {
sample_point(0) = i;
sample_point(1) = j;
samples.push_back(sample_point);
}
}
}
// typedef for the kind of kernel we want to use
typedef dlib::radial_basis_kernel<dlib::matrix<double,2,1>> kernel_type;
// the kcentroid object
dlib::kcentroid<kernel_type> kc(kernel_type(0.1),0.01, 8);
// kkmeans object and tell it to use kcentroid objects
dlib::kkmeans<kernel_type> test(kc);
// tell the kkmeans we want 3 clusters
int nclus = 3;
test.set_number_of_centers(nclus);
// pick some initial centers for the k-means algorithm
std::vector<dlib::matrix<double,2,1>> initial_centers;
pick_initial_centers(nclus,initial_centers, samples,test.get_kernel());
// now run the k-means algorithm on our set of samples
test.train(samples, initial_centers);
// show result
int r = inputImage.rows;
int c = inputImage.cols;
cv::Mat result1 = cv::Mat::zeros(r, c, CV_8UC1);
cv::Mat result2 = cv::Mat::zeros(r, c, CV_8UC1);
cv::Mat result3 = cv::Mat::zeros(r, c, CV_8UC1);
int n1 = 0;
int n2 = 0;
int n3 = 0;
std::cout << " Result" << std::endl;
for (long i = 0; i < samples.size(); ++i) {
sample_point = samples[i];
int result = test(sample_point);
if(result == 0) {
n1++;
result1.at<uchar>(sample_point(0), sample_point(1)) = (uchar)255;
} else if(result == 1) {
n2++;
result2.at<uchar>(sample_point(0), sample_point(1)) = (uchar)255;
} else if(result == 2) {
n3++;
result3.at<uchar>(sample_point(0), sample_point(1)) = (uchar)255;
}
}
cv::imshow("result1", result1);
cv::imshow("result2", result2);
cv::imshow("result3", result3);
cv::waitKey(0);
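For the spectral clustering part, a rough sketch (assuming your dlib version ships spectral_cluster in <dlib/clustering.h>) could reuse the same samples and kernel:
#include <dlib/clustering.h>

// Sketch: cluster the same point set with dlib's spectral_cluster instead of kkmeans.
// kernel_type and samples are the ones built in the code above.
std::vector<unsigned long> labels =
    dlib::spectral_cluster(kernel_type(0.1), samples, 3);

// labels[i] is the cluster index (0..2) of samples[i]; the result images can be
// painted from it exactly as with the kkmeans output.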

Find the index and value of the column maximum in an OpenCV matrix

This is the original MATLAB implementation
function[m, p] = max2(im)
[m1, k1] = max(im);
[m, k2] = max(m1);
x = k2;
y = k1(k2);
p = [y, x];
It is used inside the following loop:
for r = 2.^linspace(log2(minR),log2(maxR),numSteps);
itestSeek = imresize(itestBase,minR/r);
icorr = normxcorr2(cc,itestSeek);
[m,p] = max2(icorr); % here
if (m>bestm)
bestp = p*r;
bests = ccSize*r;
bestm = m;
end;
end;
Here is my OpenCV 3.0.0 / C++ implementation:
void Utilities::Max2(cv::Mat input_image, double& m, std::vector<int>& p)
{
std::vector<double> m1(input_image.cols); // the local maximum for each column
std::vector<int> k1(input_image.cols); // the index of the local maximum
for (int c = 0; c < input_image.cols; ++c)
{
float temp_max = input_image.at<float>(0, c);
int temp_index = 0;
for (int r = 0; r < input_image.rows; ++r)
{
if (temp_max < input_image.at<float>(r, c))
{
temp_max = input_image.at<float>(r, c);
temp_index = r;
}
}
m1[c] = temp_max;
k1[c] = temp_index;
}
auto iter = std::max_element(m1.begin(), m1.end()); //max of all the local maximum;
m = *iter;
int k2 = std::distance(m1.begin(), iter);
int y = k1[k2]; // row index of the global maximum
p.push_back(y);
p.push_back(k2);
}
C++ usage of the function:
std::vector<double> best_p;
std::vector<double> best_s;
for (size_t i = 0; i < linspace_vector.size(); i++)
{
cv::Mat i_test_seek;
cv::Mat i_corr;
double r = linspace_vector[i];
double resize_factor = min_r / r; // minR/r in matlab
cv::resize(i_test_base, i_test_seek, cv::Size(), resize_factor, resize_factor, cv::INTER_CUBIC);
cv::matchTemplate(i_test_seek, cc_template, i_corr, CV_TM_CCORR_NORMED);
cv::imshow("i_corr", i_corr);
cv::waitKey(0);
double m;
std::vector<int> p;
Utilities::Max2(i_corr, m, p);
if (m> best_m)
{
best_p.clear();
best_s.clear();
for (int i = 0; i < p.size(); ++i)
{
best_p.push_back(p[i] * r);
}
best_s.push_back(cc_size_height * r);
best_s.push_back(cc_size_width * r);
best_m = m;
}
}
Can you suggest a more efficient way of doing this?
I find the local maximum of each column and the index of that value, and then take the global maximum over those column maxima.
Can you try the following and benchmark whether the performance increases:
#include <limits>
#include <utility>
void Utilities::Max2(cv::Mat input_image, double& m, std::vector<int>& p)
{
m = std::numeric_limits<double>::lowest(); // note: min() would give the smallest positive double
std::pair<int, int> temp_index(0, 0);
for (int r = 0; r < input_image.rows; ++r)
{
for (int c = 0; c < input_image.cols; ++c)
{
if (m < input_image.at<float>(r, c))
{
m = input_image.at<float>(r, c);
temp_index = std::make_pair(c, r);
}
}
}
p.clear();
p.push_back(temp_index.second); // row
p.push_back(temp_index.first);  // column
}
If there is a way to get the input as a vector and you can get the number of columns, for example using:
int cols = input_image.cols;
std::vector<double> v;
// assuming a continuous CV_32F matrix: reinterpret the raw byte pointers as float*
v.assign(reinterpret_cast<const float*>(input_image.datastart), reinterpret_cast<const float*>(input_image.dataend));
Then you can compute in just one go:
std::vector<double>::iterator iter = std::max_element(v.begin(), v.end());
double m = *iter;
int k = std::distance(v.begin(), iter);
int y = (int)k / cols;
int x = k % cols;
However, I am not sure whether getting the data as a vector is an option, nor what the cost of converting it into one would be. Maybe you can run it and see how it compares to your implementation.
The first piece of code is essentially finding the max value and its indices (both x and y) in an image to my understanding.
function [m, p] = max2(im)
[m1, k1] = max(im); % find the max value in each column
[m, k2] = max(m1); % find the max value among those column maxima
x = k2; % the column of the overall max
y = k1(k2); % and its row
p = [y, x];
This can be done using explicit iteration, but iteration is almost always significantly slower than vectorized operations or OpenCV functions.
So, if my understanding is correct, this operation can simply be done by
double minVal, maxVal;
Point minLoc, maxLoc;
minMaxLoc(im, &minVal, &maxVal, &minLoc, &maxLoc);
maxLoc.y will give the row, and maxLoc.x will give col.
Update: your MATLAB code can also be simplified (which will potentially speed it up too):
[mx, ind] = max(im(:));
p = [mod(ind-1,size(im,1))+1, ceil(ind/size(im,1))];
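For reference, a sketch of Max2 rebuilt around cv::minMaxLoc, keeping the same (row, column) output order as the original, could look like this:
#include <opencv2/core/core.hpp>
#include <vector>

// Sketch: same interface as the question's Max2, built on cv::minMaxLoc.
void Max2(const cv::Mat& input_image, double& m, std::vector<int>& p)
{
    double minVal;
    cv::Point minLoc, maxLoc;
    cv::minMaxLoc(input_image, &minVal, &m, &minLoc, &maxLoc);
    p.clear();
    p.push_back(maxLoc.y); // row, like y = k1(k2) in the MATLAB version
    p.push_back(maxLoc.x); // column, like x = k2
}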
You could also try the following:
// creating a random matrix with 2 rows and 4 columns
Mat1d mat(2, 4);
double low = -7000.0; // minimum value for generating random numbers
double high = +7000.0; // maximum value for generating random numbers
randu(mat, Scalar(low), Scalar(high)); // generating random number matrix
double max_element = *std::max_element(mat.begin(),mat.end()); // get the max element in the matrix
int max_element_index = std::max_element(mat.begin(),mat.end()) - mat.begin(); // get the index of the max element in the matrix
The max element index is a row-major value starting from 0 up to the number of items in the matrix minus one, in this case 7.
cout << mat << endl;
cout << max_element << endl;
cout << max_element_index << endl;
[The code above was adapted from "Generate random numbers matrix in OpenCV".]

Implement RGBtoHSV in C++, wrong H output

I am trying to apply the Sobel operator in the HSV color space (I was told to do this in HSV by my guide, but I don't understand why it should work better in HSV than in RGB).
I have built a function that converts from RGB to HSV. While I have some mediocre knowledge of C++, I get confused by the image processing, so I tried to keep the code as simple as possible, meaning I don't care (at this stage) about time or space.
Looking at the results I got as gray-level BMP photos, my V and S seem to be fine, but my H looks like gibberish.
I have two questions:
1. How should a normal H photo in gray levels look compared to the source photo?
2. Where did I go wrong in the code?
void RGBtoHSV(unsigned char image[][NUMBER_OF_COLUMNS][NUMBER_OF_COLORS],
float Him[][NUMBER_OF_COLUMNS],
float Vim[][NUMBER_OF_COLUMNS],
float Sim[][NUMBER_OF_COLUMNS])
{
double Rn, Gn, Bn;
double C;
double H, S, V;
for (int row = 0; row < NUMBER_OF_ROWS; row++)
{
for (int column = 0; column < NUMBER_OF_COLUMNS; column++)
{
Rn = (1.0*image[row][column][R]) / 255;
Gn = (1.0*image[row][column][G] )/ 255;
Bn = (1.0*image[row][column][B] )/ 255;
//double RGBn[3] = { Rn, Gn, Bn };
double max = Rn;
if (max < Gn) max = Gn;
if (max < Bn) max = Bn;
double min = Rn;
if (min > Gn) min = Gn;
if (min > Bn) min = Bn;
C = max - min;
H = 0;
if (max==0)
{
S = 0;
H = -1; //undifined;
V = max;
}
else
{
/* if (max == Rn)
H = (60.0* ((int)((Gn - Bn) / C) % 6));
else if (max == Gn)
H = 60.0*( (Bn - Rn)/C + 2);
else
H = 60.0*( (Rn - Gn)/C + 4);
*/
if (max == Rn)
H = ( 60.0* ( (Gn - Bn) / C) ) ;
else if (max == Gn)
H = 60.0*((Bn - Rn) / C + 2);
else
H = 60.0*((Rn - Gn) / C + 4);
V = max; //AKA lightness
S = C / max; //saturation
}
while (H < 0)
H += 360;
while (H>360)
H -= 360;
Him[row][column] = (float)H;
Vim[row][column] = (float)V;
Sim[row][column] = (float)S;
}
}
}
Also my HSVtoRGB:
void HSVtoRGB(unsigned char image[][NUMBER_OF_COLUMNS][NUMBER_OF_COLORS],
float Him[][NUMBER_OF_COLUMNS],
float Vim[][NUMBER_OF_COLUMNS],
float Sim[][NUMBER_OF_COLUMNS])
{
double R1, G1, B1;
double C;
double V;
double S;
double H;
int Htag;
double Htag2;
double x;
double m;
for (int row = 0; row < NUMBER_OF_ROWS; row++)
{
for (int column = 0; column < NUMBER_OF_COLUMNS; column++)
{
H = (double)Him[row][column];
S = (double)Sim[row][column];
V = (double)Vim[row][column];
C = V*S;
Htag = (int) (H / 60.0);
Htag2 = H/ 60.0;
//x = C*(1 - abs(Htag % 2 - 1));
double tmp1 = fmod(Htag2, 2);
double temp=(1 - abs(tmp1 - 1));
x = C*temp;
//switch (Htag)
switch (Htag)
{
case 0 :
R1 = C;
G1 = x;
B1 = 0;
break;
case 1:
R1 = x;
G1 = C;
B1 = 0;
break;
case 2:
R1 = 0;
G1 = C;
B1 = x;
break;
case 3:
R1 = 0;
G1 = x;
B1 = C;
break;
case 4:
R1 = x;
G1 = 0;
B1 = C;
break;
case 5:
R1 = C;
G1 = 0;
B1 = x;
break;
default:
R1 = 0;
G1 = 0;
B1 = 0;
break;
}
m = V - C;
//this is also good change I found
//image[row][column][R] = unsigned char( (R1 + m)*255);
//image[row][column][G] = unsigned char( (G1 + m)*255);
//image[row][column][B] = unsigned char( (B1 + m)*255);
image[row][column][R] = round((R1 + m) * 255);
image[row][column][G] = round((G1 + m) * 255);
image[row][column][B] = round((B1 + m) * 255);
}
}
}
void HSVfloattoGrayconvert(unsigned char grayimage[NUMBER_OF_ROWS][NUMBER_OF_COLUMNS], float hsvimage[NUMBER_OF_ROWS][NUMBER_OF_COLUMNS], char hsv)
{
//grayimage , flaotimage , h/s/v
float factor;
if (hsv == 'h' || hsv == 'H') factor = (float) 1 / 360;
else factor = 1;
for (int row = 0; row < NUMBER_OF_ROWS; row++)
{
for (int column = 0; column < NUMBER_OF_COLUMNS; column++)
{
grayimage[row][column] = (unsigned char) (0.5f + 255.0f * (float)hsvimage[row][column] / factor);
}
}
}
And my main:
unsigned char ColorImage1[NUMBER_OF_ROWS][NUMBER_OF_COLUMNS][NUMBER_OF_COLORS];
float Himage[NUMBER_OF_ROWS][NUMBER_OF_COLUMNS];
float Vimage[NUMBER_OF_ROWS][NUMBER_OF_COLUMNS];
float Simage[NUMBER_OF_ROWS][NUMBER_OF_COLUMNS];
unsigned char ColorImage2[NUMBER_OF_ROWS][NUMBER_OF_COLUMNS][NUMBER_OF_COLORS];
unsigned char HimageGray[NUMBER_OF_ROWS][NUMBER_OF_COLUMNS];
unsigned char VimageGray[NUMBER_OF_ROWS][NUMBER_OF_COLUMNS];
unsigned char SimageGray[NUMBER_OF_ROWS][NUMBER_OF_COLUMNS];
unsigned char HAfterSobel[NUMBER_OF_ROWS][NUMBER_OF_COLUMNS];
unsigned char VAfterSobel[NUMBER_OF_ROWS][NUMBER_OF_COLUMNS];
unsigned char SAfterSobal[NUMBER_OF_ROWS][NUMBER_OF_COLUMNS];
unsigned char HSVcolorAfterSobal[NUMBER_OF_ROWS][NUMBER_OF_COLUMNS][NUMBER_OF_COLORS];
unsigned char RGBAfterSobal[NUMBER_OF_ROWS][NUMBER_OF_COLUMNS][NUMBER_OF_COLORS];
int KernelX[3][3] = {
{-1,0,+1}, {-2,0,2}, {-1,0,1 }
};
int KernelY[3][3] = {
{-1,-2,-1}, {0,0,0}, {1,2,1}
};
void main()
{
//work
LoadBgrImageFromTrueColorBmpFile(ColorImage1, "P22A.bmp");
// add noise
AddSaltAndPepperNoiseRGB(ColorImage1, 350, 255);
StoreBgrImageAsTrueColorBmpFile(ColorImage1, "saltandpepper.bmp");
AddGaussNoiseCPPstileRGB(ColorImage1, 0.0, 1.0);
StoreBgrImageAsTrueColorBmpFile(ColorImage1, "Saltandgauss.bmp");
//saves hsv in float array
RGBtoHSV(ColorImage1, Himage, Vimage, Simage);
//saves hsv float arrays in unsigned char arrays
HSVfloattoGrayconvert(HimageGray, Himage, 'h');
HSVfloattoGrayconvert(VimageGray, Vimage, 'v');
HSVfloattoGrayconvert(SimageGray, Simage, 's');
StoreGrayImageAsGrayBmpFile(HimageGray, "P22H.bmp");
StoreGrayImageAsGrayBmpFile(VimageGray, "P22V.bmp");
StoreGrayImageAsGrayBmpFile(SimageGray, "P22S.bmp");
WaitForUserPressKey();
}
Edit: changed the code and added sources for the equations:
http://www.rapidtables.com/convert/color/hsv-to-rgb.htm
http://www.rapidtables.com/convert/color/rgb-to-hsv.htm
Edit 3:
Following @gpasch's advice, using a better reference and deleting the mod 6, I am now able to restore the original RGB photo! But unfortunately my H photo in grayscale is now even more chaotic than before.
I'll edit the code above so it has more information about how I am saving the H grayscale photo.
That is the peril of going through garbage web sites; I suggest the following:
https://www.cs.rit.edu/~ncs/color/t_convert.html
That mod 6 seems fishy there.
You also need to make sure you understand that H is in degrees from 0 to 360; if your filter expects 0..1 you have to change it.
I am trying to apply the Sobel operator in the HSV color space (told to do this in HSV by my guide, but I don't understand why it should work better in HSV than in RGB)
It depends on what you are trying to achieve. If you're trying to do edge detection based on brightness for example, then just working with say the V channel might be simpler than processing all three channels of RGB and combining them afterwards.
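For example, a rough sketch of applying the question's KernelX/KernelY to the V channel only (assuming the float V array produced by RGBtoHSV and the NUMBER_OF_* constants from the question) might look like this:
#include <cmath>

// Sketch: Sobel gradient magnitude computed on the V channel only, using the
// KernelX/KernelY arrays already defined in the question.
void SobelOnV(float Vim[][NUMBER_OF_COLUMNS], float VSobel[][NUMBER_OF_COLUMNS])
{
    for (int row = 1; row < (int)NUMBER_OF_ROWS - 1; row++)
    {
        for (int column = 1; column < (int)NUMBER_OF_COLUMNS - 1; column++)
        {
            double gx = 0.0, gy = 0.0;
            for (int i = -1; i <= 1; i++)
            {
                for (int j = -1; j <= 1; j++)
                {
                    gx += KernelX[i + 1][j + 1] * Vim[row + i][column + j];
                    gy += KernelY[i + 1][j + 1] * Vim[row + i][column + j];
                }
            }
            VSobel[row][column] = (float)std::sqrt(gx * gx + gy * gy);
        }
    }
}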
How should a normal H photo in gray levels look compared to the source photo?
You would see regions which are a similar colour appear as a similar shade of grey, and for a real-world scene you would still see gradients. But where there are spatially adjacent regions with colours far apart in hue, there would be a sharp jump. The shapes would generally be recognisable though.
Where did I go wrong in the code?
There are two main problems with your code. The first is that the hue scaling in HSVfloattoGrayconvert is wrong. Your code is setting factor=1.0/360.0f but then dividing by the factor, which means it's multiplying by 360. If you simply multiply by the factor, it produces the expected output. This is because the earlier calculation uses normalised values (0..1) for S and V but angle in degrees for H, so you need to divide by 360 to normalise H.
Second, the conversion back to RGB has a problem, mainly in the calculation of Htag: you want the original (fractional) value when calculating x, but only its floor when switching on the sector.
Note that despite what @gpasch suggested, the mod 6 operation is actually correct. This is because the conversion you are using is based on the hexagonal colour space model for HSV, and it is used to determine which sector your colour is in. For a continuous model, you could use a radial conversion instead, which is slightly different. Both are well explained on Wikipedia.
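For illustration, the radial form computes hue directly from the normalised RGB values with atan2 instead of picking a sector (a sketch, assuming Rn, Gn, Bn as in RGBtoHSV above):
#include <cmath>

// Radial (continuous) hue in degrees, with Rn, Gn, Bn normalised to 0..1.
double radialHue = std::atan2(std::sqrt(3.0) * (Gn - Bn), 2.0 * Rn - Gn - Bn)
                   * 180.0 / 3.14159265358979323846;
if (radialHue < 0)
    radialHue += 360.0;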
I took your code, added a few functions to generate input data and save output files so it is completely standalone, and fixed the bugs above while making minimal changes to the source.
Given the following generated input image:
the Hue channel extracted is:
The saturation channel is:
and finally value:
After fixing up the HSV to RGB conversion, I verified that the resulting output image matches the original.
The updated code is below (as mentioned above, changed minimally to make a standalone test):
#include <cstdio> // for fopen/fprintf/fwrite in SaveImage
#include <string>
#include <cmath>
#include <cstdlib>
enum ColorIndex
{
R = 0,
G = 1,
B = 2,
};
namespace
{
const unsigned NUMBER_OF_COLUMNS = 256;
const unsigned NUMBER_OF_ROWS = 256;
const unsigned NUMBER_OF_COLORS = 3;
};
void RGBtoHSV(unsigned char image[][NUMBER_OF_COLUMNS][NUMBER_OF_COLORS],
float Him[][NUMBER_OF_COLUMNS],
float Vim[][NUMBER_OF_COLUMNS],
float Sim[][NUMBER_OF_COLUMNS])
{
double Rn, Gn, Bn;
double C;
double H, S, V;
for (int row = 0; row < NUMBER_OF_ROWS; row++)
{
for (int column = 0; column < NUMBER_OF_COLUMNS; column++)
{
Rn = image[row][column][R] / 255.0;
Gn = image[row][column][G] / 255.0;
Bn = image[row][column][B] / 255.0;
double max = Rn;
if (max < Gn) max = Gn;
if (max < Bn) max = Bn;
double min = Rn;
if (min > Gn) min = Gn;
if (min > Bn) min = Bn;
C = max - min;
H = 0;
if (max==0)
{
S = 0;
H = 0; // Undefined
V = max;
}
else
{
if (max == Rn)
H = 60.0*fmod((Gn - Bn) / C, 6.0);
else if (max == Gn)
H = 60.0*((Bn - Rn) / C + 2);
else
H = 60.0*((Rn - Gn) / C + 4);
V = max; //AKA lightness
S = C / max; //saturation
}
while (H < 0)
H += 360.0;
while (H > 360)
H -= 360.0;
Him[row][column] = (float)H;
Vim[row][column] = (float)V;
Sim[row][column] = (float)S;
}
}
}
void HSVtoRGB(unsigned char image[][NUMBER_OF_COLUMNS][NUMBER_OF_COLORS],
float Him[][NUMBER_OF_COLUMNS],
float Vim[][NUMBER_OF_COLUMNS],
float Sim[][NUMBER_OF_COLUMNS])
{
double R1, G1, B1;
double C;
double V;
double S;
double H;
double Htag;
double m;
for (int row = 0; row < NUMBER_OF_ROWS; row++)
{
for (int column = 0; column < NUMBER_OF_COLUMNS; column++)
{
H = (double)Him[row][column];
S = (double)Sim[row][column];
V = (double)Vim[row][column];
C = V*S;
Htag = H / 60.0;
double x = C*(1.0 - fabs(fmod(Htag, 2.0) - 1.0));
int i = floor(Htag);
switch (i)
{
case 0 :
R1 = C;
G1 = x;
B1 = 0;
break;
case 1:
R1 = x;
G1 = C;
B1 = 0;
break;
case 2:
R1 = 0;
G1 = C;
B1 = x;
break;
case 3:
R1 = 0;
G1 = x;
B1 = C;
break;
case 4:
R1 = x;
G1 = 0;
B1 = C;
break;
case 5:
R1 = C;
G1 = 0;
B1 = x;
break;
default:
R1 = 0;
G1 = 0;
B1 = 0;
break;
}
m = V - C;
image[row][column][R] = round((R1 + m) * 255);
image[row][column][G] = round((G1 + m) * 255);
image[row][column][B] = round((B1 + m) * 255);
}
}
}
void HSVfloattoGrayconvert(unsigned char grayimage[][NUMBER_OF_COLUMNS], float hsvimage[][NUMBER_OF_COLUMNS], char hsv)
{
//grayimage , flaotimage , h/s/v
float factor;
if (hsv == 'h' || hsv == 'H') factor = 1.0f/360.0f;
else factor = 1.0f;
for (int row = 0; row < NUMBER_OF_ROWS; row++)
{
for (int column = 0; column < NUMBER_OF_COLUMNS; column++)
{
grayimage[row][column] = (unsigned char) (0.5f + 255.0f * (float)hsvimage[row][column] * factor);
}
}
}
int KernelX[3][3] = {
{-1,0,+1}, {-2,0,2}, {-1,0,1 }
};
int KernelY[3][3] = {
{-1,-2,-1}, {0,0,0}, {1,2,1}
};
void GenerateTestImage(unsigned char image[][NUMBER_OF_COLUMNS][NUMBER_OF_COLORS])
{
for (unsigned y = 0; y < NUMBER_OF_ROWS; y++)
{
for (unsigned x = 0; x < NUMBER_OF_COLUMNS; x++)
{
image[y][x][R] = x % 256;
image[y][x][G] = y % 256;
image[y][x][B] = (255-x) % 256;
}
}
}
void GenerateTestImage(unsigned char image[][NUMBER_OF_COLUMNS])
{
for (unsigned y = 0; y < NUMBER_OF_ROWS; y++)
{
for (unsigned x = 0; x < NUMBER_OF_COLUMNS; x++)
{
image[x][y] = x % 256;
}
}
}
// Color (three channel) images
void SaveImage(unsigned char image[][NUMBER_OF_COLUMNS][NUMBER_OF_COLORS], const std::string& filename)
{
FILE* fp = fopen(filename.c_str(), "wb"); // binary mode so the PPM isn't corrupted on Windows
fprintf(fp, "P6\n%u %u\n255\n", NUMBER_OF_COLUMNS, NUMBER_OF_ROWS);
fwrite(image, NUMBER_OF_COLORS, NUMBER_OF_ROWS*NUMBER_OF_COLUMNS, fp);
fclose(fp);
}
// Grayscale (single channel) images
void SaveImage(unsigned char image[][NUMBER_OF_COLUMNS], const std::string& filename)
{
FILE* fp = fopen(filename.c_str(), "wb"); // binary mode, as above
fprintf(fp, "P5\n%u %u\n255\n", NUMBER_OF_COLUMNS, NUMBER_OF_ROWS);
fwrite(image, 1, NUMBER_OF_ROWS*NUMBER_OF_COLUMNS, fp);
fclose(fp);
}
unsigned char ColorImage1[NUMBER_OF_ROWS][NUMBER_OF_COLUMNS][NUMBER_OF_COLORS];
unsigned char Himage[NUMBER_OF_ROWS][NUMBER_OF_COLUMNS];
unsigned char Simage[NUMBER_OF_ROWS][NUMBER_OF_COLUMNS];
unsigned char Vimage[NUMBER_OF_ROWS][NUMBER_OF_COLUMNS];
float HimageGray[NUMBER_OF_ROWS][NUMBER_OF_COLUMNS];
float SimageGray[NUMBER_OF_ROWS][NUMBER_OF_COLUMNS];
float VimageGray[NUMBER_OF_ROWS][NUMBER_OF_COLUMNS];
int main()
{
// Test input
GenerateTestImage(ColorImage1);
SaveImage(ColorImage1, "test_input.ppm");
//saves hsv in float array
RGBtoHSV(ColorImage1, HimageGray, VimageGray, SimageGray);
//saves hsv float arrays in unsigned char arrays
HSVfloattoGrayconvert(Himage, HimageGray, 'h');
HSVfloattoGrayconvert(Vimage, VimageGray, 'v');
HSVfloattoGrayconvert(Simage, SimageGray, 's');
SaveImage(Himage, "P22H.pgm");
SaveImage(Vimage, "P22V.pgm");
SaveImage(Simage, "P22S.pgm");
// Convert back to get the original test image
HSVtoRGB(ColorImage1, HimageGray, VimageGray, SimageGray);
SaveImage(ColorImage1, "test_output.ppm");
return 0;
}
The input image was generated by a very simple algorithm which gives us gradients in each dimension, so we can easily inspect and verify the expected output. I used ppm/pgm files as they are simpler to write and more portable than BMP.
Hope this helps - let me know if you have any questions.

Saving FFT spectrum in FMOD Studio C++

I'm trying to save the spectrum from my FMOD_DSP_PARAMETER_FFT, but I only receive a spectrum full of zeros. If you can spot my mistake I would be grateful; I think I'm not connecting the DSP to the channel correctly, or something similar, because I can't find the error in the code.
My code currently looks like this:
FMOD::System *system;
FMOD::Sound *sound1;
FMOD::Channel *channel = 0;
FMOD::ChannelGroup *mastergroup;
FMOD::ChannelControl *control;
FMOD::DSP *mydsp, *dsphead, *dspchannelmixer;
FMOD::DSPConnection *conection;
FMOD_RESULT result;
unsigned int version;
result = FMOD::System_Create(&system);
result = system->getVersion(&version);
result = system->init(32, FMOD_INIT_NORMAL, NULL);
result = system->createSound("MySong.mp3",FMOD_DEFAULT, 0, &sound1);
result = sound1->setMode(FMOD_LOOP_NORMAL);
result = system->playSound(sound1, 0, true, &channel);
/*
Create the DSP effect.
*/
result = system->getMasterChannelGroup(&mastergroup);
result = system->createDSPByType(FMOD_DSP_TYPE_FFT, &mydsp);
result = system->getMasterChannelGroup(&mastergroup);
result = mastergroup->addDSP(0, mydsp);
result = mydsp->setBypass(true);
result = mydsp->setActive(true);
char s[256];
unsigned int len;
float freq[32];
float fft = 0;
std::vector<float> fftheights;
float m_spectrum_data[FFT_NUM_BINS];
while (1) { //program loop
unsigned int ms = 0;
unsigned int lenms = 0;
bool playing = 0;
bool paused = 0;
int channelsplaying = 0;
if (channel)
{
FMOD::Sound *currentsound = 0;
result = channel->setPaused(false);
result = channel->setMute(false);
result = channel->isPlaying(&playing);
result = channel->getPaused(&paused);
result = channel->setVolume(0.5);
result = channel->getPosition(&ms, FMOD_TIMEUNIT_MS);
channel->getCurrentSound(&currentsound);
if (currentsound)
{
result = currentsound->getLength(&lenms, FMOD_TIMEUNIT_MS);
}
}
system->getChannelsPlaying(&channelsplaying);
FMOD_DSP_PARAMETER_FFT *fftparameter;
float val;
char s[256];
unsigned int len;
float *data = 0;
float freq[32];
int rate, chan, nyquist;
int windowsize = 1024;
result = system->getSoftwareFormat(&rate, 0, 0);
result = mydsp->setParameterInt(FMOD_DSP_FFT_WINDOWTYPE, FMOD_DSP_FFT_WINDOW_TRIANGLE);
result = mydsp->setParameterInt(FMOD_DSP_FFT_WINDOWSIZE, windowsize);
result = mydsp->getParameterFloat(FMOD_DSP_FFT_DOMINANT_FREQ, &val, 0, 0);
result = mydsp->getParameterData(FMOD_DSP_FFT_SPECTRUMDATA, (void **)&fftparameter, &len, s, 256);
nyquist = windowsize / 2;
for (chan = 0; chan < 2; chan++)
{
float average = 0.0f;
float power = 0.0f;
for (int i = 0; i < nyquist - 1; ++i)
{
float hz = i * (rate * 0.5f) / (nyquist - 1);
int index = i + (16384 * chan);
if (fftparameter->spectrum[chan][i] > 0.0001f) // arbitrary cutoff to filter out noise
{
average += data[index] * hz;
power += data[index];
}
}
if (power > 0.001f)
{
freq[chan] = average / power;
}
else
{
freq[chan] = 0;
}
}
printf("\ndom freq = %d : %.02f %.02f\n", (int)val, freq[0], freq[1]);
}
My fftparameter->spectrum is always an array of zero values...
Is it possible to connect it without modifying the sound that is playing?
Thank you.
There are a few standout issues in your code example.
The FFT DSP has been bypassed with result = mydsp->setBypass(true);, causing it not to process.
There are no calls to System::update in the main loop.
The main loop has no sleep, so it will spin as fast as possible.
I think your main issue is probably the setBypass call; use setBypass(false).
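A rough sketch of those changes, reusing the variables from the question, might look like this:
#include <thread>
#include <chrono>

result = mydsp->setBypass(false);   // let the FFT DSP process instead of bypassing it

while (1)
{
    result = system->update();      // FMOD needs this called regularly to run its DSPs

    // ... query FMOD_DSP_FFT_SPECTRUMDATA here exactly as in the original loop ...

    std::this_thread::sleep_for(std::chrono::milliseconds(20)); // avoid spinning at 100% CPU
}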

Converting gray scale BMP to full color

I'm learning some basic image processing and using a grayscale BMP file to work through some algorithms, but I'd like to convert my code to output color BMP files instead of grayscale. I'm using the EasyBMP library and have the following code to read from and write to my BMP file:
bool Image::readFromBMPFile(const std::string & inputFileName){
bool success = true;
// use BMP object to read image
BMP inputImage;
success = inputImage.ReadFromFile(inputFileName.c_str() );
if( success ){
// allocate memory for image (deleting old, if exists)
m_numRows = inputImage.TellHeight();
m_numCols = inputImage.TellWidth();
if( m_pixels != NULL ){
// deallocate old memory
delete [] m_pixels;
}
m_pixels = new double[m_numRows * m_numCols];
// copy pixels
for( int r = 0; r < m_numRows; ++r ){
for( int c = 0; c < m_numCols; ++c ){
RGBApixel pixelVal = inputImage.GetPixel(c, r);
double val = (double) pixelVal.Blue + (double) pixelVal.Green + (double) pixelVal.Red;
val = (val / 3.0 + 0.5);
m_pixels[r * m_numCols + c] = val;
}
}
}
return success;
}
bool Image::writeToBMPFile(const std::string & outputFileName){
bool success = true;
if( m_pixels != NULL ){
// create bitmap image
BMP outputImage;
outputImage.SetSize(m_numCols, m_numRows);
outputImage.SetBitDepth( 24 );
double maxVal = m_pixels[0];
double minVal = m_pixels[0];
// Maximum and minimum values
for( int i = 1; i < m_numRows * m_numCols; ++i ){
if( m_pixels[i] > maxVal ){
maxVal = m_pixels[i];
}
if( m_pixels[i] <= minVal ){
minVal = m_pixels[i];
}
}
for( int r = 0; r < m_numRows; ++r ){
for( int c = 0; c < m_numCols; ++c ){
// get pixel value and clamp between 0 and 255
double val = 255.0 * (m_pixels[r * m_numCols + c] - minVal) / (maxVal - minVal);
if( val < 0 ){
val = 0;
}
if( val > 255 ){
val = 255;
}
// set output color based on mapping
RGBApixel pixelVal;
pixelVal.Blue = (int)val;
pixelVal.Green = (int)val;
pixelVal.Red = (int)val;
outputImage.SetPixel(c, r, pixelVal);
}
}
// write to file
success = outputImage.WriteToFile( outputFileName.c_str() );
} else {
success = false;
}
return success;
}
What steps should I take to make my program compatible with RGB (color) images?
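One possible direction (a hypothetical sketch, assuming m_pixels is reallocated to hold three interleaved doubles per pixel) is to keep each channel in readFromBMPFile instead of averaging them:
// Hypothetical sketch: store interleaved R, G, B doubles instead of one gray value.
// Assumes m_pixels has been reallocated as new double[m_numRows * m_numCols * 3].
for( int r = 0; r < m_numRows; ++r ){
    for( int c = 0; c < m_numCols; ++c ){
        RGBApixel pixelVal = inputImage.GetPixel(c, r);
        double* p = &m_pixels[(r * m_numCols + c) * 3];
        p[0] = (double) pixelVal.Red;
        p[1] = (double) pixelVal.Green;
        p[2] = (double) pixelVal.Blue;
    }
}
On the write side, writeToBMPFile would then scale each of the three stored values into pixelVal.Red, pixelVal.Green, and pixelVal.Blue separately instead of reusing a single gray value.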