In my OpenCL Dijkstra's algorithm implementation, the slowest part by far is writing the flattened 1D graph matrix to the kernel argument, which lives in global memory.
My graph is a two-dimensional array; for OpenCL it gets flattened to a 1D array like so:
for (int q = 0; q < numberOfVertices; q++)
{
    for (int t = 0; t < numberOfVertices; t++)
    {
        reducedGraph[q * numberOfVertices + t] = graph[q][t];
    }
}
Put into a buffer:
cl::Buffer graphBuffer = cl::Buffer(context, CL_MEM_READ_WRITE, numberOfVertices * numberOfVertices * sizeof(int));
Setting the argument then takes an extremely long time. For my test with 5,760,000 vertices, writing the data to the argument takes more than 3 seconds while the algorithm itself takes less than a millisecond:
kernel_dijkstra.setArg(5, graphBuffer);
The kernel uses the graph as a global argument:
void kernel min_distance(global int* dist, global bool* verticesSet, const int sizeOfChunks, global int* result, const int huge_int, global int* graph, const int numberOfVertices)
Is there any way to speed this up? Thank you!
Edit: My Kernel's code:
// Kernel source, calculates minimum distance in segment and relaxes graph.
std::string kernel_code = R"(
void kernel min_distance(global int* dist, global bool* verticesSet, const int sizeOfChunks, global int* result, const int huge_int, global int* graph, const int numberOfVertices) {
    for (int b = 0; b < numberOfVertices; b++) {
        int gid = get_global_id(0);
        int min = huge_int, min_index = -1;
        for (int v = gid * sizeOfChunks; v < sizeOfChunks * gid + sizeOfChunks; v++) {
            if (verticesSet[v] == false && dist[v] < min && dist[v] != 0) {
                min = dist[v];
                min_index = v;
            }
        }
        result[gid] = min_index;
        if (gid != 0) continue;
        min = huge_int;
        min_index = -1;
        int current_min;
        for (int a = 0; a < numberOfVertices; a++) {
            current_min = dist[result[a]];
            if (current_min < min && current_min != -1 && current_min != 0) { min = current_min; min_index = result[a]; }
        }
        verticesSet[min_index] = true;
        // relax graph with found global min.
        int a = 0;
        int min_dist = dist[min_index];
        int current_dist;
        int compare_dist;
        for (int i = min_index * numberOfVertices; i < min_index * numberOfVertices + numberOfVertices; i++) {
            current_dist = dist[a];
            compare_dist = graph[min_index * numberOfVertices + a];
            if (current_dist > min_dist + compare_dist && !verticesSet[a] && compare_dist != 0) {
                dist[a] = min_dist + compare_dist;
            }
            a++;
        }
    }
}
)";
How I enqueue it:
numberOfComputeUnits = default_device.getInfo<CL_DEVICE_MAX_COMPUTE_UNITS>();
queue.enqueueNDRangeKernel(kernel_dijkstra, 0, cl::NDRange(numberOfVertices), numberOfComputeUnits);
The error here is that your memory allocation is way too large: the buffer size is quadratic in the number of vertices, so 5,760,000 vertices need 5,760,000² × 4 bytes ≈ 133 TB. Neither the C++ compiler nor OpenCL will report this as an error, and even your kernel will apparently start and run just fine, but in reality it does not compute anything, because there isn't enough memory, and you will get random, undefined results.
Generally, .setArg(...) should not take longer than a few milliseconds. It is also beneficial to do the initialization (buffer allocation, .setArg(...), etc.) only once at the beginning, and then repeatedly run the kernel or exchange data in the buffers without reallocating anything.
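To illustrate that pattern with the names from the question (a sketch, not the poster's actual code; where the data changes per run is an assumption): do the allocation and setArg once, then only write data and launch inside the loop. Note the size_t cast, since numberOfVertices * numberOfVertices overflows int for large vertex counts.
// One-time initialization: allocate the buffer and bind it to the kernel once.
size_t graphBytes = (size_t)numberOfVertices * (size_t)numberOfVertices * sizeof(int);
cl::Buffer graphBuffer(context, CL_MEM_READ_WRITE, graphBytes);
kernel_dijkstra.setArg(5, graphBuffer);
// Per run: only copy new data and launch the kernel; no reallocation, no setArg.
queue.enqueueWriteBuffer(graphBuffer, CL_TRUE, 0, graphBytes, reducedGraph);
queue.enqueueNDRangeKernel(kernel_dijkstra, cl::NullRange, cl::NDRange(numberOfVertices), cl::NullRange);
queue.finish();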
I am attempting to write a naive implementation of the Short-Time Fourier Transform using consecutive FFT frames in time, calculated with the FFTW library, but I am getting a segmentation fault and cannot work out why.
My code is as below:
// load in audio
AudioFile<double> audioFile;
audioFile.load ("assets/example-audio/file_example_WAV_1MG.wav");
int N = audioFile.getNumSamplesPerChannel();

// make stereo audio mono
double fileDataMono[N];
if (audioFile.isStereo())
    for (int i = 0; i < N; i++)
        fileDataMono[i] = ( audioFile.samples[0][i] + audioFile.samples[1][i] ) / 2;

// setup stft
// (test transform, presently unoptimized)
int stepSize = 512;
int M = 2048; // fft size
int noOfFrames = (N-(M-stepSize))/stepSize;

// create Hamming window vector
double w[M];
for (int m = 0; m < M; m++) {
    w[m] = 0.53836 - 0.46164 * cos( 2*M_PI*m / M );
}

double* input;
// (pads input array if necessary)
if ( (N-(M-stepSize))%stepSize != 0) {
    noOfFrames += 1;
    int amountOfZeroPadding = stepSize - (N-(M-stepSize))%stepSize;
    double ipt[N + amountOfZeroPadding];
    for (int i = 0; i < N; i++) // copy values from fileDataMono into input
        ipt[i] = fileDataMono[i];
    for (int i = 0; i < amountOfZeroPadding; i++)
        ipt[N + i] = 0;
    input = ipt;
} else {
    input = fileDataMono;
}

// compute stft
fftw_complex* stft[noOfFrames];
double frames[noOfFrames][M];
fftw_plan fftPlan;
for (int i = 0; i < noOfFrames; i++) {
    stft[i] = (fftw_complex*)fftw_malloc(sizeof(fftw_complex) * M);
    for (int m = 0; m < M; m++)
        frames[i][m] = input[i*stepSize + m] * w[m];
    fftPlan = fftw_plan_dft_r2c_1d(M, frames[i], stft[i], FFTW_ESTIMATE);
    fftw_execute(fftPlan);
}
// compute istft
double* outputFrames[noOfFrames];
double output[N];
for (int i = 0; i < noOfFrames; i++) {
    outputFrames[i] = (double*)fftw_malloc(sizeof(double) * M);
    fftPlan = fftw_plan_dft_c2r_1d(M, stft[i], outputFrames[i], FFTW_ESTIMATE);
    fftw_execute(fftPlan);
    for (int m = 0; m < M; m++) {
        output[i*stepSize + m] += outputFrames[i][m];
    }
}
fftw_destroy_plan(fftPlan);
for (int i = 0; i < noOfFrames; i++) {
    fftw_free(stft[i]);
    fftw_free(outputFrames[i]);
}
// output audio
AudioFile<double>::AudioBuffer outputBuffer;
outputBuffer.resize (1);
outputBuffer[0].resize(N);
outputBuffer[0].assign(output, output+N);
bool ok = audioFile.setAudioBuffer(outputBuffer);
audioFile.setAudioBufferSize (1, N);
audioFile.setBitDepth (16);
audioFile.setSampleRate (8000);
audioFile.save ("out/audioOutput.wav");
The segfault seems to be raised by the first fftw_malloc call when computing the forward STFT.
Thanks in advance!
The relevant bit of code is:
double* input;

if ( (N-(M-stepSize))%stepSize != 0) {
    double ipt[N + amountOfZeroPadding];
    //...
    input = ipt;
}
//...
input[i*stepSize + m];
Your input pointer points at memory that exists only inside the if statement: the closing brace marks the end of the ipt array's lifetime. When you dereference the pointer later, you are accessing memory that no longer exists.
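One minimal way out (a sketch, not part of the original answer, reusing the question's variable names) is to give the padded buffer a lifetime that covers the whole computation, for example with a std::vector declared before the if:
#include <vector>
// The vector outlives the if block, so the pointer stays valid later on.
std::vector<double> inputVec(fileDataMono, fileDataMono + N);
if ((N - (M - stepSize)) % stepSize != 0) {
    noOfFrames += 1;
    int amountOfZeroPadding = stepSize - (N - (M - stepSize)) % stepSize;
    inputVec.resize(N + amountOfZeroPadding, 0.0); // appends zeros at the end
}
double* input = inputVec.data();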
My code seems to have a bug somewhere, but I just can't catch it. I'm passing a 2D array to three sequential functions: the first function populates it, the second modifies the values to 1's and 0's, and the third counts the 1's and 0's. I can access the array easily inside the first two functions, but I get an access violation on the first iteration of the third one.
Main
text_image_data = new int*[img_height];
for (i = 0; i < img_height; i++) {
    text_image_data[i] = new int[img_width];
}
cav_length = new int[numb_of_files];

// Start processing - load each image and find max cavity length
for (proc = 0; proc < numb_of_files; proc++)
{
    readImage(filles[proc], text_image_data, img_height, img_width);
    threshold = makeBinary(text_image_data, img_height, img_width);
    cav_length[proc] = measureCavity(bullet[0], img_width, bullet[1], img_height, text_image_data);
}
Functions
int makeBinary(int** img, int height, int width)
{
    int threshold = 0;
    unsigned long int sum = 0;
    for (int k = 0; k < width; k++)
    {
        sum = sum + img[1][k] + img[2][k] + img[3][k] + img[4][k] + img[5][k];
    }
    threshold = sum / (width * 5);
    for (int i = 0; i < height; i++)
    {
        for (int j = 0; j < width; j++)
        {
            img[i][j] = img[i][j] > threshold ? 1 : 0;
        }
    }
    return threshold;
}
// Count pixels - find length of cavity here
int measureCavity(int &x, int& width, int &y, int &height, int **img)
{
    double mean = 1.;
    int maxcount = 0;
    int pxcount = 0;
    int i = x - 1;
    int j;
    int pxsum = 0;
    for (j = 0; j < height - 2; j++)
    {
        while (mean > 0.0)
        {
            for (int ii = i; ii > i - 4; ii--)
            {
                pxsum = pxsum + img[ii][j] + img[ii][j + 1];
            }
            mean = pxsum / 4.;
            pxcount += 2;
            i += 2;
            pxsum = 0;
        }
        maxcount = std::max(maxcount, pxcount);
        pxcount = 0;
        j++;
    }
    return maxcount;
}
I keep getting an access violation in the measureCavity() function. I'm passing and accessing the array text_image_data the same way as in makeBinary() and readImage(), and it works just fine for those functions. The size is [550][70]; I get the error when trying to access element [327][0].
Is there a better, more reliable way to pass this array between the functions?
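For reference, a common, more defensive alternative (a sketch, not from the original post) is a std::vector<std::vector<int>>, which owns its memory and can bounds-check accesses:
#include <vector>
// Owns its memory; no manual new[]/delete[] bookkeeping required.
std::vector<std::vector<int>> text_image_data(img_height, std::vector<int>(img_width));
// Functions then take it by reference, e.g.:
// int makeBinary(std::vector<std::vector<int>>& img, int height, int width);
// Using img.at(i).at(j) instead of img[i][j] turns an out-of-range index
// into a std::out_of_range exception instead of silently reading bad memory.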
I have this function (RotateSlownessTop), and it is called about 800 times to compute the corresponding values. But the calculation is slow; is there a way I can make the computations faster?
The number of elements in X/Y is 7202 (a fairly large set).
I did a performance analysis; the screenshot is attached.
void RotateSlownessTop(vector <double> &XR1, vector <double> &YR1, float theta = 0.0)
{
    Matrix2d a;
    a(0, 0) = cos(theta);
    a(0, 1) = -sin(theta);
    a(1, 0) = sin(theta);
    a(1, 1) = cos(theta);
    vector <double> XR2(7202), YR2(7202);
    for (size_t i = 0; i < X.size(); ++i)
    {
        XR2[i] = (a(0, 0)*X[i] + a(0, 1)*Y[i]);
        YR2[i] = (a(1, 0)*X[i] + a(1, 1)*Y[i]);
    }
    size_t i = 0;
    size_t j = 0;
    while (i < YR2.size())
    {
        if (i > 0)
            if ((XR2[i]>0) && (XR2[i-1]<0))
                j = i;
        if (YR2[i] > (-1e-10) && YR2[i]<0.0)
            YR2[i] = 0.0;
        if (YR2[i] < (1e-10) && YR2[i]>0.0)
            YR2[i] = -YR2[i];
        if ( YR2[i]<0.0)
        {
            YR2.erase(YR2.begin() + i);
            XR2.erase(XR2.begin() + i);
            --i;
        }
        ++i;
    }
    size_t k = 0;
    while (j < YR2.size())
    {
        YR1[k] = (YR2[j]);
        XR1[k] = (XR2[j]);
        YR2.erase(YR2.begin() + j);
        XR2.erase(XR2.begin() + j);
        ++k;
    }
    size_t l = 0;
    for (; k < XR1.size(); ++k)
    {
        XR1[k] = XR2[l];
        YR1[k] = YR2[l];
        l++;
    }
}
Edit 1: I have updated the code by replacing all push_back() calls with operator[], since I read somewhere that this is much faster.
However, the whole program is still slow. Any suggestions are appreciated.
If the size is large, you can improve push_back() by pre-allocating the space it needs. Add this before the loop:
XR2.reserve(X.size());
YR2.reserve(X.size());
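For context, a minimal sketch of how that pairs with the original push_back loop (names as in the question; it assumes the vectors start out empty rather than pre-sized):
std::vector<double> XR2, YR2;
XR2.reserve(X.size()); // one allocation up front...
YR2.reserve(X.size());
for (size_t i = 0; i < X.size(); ++i)
{
    // ...so these push_back calls never reallocate (amortized O(1)).
    XR2.push_back(a(0, 0) * X[i] + a(0, 1) * Y[i]);
    YR2.push_back(a(1, 0) * X[i] + a(1, 1) * Y[i]);
}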
We are writing a method (myFunc) that writes some data to an array. The array must be a field of the class (MyClass).
Example:
class MyClass {
public:
    MyClass(int dimension);
    ~MyClass();
    void myFunc();
protected:
    float* _nodes;
};

MyClass::MyClass(int dimension){
    _nodes = new float[dimension];
}

void MyClass::myFunc(){
    for (int i = 0; i < _dimension; ++i)
        _nodes[i] = (i % 2 == 0) ? 0 : 1;
}
The method myFunc is called about 10,000 times, and it takes about 9-10 seconds (together with other methods).
But if we define myFunc as:
void MyClass::myFunc(){
    float* test = new float[_dimension];
    for (int i = 0; i < _dimension; ++i)
        test[i] = (i % 2 == 0) ? 0 : 1;
}
our program works much faster: it takes about 2-3 seconds (when called about 10,000 times).
Thanks in advance!
This may help (in either case):
for (int i = 0; i < _dimension; )
{
    test[i++] = 0.0f;
    test[i++] = 1.0f;
}
I'm assuming _dimension is even, but that is easy to fix if it is not.
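A sketch of that fix for odd sizes (my addition, not part of the original answer):
int i = 0;
// Write full (0, 1) pairs while they fit.
for (; i + 1 < _dimension; )
{
    test[i++] = 0.0f;
    test[i++] = 1.0f;
}
// An odd _dimension ends on an even index, which takes a 0.
if (i < _dimension)
    test[i] = 0.0f;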
If you want to speed up Debug mode, it may help the compiler if you try:
void MyClass::myFunc(){
    float* const nodes = _nodes;
    const int dimension = _dimension;
    for (int i = 0; i < dimension; ++i)
        nodes[i] = (i % 2 == 0) ? 0.0f : 1.0f;
}
Of course, in reality you should focus on using Release-mode for everything performance-related.
In your example code, you do not initialise _dimension in the constructor, but you use it in myFunc. So you might be filling millions of entries into the array even though you have only allocated a few thousand. In the example that works, you use the same dimension for creating and filling the array, so you are probably initialising it correctly in that case.
Just make sure that _dimension is properly initialised.
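A minimal sketch of that fix (assuming _dimension is simply the member that was never set):
class MyClass {
public:
    explicit MyClass(int dimension)
        : _dimension(dimension)           // store the size...
        , _nodes(new float[dimension]) {} // ...that matches the allocation
    ~MyClass() { delete[] _nodes; }
    void myFunc();
protected:
    int _dimension; // myFunc now loops over exactly the allocated range
    float* _nodes;
};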
This is faster on most machines:
void MyClass::myFunc(){
    float* const nodes = _nodes;
    const int dimension = _dimension;
    if(dimension < 2){
        if(dimension < 1)
            return;
        nodes[0] = 0.0f;
        return;
    }
    // Seed the first pair, then keep doubling the initialised prefix with
    // memcpy until the whole array is filled.
    nodes[0] = 0.0f;
    nodes[1] = 1.0f;
    for (int i = 2; ; i <<= 1){
        if( (i << 1) < dimension ){
            memcpy(nodes + i, nodes, i * sizeof(float));
        }else{
            memcpy(nodes + i, nodes, (dimension - i) * sizeof(float));
            break;
        }
    }
}
Try this:
memset(test, 0, sizeof(float) * _dimension);
for (int i = 1; i < _dimension; i += 2)
{
    test[i] = 1.0f;
}
You can also run this piece once and store the array in a static location. On each subsequent iteration you can then use the stored data without any computation.
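A sketch of that caching idea (my code; it assumes _dimension stays the same between calls, and recomputes if it does not):
#include <algorithm>
#include <vector>

void MyClass::myFunc(){
    // Build the 0/1 pattern once; subsequent calls only copy cached data.
    static std::vector<float> pattern;
    if (pattern.size() != static_cast<size_t>(_dimension)) {
        pattern.resize(_dimension);
        for (int i = 0; i < _dimension; ++i)
            pattern[i] = (i % 2 == 0) ? 0.0f : 1.0f;
    }
    std::copy(pattern.begin(), pattern.end(), _nodes);
}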
I am trying to speed up a piece of code that is run a total of 150,000,000 times.
I have analysed it using "Very Sleepy", which indicates that the code spends most of its time in these three areas, shown in the image:
The code is as follows:
double nonLocalAtPixel(int ymax, int xmax, int y, int x, vector<nodeStructure> &nodeMST, int squareDimension, Mat &inputImage) {

    vector<double> nodeWeights(8,0);
    vector<double> nodeIntensities(8,0);
    bool allZeroWeights = true;

    int numberEitherside = (squareDimension - 1) / 2;
    int index = 0;
    for (int j = y - numberEitherside; j < y + numberEitherside + 1; j++) {
        for (int i = x - numberEitherside; i < x + numberEitherside + 1; i++) {
            // out of range or the centre pixel
            if (j < 0 || i < 0 || j > ymax || i > xmax || (j == y && i == x)) {
                index++;
                continue;
            }
            else {
                int centreNodeIndex = y*(xmax+1) + x;
                int thisNodeIndex = j*(xmax+1) + i;
                // add to intensity list
                Scalar pixelIntensityScalar = inputImage.at<uchar>(j, i);
                nodeIntensities[index] = ((double)*pixelIntensityScalar.val);
                // find weight from p to q
                float weight = findWeight(nodeMST, thisNodeIndex, centreNodeIndex);
                if (weight != 0 && allZeroWeights) {
                    allZeroWeights = false;
                }
                nodeWeights[index] = (weight);
                index++;
            }
        }
    }

    // find min b
    int minb = -1;
    int bCost = -1;
    if (allZeroWeights) {
        return 0;
    }
    else {
        // iterate all b values
        for (int i = 0; i < nodeWeights.size(); i++) {
            if (nodeWeights[i] == 0) {
                continue;
            }
            double thisbCost = nonLocalWithb(nodeIntensities[i], nodeIntensities, nodeWeights);
            if (bCost < 0 || thisbCost < bCost) {
                bCost = thisbCost;
                minb = nodeIntensities[i];
            }
        }
    }
    return minb;
}
Firstly, am I right to assume that the time indicated by Very Sleepy means the majority of the time is spent allocating and then deleting the vectors?
Secondly, are there any suggestions for speeding this code up?
Thanks
- use std::array instead of std::vector (see the sketch below)
- reuse the vectors by passing them as arguments to the function, or as a global variable if possible (I'm not aware of the structure of the code, so I would need more info)
- allocate one vector of size 16 instead of two vectors of size 8; this will make your memory less fragmented
- use parallelism if findWeight is thread-safe (you need to provide more details on that too)
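A minimal sketch of the first suggestion (my code; it assumes the rest of the function stays as in the question): replacing the two heap-allocated vectors with fixed-size std::array removes a heap allocation and deallocation from every one of the 150,000,000 calls.
#include <array>

// Drop-in replacement for the two vectors at the top of nonLocalAtPixel:
std::array<double, 8> nodeWeights{};     // zero-initialised, lives on the stack
std::array<double, 8> nodeIntensities{}; // no heap traffic per call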