CUDA Simple Array Search - Shared Memory - c++

I am writing a function to search for the first occurrence that matches a specific criterion. In my particular problem, I am interested in finding the array index of a circle that contains a given point. I have three arrays that collectively describe the circles: x coordinates, y coordinates, and radii (cx, cy, cr). Given an input point, I calculate whether or not it falls inside each of the circles defined by the three arrays. I chose to declare the x, y, and radius arrays in global memory because my search function will be called often and these arrays will not change.
This function seems pretty straightforward, but I'm getting the following error:
cudaMemcpy(&res, dev_idx, sizeof(int), cudaMemcpyDeviceToHost) returned the launch timed out and was terminated(6)
static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t);
#define HANDLE_ERROR(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
// global memory arrays on the device
__device__ __constant__ double* cx;
__device__ __constant__ double* cy;
__device__ __constant__ double* cr;
__global__ void _cuda_find_containing_circle(double px, double py, int* fidx, int count){
// px, py: x and y coordinates of the search point
// fidx: a device variable to return the index of the matching circle
// count: total number of circle elements in the device arrays
// get the current thread id
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < count){
// calculate the hypotenuse of the point and grab the radius
float hypot = (float)hypot(cx[tid] - px, cy[tid] - py);
float radius = (float)cr[tid];
// if the hypotenuse is within the radius, return the current index
// this looks like the problem, i.e. hardcoding to "if(tid = 10){" does not result in an error
if (hypot <= radius){
atomicMin(fidx, tid);
// I've also tried setting the idx directly
//*fidx = tid;
}
// increment thread id
tid += blockDim.x * gridDim.x;
}
}
void main(){
// define a search point for testing
int px = 100;
int py = 150;
// initialize cx, cy, cr device values using the following arrays
double *circlex;
double *circley;
double *circler;
int count = 100;
circlex = (double *) malloc(sizeof(double) * count);
circley = (double *) malloc(sizeof(double) * count);
circler = (double *) malloc(sizeof(double) * count);
// populate arrays with values that will not pass the search criteria
for (int i = 0; i < count; i++) {
circlex[i] = 2.1;
circley[i] = 3.2;
circler[i] = 0.0;
}
// add a single value that will pass the search criteria (for testing)
circlex[count - 5] = 101.0;
circley[count - 5] = 160.0;
circler[count - 5] = 11.0; //hypot should result in 10.0498 < 11
// copy these data onto the device
HANDLE_ERROR(cudaMemcpyToSymbol(cx, &circlex, sizeof(circlex), 0, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpyToSymbol(cy, &circley, sizeof(circley), 0, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpyToSymbol(cr, &circler, sizeof(circler), 0, cudaMemcpyHostToDevice));
// create an object on the device to store the search index result
int* dev_idx;
int idx = 999; // initial condition. If 999 is returned then I know that a match was not found
HANDLE_ERROR(cudaMalloc((void **) &dev_idx, sizeof(int)));
HANDLE_ERROR(cudaMemcpy(dev_idx, &idx, sizeof(int), cudaMemcpyHostToDevice));
// call the search function
_cuda_find_containing_circle <<<128, 128>>> (px, py, dev_idx, count);
// get the search result
// this line throws the following error: cudaMemcpy(&res, dev_idx, sizeof(int), cudaMemcpyDeviceToHost) returned the launch timed out and was terminated(6)
int res;
HANDLE_ERROR(cudaMemcpy(&res, dev_idx, sizeof(int), cudaMemcpyDeviceToHost));
cout << "IDX = " << res << endl;
}
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err)
{
if (err == cudaSuccess)
return;
std::cerr << statement<<" returned " << cudaGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
exit (1);
}
Is there something fundamentally incorrect with my approach?

The core problem is this:
cudaMemcpyToSymbol(cx, &circlex, sizeof(circlex), 0, cudaMemcpyHostToDevice);
cx is an uninitialised pointer to which you are copying sizeof(double *) bytes from a source array of double values, leaving it holding a nonsense address that results in an illegal memory operation in your kernel.
Do something like this instead:
double * _cx; cudaMalloc((void **)&_cx, sizeof(double) * count);
cudaMemcpy(_cx, circlex, sizeof(double) * count, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(cx, &_cx, sizeof(_cx));
i.e. allocate a device memory buffer, copy the host source data to that buffer, then copy the address of that buffer to the constant memory pointer.
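Applied to your setup, a minimal sketch of the corrected host-side copies might look like this (the _cx/_cy/_cr temporaries are my own names; error checking reuses your HANDLE_ERROR macro):
double *_cx, *_cy, *_cr;
HANDLE_ERROR(cudaMalloc((void **)&_cx, sizeof(double) * count));
HANDLE_ERROR(cudaMalloc((void **)&_cy, sizeof(double) * count));
HANDLE_ERROR(cudaMalloc((void **)&_cr, sizeof(double) * count));
// copy the host data into the device buffers
HANDLE_ERROR(cudaMemcpy(_cx, circlex, sizeof(double) * count, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(_cy, circley, sizeof(double) * count, cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(_cr, circler, sizeof(double) * count, cudaMemcpyHostToDevice));
// copy the *device addresses* (not the data) into the __constant__ pointers
HANDLE_ERROR(cudaMemcpyToSymbol(cx, &_cx, sizeof(_cx)));
HANDLE_ERROR(cudaMemcpyToSymbol(cy, &_cy, sizeof(_cy)));
HANDLE_ERROR(cudaMemcpyToSymbol(cr, &_cr, sizeof(_cr)));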

Related

How to keep track of a global index within an MPI_Scatter(...) function?

I am working on an MPI program in which process 0 reads a .raw file, places it in an array, and distributes it to the other processes so they can each do their share of the work. I am currently distributing sections of the array with MPI_Scatter, which seems to be working.
The problem:
The calculation performed on each sub-array requires knowing its global index, but currently I only know how to iterate over its local indices. Below is the code:
#include <iostream>
#include <cstdint>
#include <mpi.h>
#define MCW MPI_COMM_WORLD
const int WIDTH = 1300;
const int HEIGHT = 600;
//work function that requires the *global* x,y coordinate of the file
float calc_vorticity(int x, int y, int width, int height, float* vector_field) {
float d_x = 0.01;
float d_y = 0.01;
uint32_t index = y * width + x;
int start_x = (x == 0) ? 0 : x - 1;
int end_x = (x == width - 1) ? x : x + 1;
int start_y = (y == 0) ? 0 : y - 1;
int end_y = (y == height - 1) ? y : y + 1;
uint32_t duidx = (start_y * width + end_x) * 2;
uint32_t dvidx = (end_y * width + start_x) * 2;
std::pair<double, double> fdu(vector_field[duidx], vector_field[duidx + 1]);
std::pair<double, double> fdv(vector_field[dvidx], vector_field[dvidx + 1]);
std::pair<double, double> vec0(vector_field[index * 2], vector_field[index * 2 + 1]);
float duy = (fdu.second - vec0.second) / (d_x * (end_x - start_x));
float dvx = (fdv.first - vec0.first) / (d_y * (end_y - start_y));
return duy - dvx;
}
int main(int argc, char** argv) {
int rank, size;
MPI_Init(&argc, &argv); //initialize MPI
MPI_Comm_rank(MCW, &rank); //Assign each processor a rank
MPI_Comm_size(MCW, &size); //Assign the size
//allocate the space
float* vector_field = new float[WIDTH * HEIGHT * 2];
//have rank 0 read in image
if(!rank) {
FILE* file = fopen("../data/cyl2d_1300x600_float32[2].raw", "r");
fread(vector_field, sizeof(float), WIDTH * HEIGHT * 2, file);
fclose(file);
}
//create buffer of data & catch all processes up to this point
float* buffer = new float[(WIDTH * HEIGHT * 2) / size];
MPI_Barrier(MCW);
//broadcast the array to other ranks
MPI_Scatter(
vector_field, //array being sent out that resides on root process
(WIDTH * HEIGHT * 2) / size, //number of items to send
MPI_FLOAT, //type of items sent
buffer, //buffer of data that holds number of items in sent array
(WIDTH * HEIGHT * 2) / size, //number of items to be received
MPI_FLOAT, //type of items received
0, //originating process
MCW //communicator
);
//allocate space for results
//Note that we only need half the size of buffer as vorticity is represented with only 1 float
float* vorticities = new float[(WIDTH * HEIGHT) / size];
MPI_Barrier(MCW);
//calculate value for each cell
// !!! Problem here !!!
/* Call calc_vorticity(...) sending the current **global** x/y coordinate */
/*
Serial implementation call looks like this:
for (int i=0; i<WIDTH; i++){
for (int j=0; j<HEIGHT; j++){
vorticities[j*WIDTH+i] = vorticity(i, j, WIDTH, HEIGHT, vector_field);
}
}
*/
//assure all processes are caught up
MPI_Barrier(MCW);
//have process 0 write to output file
if(!rank) {
FILE* output = fopen("../data/vorticities_dist.raw", "w");
fwrite(vorticities, sizeof(float), WIDTH * HEIGHT, output);
fclose(output);
}
//delete used memory
delete[] vector_field;
delete[] vorticities;
//Finalize & exit
MPI_Finalize();
return 1;
}
I've looked at a lot of MPI_Gather docs, and I don't think Scatter/Gather alone is the answer, since you cannot also send the global indexes along with that array. I was also tossing around the idea of creating my own type with two fields, num and global_index; that way I could scatter an array of objects that carry their global index with them, though I don't know how to scatter custom datatypes.
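For what it's worth, the usual approach (a sketch of my own, not from the original post) is to derive the global index arithmetically: MPI_Scatter hands rank r the contiguous chunk starting at element r * cells_per_rank, so the global flat index of local cell k is r * cells_per_rank + k, from which the global x and y follow:
// Sketch only: assumes WIDTH * HEIGHT is divisible by size and the contiguous
// scatter layout from the question (2 floats per cell).
int cells_per_rank = (WIDTH * HEIGHT) / size;
for (int local = 0; local < cells_per_rank; local++) {
    int global = rank * cells_per_rank + local;  // global cell index
    int x = global % WIDTH;                      // global column
    int y = global / WIDTH;                      // global row
    // Note: calc_vorticity also reads neighbouring cells, which may live on
    // another rank; the full vector_field would have to be broadcast (or halo
    // rows exchanged) for cells on chunk boundaries to be correct.
    vorticities[local] = calc_vorticity(x, y, WIDTH, HEIGHT, vector_field);
}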

How does cudaLaunchKernel know the array size of "void **args"?

I know the size of an array can be obtained with the following code:
int a = 12;
float b = 12.0f;
char c = 'c';
void *param[] = { (void*)&a, (void*)&b, (void*)&c };
// the number of elements in param
size_t size = sizeof(param)/sizeof(void*);
But now I want param to be passed to a function named TryToGetTheSize and to get the size back as the return value.
size_t TryToGetTheSize(void **array)
{
// return the size of void* array
}
...
size_t size = TryToGetTheSize(param);
I've tried an idea borrowed from the implementation of strlen, which walks the char* pointer forward one element at a time and counts until it reaches '\0'.
But that method does not work with void**: there is no terminator, and no way to validate the address a void* points to.
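The closest analogue of the strlen trick (my own illustration, not from the original question) would be to require the caller to append a NULL sentinel, but that changes the calling convention rather than discovering the size:
// Works only because the caller promises to terminate the array with NULL.
size_t TryToGetTheSize(void **array)
{
    size_t n = 0;
    while (array[n] != NULL)
        ++n;
    return n;
}

void *param[] = { (void*)&a, (void*)&b, (void*)&c, NULL };
size_t size = TryToGetTheSize(param);  // yields 3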
So it seems impossible to know the size given only the void** array, but when I looked at the CUDA API, I found this:
cudaLaunchKernel(const void* func, dim3 gridDim, dim3 blockDim, void** args, size_t sharedMem, cudaStream_t stream)
In CUDA, we usually use <<<>>> for kernel launching, but it's the same if we manually set up the arguments and call cudaLaunchKernel directly.
In the cudaLaunchKernel API, I notice the fourth parameter args is used as the parameters of the kernel function func, and no other parameter describes the size of args.
So, I have two questions:
1) How does cudaLaunchKernel know the size of void** args?
2) If cudaLaunchKernel doesn't need to know the size of void** args, how does it work?
Here is my sample code that uses cudaLaunchKernel instead of <<<>>> to launch the kernel.
#include<stdio.h>
#include<stdlib.h>
#include<cuda_runtime.h>
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) y[i] = a * x[i] + y[i];
}
int main(void)
{
int N = 1 << 20;
float *hx, *hy, *dx, *dy;
hx = (float*)malloc(N * sizeof(float));
hy = (float*)malloc(N * sizeof(float));
cudaMalloc(&dx, N * sizeof(float));
cudaMalloc(&dy, N * sizeof(float));
for (int idx = 0; idx < N; idx++)
{
hx[idx] = 1.0f;
hy[idx] = 2.0f;
}
cudaMemcpy(dx, hx, N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dy, hy, N * sizeof(float), cudaMemcpyHostToDevice);
unsigned int threads = 256;
unsigned int blocks = (N + 255) / threads;
float ratio = 2.0f;
//saxpy<<<blocks, threads>>>(N, ratio, dx, dy);
void *args[] = { &N, &ratio, &dx, &dy };
cudaLaunchKernel((void*)saxpy, dim3(blocks), dim3(threads), args, 0, NULL);
cudaMemcpy(hy, dy, N * sizeof(float), cudaMemcpyDeviceToHost);
float max_error = 0.0f;
for (int jdx = 0; jdx < N; jdx++)
{
max_error = max(max_error, abs(hy[jdx] - 4.0f));
}
printf("Max Error: %f\n", max_error);
cudaFree(dx);
cudaFree(dy);
free(hx);
free(hy);
return 0;
}
Quoting from the related documentation:
The number of kernel parameters and their offsets and sizes do not
need to be specified as that information is retrieved directly from
the kernel's image.
Every CUDA device function has its argument list stored with the statically compiled function code. The API, therefore, knows exactly how many argument entries a call to cudaLaunchKernel requires. You will get a segfault or undefined behaviour if you supply too few to the launch call.

Numerical error in cuda/cublas simple kernel using particular input

I am working with CUDA and cuBLAS, trying to implement simple operations like element-wise multiplication/division of a matrix by a scalar. I am using only float for my experiments. I know the most obvious way to do it is to write a kernel like this one:
__global__ void mul_elementwise(const unsigned int n, float* source, float* dest, const float value)
{
const unsigned int offset = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int stride = blockDim.x * gridDim.x;
for (unsigned int i = offset; i < n; i += stride)
{
dest[i] = source[i] * value;
}
}
This kernel works for both multiplication and division (just use 1/x as the value). But the same thing can be achieved with the cuBLAS library too: suppose we have an m x n matrix A stored in column-major order and a scalar x; then, setting alpha = x or alpha = 1/x and d_ones to a vector of m*n ones, we can invoke the following and obtain the same result:
cublasSaxpy(cublas_handle, m * n, &alpha, d_ones, 1, A_dev, 1);
Both methods usually work just fine, but I am facing problems with one particular matrix, for which both methods fail. I isolated this big matrix and built an MCVE, available here (you can compile it with nvcc mcve.cu -lcublas). As you can see, the results in both cases are totally wrong: the host result is completely different, and I am trying to figure out what's going on. I do not see any error in the code, but maybe I should try using double instead of float and see what happens.
Any opinions about this situation? Thanks in advance!
EDIT #1: I tried using doubles, but nothing changes if I use cublasDaxpy, whereas it works perfectly with the custom kernel. I think the values are too small, so single floating point precision is not enough.
Interesting MCVE. Wouldn't it have been possible to shrink your vector down to just a few elements? Isn't it possible to show the calculation discrepancy based on just 1 vector element?
Anyway I see several problems.
Your kernel implements the following function: y=alpha*x. But SAXPY implements y=alpha*x+y. Now, if y started out as (all) zero, then these two would be the same. But that's not what you have:
           CUBLAS    Your Kernel
---------------------------------
alpha:     alpha     alpha
x:         1         ahost        (ahost is your huge data array)
y:         ahost     -
So your kernel is computing y=alpha * ahost, but your CUBLAS call is computing y = alpha*1 + ahost. I wouldn't expect the same result from these, in general.
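Two ways to make the cuBLAS call actually match the kernel (the first is what the complete code further down does; cublasSscal is an alternative I'm adding for completeness):
// 1. axpy into a zero-filled destination: d_zero = alpha * adevcublas + 0
cublasSaxpy(ch, len, &alpha, adevcublas, 1, d_zero, 1);
// 2. or simply scale in place: adevcublas = alpha * adevcublas
cublasSscal(ch, len, &alpha, adevcublas, 1);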
Your analysis of error seems flawed in a few ways. First, you are computing the absolute error in a float variable (a number which will always be positive, since it's the absolute value), but then you're comparing it against a negative number:
float diff = abs(host[i]-dev[i]);
...
if (diff > (-1e12))
Won't that if test always be true? Perhaps you meant 1e-12, although that would still be flawed: a fixed error threshold for a floating-point comparison should be scaled to the size of the numbers being compared, since float quantities only carry about 6-7 accurate decimal digits. (And summing these errors is also troublesome.)
Here is a complete code that has the above issues fixed, and produces zero sum error for all the comparisons (host<->kernel and host<->cublas):
static float array[] = {0x00000000,
0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0x00000000,0xB58DA1CF,0xB50D2FEC,0x34A48536,0xB4A1D5BC,0x358E1345,0x35943AAC,0xB5983F40,0xB43628BB,0xB4A95348,0xB4DB751C,0xB50C8D1A,0xB3EFCBB5,0x3552B8CD,0x3538A167,0x358FDE0D,0xB4D54CE9,0xB5D29BB7,0xB4A234EE,0x346EF2F4,0x35B5D9F2,0xB40F1487,0x3554BC20,0x33FD9466,0xB536D37D,0xB3C2E594,0xB59DA581,0x3584FC87,0x34438F09,0x35D293CB,0xB4FBB002,0xB59F41E9};
#include <iostream>
#include <stdio.h>
#include <cublas_v2.h>
#include <assert.h>
#define TOL 0.0001
typedef unsigned int u32;
#define GET_STRIDE() u32(blockDim.x * gridDim.x)
#define GET_OFFSET() u32(blockIdx.x * blockDim.x + threadIdx.x)
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
__global__ void div_elementwise(const u32 n, float* source, float* dest, const float value)
{
for (u32 i = GET_OFFSET(); i < n; i += GET_STRIDE())
{
dest[i] = source[i] * value;
}
}
float check_eq(float* dev, float* host, u32 len)
{
float sum = 0.0f;
for (u32 i = 0; i < len; ++i)
{
if (dev[i]!=host[i])
{
//printf("diff %d %f %f\n", i, dev[i], host[i]);
//break;
float diff = abs((host[i]-dev[i])/host[i]);
sum += diff;
if (diff > (TOL))
printf("diff %d %f\n", i, diff);
}
}
printf("%f\n", sum);
return sum;
}
void div_host(float* a, float v, u32 len)
{
for (u32 i = 0; i < len; ++i)
{
a[i]=a[i]*v;
}
}
int main()
{
u32 len = sizeof(array)/sizeof(float);
printf("array len = %d\n", len);
for (int i =0; i < len; i++) if (isnan(array[i])) {printf("nan value at %d\n",i); return -1;}
float* adev, *adevcublas, *d_zero;
float* ahost = (float*) malloc(len * sizeof(float));
checkCuda(cudaMalloc(&adev, len * sizeof(float)));
checkCuda(cudaMalloc(&adevcublas, len * sizeof(float)));
checkCuda(cudaMalloc(&d_zero, len * sizeof(float)));
memcpy(ahost, &array[0], len * sizeof(float));
checkCuda(cudaMemcpy(adev, ahost, len * sizeof(float), cudaMemcpyHostToDevice));
checkCuda(cudaMemcpy(adevcublas, ahost, len * sizeof(float), cudaMemcpyHostToDevice));
checkCuda(cudaMemset(d_zero, 0, len*sizeof(float)));
float alpha = 1/2494.f;
printf("%f\n", alpha);
div_host(ahost, alpha, len);
u32 tb = 256;
div_elementwise<<<((len + tb - 1) / tb),tb>>>(len, adev, adev, alpha);
float* r = (float*) malloc(len * sizeof(float));
checkCuda(cudaMemcpy(r, adev, len * sizeof(float), cudaMemcpyDeviceToHost));
check_eq(r,ahost,len);
cublasHandle_t ch;
cublasCreate(&ch);
float* r0 = (float*) malloc(len * sizeof(float));
cublasStatus_t stat = cublasSaxpy(ch, len, &alpha, adevcublas, 1, d_zero, 1);
if (stat != CUBLAS_STATUS_SUCCESS) {std::cout << "CUBLAS error: " << (int)stat << std::endl; return 1;}
checkCuda(cudaMemcpy(r0, d_zero, len * sizeof(float), cudaMemcpyDeviceToHost));
check_eq(r0,ahost,len);
free(r);
free(r0);
free(ahost);
cudaFree(adev);
return 0;
}

Example of increasing the work per thread in CUDA

Algorithm :
I'm writing a program with CUDA and the problem is the following:
Two matrices A (n * 128) and B (m * 128)
I take the first row of A, and I compute the distance between that vector and all the rows of B, one by one.
I write the result of each distance on a row of a matrix C, so the element C(i,j) of C contains the distance between row i of A and row j of B.
and I proceed with the next row of A.
I've implemented it this way: a grid of n by m blocks, and 128 threads per block (1 x 128).
QUESTION: The program runs successfully with the expected results, but the execution time is only around 5 to 10 times faster than the single-threaded CPU version. I would like to know how to increase the work per thread before the reduction in order to improve performance.
Kernel code (original : Not optimized)
__global__ void EuclideanDistances( float *A, float *B , float *C , int n , int m)
{
// SIZE is equal to 128
__shared__ float accumResult[SIZE];
float sA;
float sB;
// MAPPING
int bx = blockIdx.x; // n
int by = blockIdx.y; // m
int ty = threadIdx.y; // 128
int tx = threadIdx.x; // 1
sA = A [bx * SIZE + ty];
sB = B [by * SIZE + ty];
__syncthreads();
accumResult[ty] = (sA - sB) * (sA - sB);
__syncthreads();
// Parallel tree-reduction
for (int stride = SIZE/2 ; stride > 0 ; stride >>= 1)
if (ty < stride)
{
accumResult[ty] += accumResult [stride + ty];
__syncthreads();
}
// Writing results to output matrix
if ((threadIdx.y == 0))
C [bx * m + by] = accumResult[ty];
__syncthreads();
}
UPDATE
Now, I'm using another mapping : Instead of taking a grid of n by m blocks and a block of 128 threads, I'm increasing the number of threads within a block in order to decrease the number of blocks.
New mapping:
Block of 128 by 8 threads (total of 1024 threads, which is the max size)
Grid of n/8 by m/8 blocks
Unfortunately, it's giving wrong results.
Optimized kernel code (to be updated)
__global__ void EuclideanDistances( float *A, float *B , float *C, int n , int m)
{
__shared__ float accumResult[SIZE][8];
__shared__ float sA[SIZE][8];
__shared__ float sB[SIZE][8];
int bx = blockIdx.x; // n / 8
int by = blockIdx.y; // m / 8
int tx = threadIdx.x; // 8
int ty = threadIdx.y; // 128
int i = bx * tx * SIZE + ty;
int j = by * tx * SIZE + ty;
sA[ty][tx] = A [i];
sB[ty][tx] = B[j];
__syncthreads();
accumResult[ty][tx] = (sA[ty][tx] - sB[ty][tx]) * (sA[ty][tx] - sB[ty][tx]);
__syncthreads();
// Reduction
for (int stride = SIZE/2 ; stride > 0 ; stride>>=1)
if (ty < stride)
{
accumResult[ty][tx] += accumResult [stride + ty][tx];
__syncthreads();
}
C[bx * m + by] = accumResult[0][tx];
}
HOST CODE (allocations + kernel calls)
int main()
{
int m = 20000; //MatrixB size : m * SIZE
int n = 4000; //MatrixA size : n * SIZE
srand((unsigned)time(0));
// Host Allocations
float *matrixA = (float *) malloc (n * SIZE * sizeof(float));
for(int i=0; i < n * SIZE; i++)
matrixA[i] = (float) (rand()%100)+1;
float *matrixB = (float *) malloc (m * SIZE * sizeof(float));
for(int i=0; i < m * SIZE; i++)
matrixB[i] = (float) (rand()%100)+1;
float *results_kernel1 = (float *) malloc (n * m * sizeof(float));
float *results_kernel2 = (float *) malloc (n * m * sizeof(float));
//Device Allocation
float *d_matrixA;
float *d_matrixB;
cudaMalloc((void **)&d_matrixA, n * SIZE * sizeof(float));
cudaMalloc((void **)&d_matrixB, m * SIZE * sizeof(float));
cudaMemcpy(d_matrixA , matrixA , n * SIZE * sizeof(float) , cudaMemcpyHostToDevice);
cudaMemcpy(d_matrixB , matrixB , m * SIZE * sizeof(float) , cudaMemcpyHostToDevice);
float *d_results_kernel1;
float *d_results_kernel2;
cudaMalloc((void **)&d_results_kernel1 , n * m * sizeof(float));
cudaMalloc((void **)&d_results_kernel2 , n * m * sizeof(float));
dim3 threads1 (1 , 128);
dim3 blocks1 (n , m);
EuclideanDistances1 <<<blocks1 , threads1>>> (d_matrixA , d_matrixB , d_results_kernel1 , n , m);
cudaDeviceSynchronize();
cudaMemcpy(results_kernel1 , d_results_kernel1 , n * m *sizeof(float) , cudaMemcpyDeviceToHost);
cudaFree(d_results_kernel1);
dim3 threads2 (8 , 128); // 1024 threads per block (maximum)
dim3 blocks2 (ceil((float)n/8) , ceil((float)m/8));
EuclideanDistances2 <<<blocks2 , threads2>>> (d_matrixA , d_matrixB , d_results_kernel2 , n , m);
cudaDeviceSynchronize();
cudaMemcpy(results_kernel2 , d_results_kernel2 , n * m *sizeof(float) , cudaMemcpyDeviceToHost);
cudaFree(d_results_kernel2);
// Visualising and comparing results
for (int i = 0 ; i < 50 ; i++)
std::cout << "kernel1 : " << results_kernel1[i] << " | kernel2 : " << results_kernel2[i] << std::endl;
free(matrixA);
free(matrixB);
free(results_kernel1);
free(results_kernel2);
return 0;
}
PS: I have CUDA 6.0 with a NVIDIA GTX 650 (compute capability 3.0)
It seems your question has 2 components:
why isn't my second kernel working?
how do I make my code run faster?
Why isn't my second kernel working?
You had several issues:
indexing problems in the initial calculation of i and j, as well as in the index used to store the C value.
violation of the usage rules for __syncthreads() inside a conditional block
Item 1 was the key element in getting the code working.
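For reference, the indexing change behind item 1 (taken from the corrected kernel further down, EuclideanDistances2) is:
// original (wrong): both i and j scale the block index by tx
//   int i = bx * tx * SIZE + ty;
//   int j = by * tx * SIZE + ty;
// fixed: each of the 8 x-threads handles one row of A; j depends only on by
int i = ((bx * 8) + tx) * SIZE + ty;
int j = by * SIZE + ty;
...
if (ty == 0)
    C[((bx * 8) + tx) * m + by] = accumResult[0][tx];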
How do I make my code run faster?
This is more involved. First of all, your attempt at "increasing work per thread" didn't do anything of the kind; it was merely an increase in the number of threads per block (from 128 to 8*128). Each thread was doing approximately the same amount of work. Furthermore, in the process of going to a 2D threadblock for this attempt, I believe a couple of bad things happened:
various coalescing and shared-memory-bank-conflict load and store patterns were broken.
effective occupancy went down, due to the amount of shared memory required per block.
The net effect of the second kernel was to approximately double the execution time. So that is not what we want.
However, increasing work per thread may be a good idea, along with using shared memory, as well as trying to preserve good (global, shared) memory access patterns, as well as allowing for increased occupancy.
What follows is a work-in-progress along those lines. The following code has your second kernel fixed, along with timing infrastructure, full data verification, and 2 new kernels. The first new kernel (#3) is what I would call a "naive" kernel. It simply allocates one thread per output point, and each thread loops through the necessary vectors, computing its individual result. There is no usage of shared memory, or even much attention to coalescing or any other optimization. However, with a tweak to the threadblock configuration from (16,16) to (8,32) threads, which I observed in @talonmies' answer (now deleted), this kernel performs significantly (3x) faster than your "fast" kernel. After further thought about the (8,32) observation, I concluded that the next attempt at optimization should focus on:
elimination of the usage of a parallel reduction to compute the vector distance (i.e. allow adjacent threads to use a straight for-loop to loop through the vectors)
maximization of benefit from the cache
efficient usage of shared memory
insist on perfect global coalescing/perfect usage of shared memory for all reads and writes
Item 4 prompted the question in the comments "may I transpose the matrices?" With this permission, it's possible to re-organize the data to facilitate item 4 above. Item 2 above is addressed in my "fast" kernel (#4) by loading the B vector into shared memory, while allowing the cache to mostly focus on caching the A vectors, hopefully reducing cache-thrashing (A is the smaller of the 2 vector arrays, at about 2MB - fermi L2 is 768K, Kepler L2 is 1.5MB). By delivering A in transposed form, and effectively "transposing" B on-chip from shared memory, it's possible to use a straight for-loop to compute the vector distance, while allowing adjacent threads to have perfectly coalesced reads and writes, as well as "efficient" use of shared memory (i.e. non-bank-conflicted loads, and broadcast reads).
For my particular timing (Quadro5000 cc2.0 GPU, CUDA 6, RHEL 5.5), I see that your "fast" kernel requires about 2 seconds, my "naive" kernel requires about 0.7 seconds, and my "fast" kernel requires about 0.2 seconds, albeit with transposed (A,C) data.
EDIT: I've made one additional optimization, which is to have each block compute multiple (CHKSIZE) B vectors at one time. You can set CHKSIZE to 1 to see the previous result (~0.2 sec). I found that a CHKSIZE of 4 gave a good improvement. This is an attempt to exploit the data re-use of A. With this additional optimization at CHKSIZE of 4, the kernel time for kernel 4 drops to about 0.1 second.
Following is the code and a sample run:
$ cat t460.cu
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
// both M and N must be evenly divisible by SIZE, M must be evenly divisible by CHKSIZE
#define SIZE 128
#define N 4000
#define M 20000
#define CHKSIZE 4
__global__ void EuclideanDistances1( float *A, float *B , float *C , int n , int m)
{
// SIZE is equal to 128
__shared__ float accumResult[SIZE];
float sA;
float sB;
// MAPPING
int bx = blockIdx.x; // n
int by = blockIdx.y; // m
int ty = threadIdx.y; // 128
//int tx = threadIdx.x; // 1
sA = A [bx * SIZE + ty];
sB = B [by * SIZE + ty];
__syncthreads();
accumResult[ty] = (sA - sB) * (sA - sB);
__syncthreads();
// Parallel tree-reduction
for (int stride = SIZE/2 ; stride > 0 ; stride >>= 1){
if (ty < stride)
{
accumResult[ty] += accumResult [stride + ty];
}
__syncthreads();
}
// Writing results to output matrix
if ((ty == 0))
C [bx * m + by] = accumResult[ty];
__syncthreads();
}
__global__ void EuclideanDistances2( float *A, float *B , float *C, int n , int m)
{
__shared__ float accumResult[SIZE][8];
__shared__ float sA[SIZE][8];
__shared__ float sB[SIZE][8];
int bx = blockIdx.x; // n / 8
int by = blockIdx.y; // m
int tx = threadIdx.x; // 8
int ty = threadIdx.y; // 128
int i = ((bx*8) + tx) * SIZE + ty;
int j = by * SIZE + ty;
sA[ty][tx] = A[i];
sB[ty][tx] = B[j];
__syncthreads();
accumResult[ty][tx] = (sA[ty][tx] - sB[ty][tx]) * (sA[ty][tx] - sB[ty][tx]);
__syncthreads();
// Reduction
for (int stride = SIZE/2 ; stride > 0 ; stride>>=1){
if (ty < stride)
{
accumResult[ty][tx] += accumResult [stride + ty][tx];
}
__syncthreads();
}
if (ty == 0)
C[((bx*8)+tx) * m + by] = accumResult[0][tx];
}
//naive kernel
__global__ void EuclideanDistances3( float *A, float *B , float *C, int n , int m){
int idx = threadIdx.x+blockDim.x*blockIdx.x;
int idy = threadIdx.y+blockDim.y*blockIdx.y;
float result = 0.0f;
if ((idx < n) && (idy < m)){
for (int i = 0; i < SIZE; i++){
float temp = A[(idx*SIZE)+i] - B[(idy*SIZE)+i];
result += temp * temp;}
C[(idx*m) + idy] = result;
}
}
//optimized kernel
__global__ void EuclideanDistances4( const float *A, const float *B , float *C, const int n , const int m){
// n, A, 4000 this kernel assumes A is column-major A(SIZE, n)
// m, B, 20000 this kernel assumes B is row-major B(m, SIZE)
// this kernel assumes C is column-major C(m,n)
// this kernel assumes number of threads per threadblock == SIZE
// CHKSIZE is the number of B vectors that will be compute per block
__shared__ float my_sB[CHKSIZE*SIZE]; // enough shared storage for CHKSIZE vectors of B
int bx = blockIdx.x; // one block per CHKSIZE rows of B (the larger input matrix)
while ((bx*CHKSIZE) < m){ // not used, this while loop could be used to extend a block to multiple chunks
int tx = threadIdx.x;
for (int i = 0; i < CHKSIZE; i++) // load vectors of B into shared memory
my_sB[(i*SIZE)+tx] = B[(((bx*CHKSIZE)+i)*SIZE)+tx];
__syncthreads();
while (tx < n){ //loop across all vectors in A
float result[CHKSIZE];
for (int i = 0; i < CHKSIZE; i++)
result[i] = 0.0f;
for (int i = 0; i < SIZE; i++){
float Atemp = A[(n*i)+tx];
for (int j = 0; j < CHKSIZE; j++){ // compute all CHKSIZE B vectors with read of A
float temp = Atemp - my_sB[i + (j*SIZE)];
result[j] += temp * temp;}}
for (int i = 0; i < CHKSIZE; i++) // store CHKSIZE results
C[((i+(bx*CHKSIZE))*n)+ tx] = result[i];
tx += blockDim.x; } // continue looping across vectors in A
__syncthreads(); // necessary to prevent warps from racing ahead, if block looping is used
bx += gridDim.x;}
}
float comp_euclid_sq(const float *rA, const float *rB, const int size){
float result = 0.0f;
float temp;
for (int i = 0; i < size; i++){
temp = (rA[i] - rB[i]);
result += temp * temp;}
return result;
}
int main()
{
float et1=0.0f, et2=0.0f, et3=0.0f, et4=0.0f;
cudaEvent_t start1, start2, start3,start4, stop1, stop2, stop3, stop4;
cudaEventCreate(&start1);
cudaEventCreate(&start2);
cudaEventCreate(&start3);
cudaEventCreate(&start4);
cudaEventCreate(&stop1);
cudaEventCreate(&stop2);
cudaEventCreate(&stop3);
cudaEventCreate(&stop4);
int n = N; //MatrixA size : n * SIZE
int m = M; //MatrixB size : m * SIZE
srand((unsigned)time(0));
// Host Allocations
float *matrixA = (float *) malloc (n * SIZE * sizeof(float));
for(int i=0; i < n * SIZE; i++)
matrixA[i] = (float) (rand()%100)+1;
float *matrixB = (float *) malloc (m * SIZE * sizeof(float));
for(int i=0; i < m * SIZE; i++)
matrixB[i] = (float) (rand()%100)+1;
float *results_kernel = (float *) malloc (n * m * sizeof(float));
float *cpu_results_kernel = (float *) malloc (n * m * sizeof(float));
for (int i = 0; i< n*m; i++)
cpu_results_kernel[i] = comp_euclid_sq(matrixA + ((i/m)*SIZE), matrixB + (i%m)*SIZE, SIZE);
//Device Allocation
float *d_matrixA;
float *d_matrixB;
cudaMalloc((void **)&d_matrixA, n * SIZE * sizeof(float));
cudaMalloc((void **)&d_matrixB, m * SIZE * sizeof(float));
cudaMemcpy(d_matrixA , matrixA , n * SIZE * sizeof(float) , cudaMemcpyHostToDevice);
cudaMemcpy(d_matrixB , matrixB , m * SIZE * sizeof(float) , cudaMemcpyHostToDevice);
float *d_results_kernel;
cudaMalloc((void **)&d_results_kernel , n * m * sizeof(float));
dim3 threads1 (1 , SIZE);
dim3 blocks1 (n , m);
cudaEventRecord(start1);
EuclideanDistances1 <<<blocks1 , threads1>>> (d_matrixA , d_matrixB , d_results_kernel , n , m);
cudaEventRecord(stop1);
cudaMemcpy(results_kernel , d_results_kernel , n * m *sizeof(float) , cudaMemcpyDeviceToHost);
for (int i = 0; i< n*m; i++) {
if (results_kernel[i] != cpu_results_kernel[i]) {printf("cpu/kernel1 mismatch at %d, cpu: %f, kernel1: %f\n", i, cpu_results_kernel[i], results_kernel[i]); return 1;}}
cudaMemset(d_results_kernel, 0, n*m*sizeof(float));
cudaEventSynchronize(stop1);
cudaEventElapsedTime(&et1, start1, stop1);
dim3 threads2 (8 , SIZE); // 1024 threads per block (maximum)
dim3 blocks2 (n/8 , m); // assumes n evenly divisible by 8
cudaEventRecord(start2);
EuclideanDistances2 <<<blocks2 , threads2>>> (d_matrixA , d_matrixB , d_results_kernel , n , m);
cudaEventRecord(stop2);
cudaMemcpy(results_kernel , d_results_kernel , n * m *sizeof(float) , cudaMemcpyDeviceToHost);
for (int i = 0; i< n*m; i++) {
if (results_kernel[i] != cpu_results_kernel[i]) {printf("cpu/kernel2 mismatch at %d, cpu: %f, kernel1: %f\n", i, cpu_results_kernel[i], results_kernel[i]); return 1;}}
cudaMemset(d_results_kernel, 0, n*m*sizeof(float));
cudaEventSynchronize(stop2);
cudaEventElapsedTime(&et2, start2, stop2);
cudaFuncSetCacheConfig(EuclideanDistances3, cudaFuncCachePreferL1);
dim3 threads3 (8, 32); // 1024 threads per block (maximum)
dim3 blocks3 (n/threads3.x , m/threads3.y); // assumes evenly divisible
cudaEventRecord(start3);
EuclideanDistances3 <<<blocks3 , threads3>>> (d_matrixA , d_matrixB , d_results_kernel , n , m);
cudaEventRecord(stop3);
cudaMemcpy(results_kernel , d_results_kernel , n * m *sizeof(float) , cudaMemcpyDeviceToHost);
for (int i = 0; i< n*m; i++) {
if (results_kernel[i] != cpu_results_kernel[i]) {printf("cpu/kernel3 mismatch at %d, cpu: %f, kernel3: %f\n", i, cpu_results_kernel[i], results_kernel[i]); return 1;}}
cudaMemset(d_results_kernel, 0, n*m*sizeof(float));
cudaEventSynchronize(stop3);
cudaEventElapsedTime(&et3, start3, stop3);
// transpose matrix A
float *matrixA_T = (float *) malloc (n * SIZE * sizeof(float));
for (int i = 0; i < n; i++)
for (int j = 0; j < SIZE; j++)
matrixA_T[(j*n)+i] = matrixA[(i*SIZE)+j];
cudaMemcpy(d_matrixA , matrixA_T , n * SIZE * sizeof(float) , cudaMemcpyHostToDevice);
cudaFuncSetCacheConfig(EuclideanDistances4, cudaFuncCachePreferL1);
dim3 threads4(SIZE); // one thread per vector element
dim3 blocks4(m/CHKSIZE);
cudaEventRecord(start4);
EuclideanDistances4 <<<blocks4 , threads4>>> (d_matrixA , d_matrixB , d_results_kernel , n , m);
cudaEventRecord(stop4);
cudaMemcpy(results_kernel , d_results_kernel , n * m *sizeof(float) , cudaMemcpyDeviceToHost);
// test for correct transposed result C(m,n)
for (int i = 0; i< n; i++)
for (int j = 0; j < m; j++)
if (results_kernel[(j*n)+i] != cpu_results_kernel[(i*m)+j]) {printf("cpu/kernel4 mismatch at %d,%d, cpu: %f, kernel4: %f\n", i,j, cpu_results_kernel[(i*m)+j], results_kernel[(j*n)+i]); return 1;}
cudaEventSynchronize(stop4);
cudaEventElapsedTime(&et4, start4, stop4);
cudaFree(d_results_kernel);
printf("Success!\n");
printf("kernel1 : %.fms, kernel2 : %.fms, kernel3 : %.fms, kernel4 : %.fms\n", et1, et2, et3, et4);
free(matrixA);
free(matrixB);
free(results_kernel);
return 0;
}
$ nvcc -O3 -arch=sm_20 -o t460 t460.cu
$ ./t460
Success!
kernel1 : 2213ms, kernel2 : 4660ms, kernel3 : 691ms, kernel4 : 99ms
$
Hopefully that will get you going with more ideas of things to work on. You may get different timings of course on your cc3.0 device.
Are further optimizations possible? Probably. The first target I would look at would be figuring out how to take advantage of the data-reuse opportunities on vector A. (Data re-use of vector B is already handled in kernel 4 by loading it into shared memory. There may be ways to use some shared memory to store portions of A to make the code run even faster.)
I guess I should also mention that, following the lead of the code you provided, this code computes the square of the Euclidean distance. A trivial modification to the kernels can make them compute the actual Euclidean distance instead (C[...] = sqrtf(...);). The validation I have included, however, assumes the results are "in-range" for exact storage of an integer quantity in a float. Your test case satisfies this requirement, but otherwise the validation code would need to be modified (if sqrtf were used).
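In the naive kernel (#3), for example, that change would just be the final write:
// last line of the if-block in EuclideanDistances3:
C[(idx*m) + idy] = sqrtf(result);  // actual Euclidean distance instead of its square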

Cuda, calculate distance matrix between 3d objects

I have a "string"(molecule) of connected N objects(atoms) in 3D (each atom has a coordinates). And I need to calculate a distance between each pair of atoms in a molecule (see pseudo code below ). How could it be done with CUDA? Should I pass to a kernel function 2 3D Arrays? Or 3 arrays with coordinates: X[N], Y[N], Z[N]? Thanks.
struct atom
{
double x,y,z;
};
int main()
{
//N number of atoms in a molecule
double DistanceMatrix[N][N];
double d;
atom Atoms[N];
for (int i = 0; i < N; i++)
for (int j = 0; j < N; j++)
DistanceMatrix[i][j] = (Atoms[i].x - Atoms[j].x) * (Atoms[i].x - Atoms[j].x) +
(Atoms[i].y - Atoms[j].y) * (Atoms[i].y - Atoms[j].y) + (Atoms[i].z - Atoms[j].z) * (Atoms[i].z - Atoms[j].z);
}
Unless you're working with very large molecules, there probably won't be enough work to keep the GPU busy, so calculations will be faster with the CPU.
If you meant to calculate the Euclidean distance, your calculation is not correct. You need the 3D version of the Pythagorean theorem.
I would use a SoA for storing the coordinates.
You want to generate a memory access pattern with as many coalesced reads and writes as possible. To do that, arrange for addresses or indexes generated by the 32 threads in each warp to be as close to each other as possible (a bit simplified).
threadIdx designates thread indexes within a block and blockIdx designates block indexes within the grid. blockIdx is always the same for all threads in a warp; only threadIdx varies between the threads in a block. To visualize how the 3 dimensions of threadIdx are assigned to threads, think of them as nested loops where x is the inner loop and z is the outer loop. So, threads with adjacent x values are the most likely to be within the same warp, and if blockDim.x is divisible by 32, only threads sharing the same x / 32 value are within the same warp.
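A small sketch of that linearisation (standard CUDA thread numbering, not something specific to this answer):
// linear thread id within a block; x is the fastest-varying dimension
int linear_tid = threadIdx.x
               + threadIdx.y * blockDim.x
               + threadIdx.z * blockDim.x * blockDim.y;
int warp_id = linear_tid / 32;  // threads with the same warp_id form one warp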
I have included a complete example for your algorithm below. In the example, the i index is derived from threadIdx.x so, to check that warps would generate coalesced reads and writes, I would go over the code while inserting a few consecutive values such as 0, 1 and 2 for i and checking that the generated indexes would also be consecutive.
Addresses generated from the j index are less important, as j is derived from threadIdx.y and so is less likely to vary within a warp (and will never vary if blockDim.x is divisible by 32).
#include "cuda_runtime.h"
#include <iostream>
using namespace std;
const int N(20);
#define check(ans) { _check((ans), __FILE__, __LINE__); }
inline void _check(cudaError_t code, const char *file, int line)
{
if (code != cudaSuccess) {
fprintf(stderr,"CUDA Error: %s %s %d\n", cudaGetErrorString(code), file, line);
exit(code);
}
}
int div_up(int a, int b) {
return ((a % b) != 0) ? (a / b + 1) : (a / b);
}
__global__ void calc_distances(double* distances,
double* atoms_x, double* atoms_y, double* atoms_z);
int main(int argc, char **argv)
{
double* atoms_x_h;
check(cudaMallocHost(&atoms_x_h, N * sizeof(double)));
double* atoms_y_h;
check(cudaMallocHost(&atoms_y_h, N * sizeof(double)));
double* atoms_z_h;
check(cudaMallocHost(&atoms_z_h, N * sizeof(double)));
for (int i(0); i < N; ++i) {
atoms_x_h[i] = i;
atoms_y_h[i] = i;
atoms_z_h[i] = i;
}
double* atoms_x_d;
check(cudaMalloc(&atoms_x_d, N * sizeof(double)));
double* atoms_y_d;
check(cudaMalloc(&atoms_y_d, N * sizeof(double)));
double* atoms_z_d;
check(cudaMalloc(&atoms_z_d, N * sizeof(double)));
check(cudaMemcpy(atoms_x_d, atoms_x_h, N * sizeof(double), cudaMemcpyHostToDevice));
check(cudaMemcpy(atoms_y_d, atoms_y_h, N * sizeof(double), cudaMemcpyHostToDevice));
check(cudaMemcpy(atoms_z_d, atoms_z_h, N * sizeof(double), cudaMemcpyHostToDevice));
double* distances_d;
check(cudaMalloc(&distances_d, N * N * sizeof(double)));
// the kernel indexes in 2D (i from the x dimension, j from the y dimension),
// so launch a 2D grid; a (32, 8) block keeps 256 threads per block with warps varying only in x
dim3 threads_per_block(32, 8);
dim3 n_blocks(div_up(N, threads_per_block.x), div_up(N, threads_per_block.y));
calc_distances<<<n_blocks, threads_per_block>>>(distances_d, atoms_x_d, atoms_y_d, atoms_z_d);
check(cudaPeekAtLastError());
check(cudaDeviceSynchronize());
double* distances_h;
check(cudaMallocHost(&distances_h, N * N * sizeof(double)));
check(cudaMemcpy(distances_h, distances_d, N * N * sizeof(double), cudaMemcpyDeviceToHost));
for (int i(0); i < N; ++i) {
for (int j(0); j < N; ++j) {
cout << "(" << i << "," << j << "): " << distances_h[i + N * j] << endl;
}
}
check(cudaFree(distances_d));
check(cudaFreeHost(distances_h));
check(cudaFree(atoms_x_d));
check(cudaFreeHost(atoms_x_h));
check(cudaFree(atoms_y_d));
check(cudaFreeHost(atoms_y_h));
check(cudaFree(atoms_z_d));
check(cudaFreeHost(atoms_z_h));
return 0;
}
__global__ void calc_distances(double* distances,
double* atoms_x, double* atoms_y, double* atoms_z)
{
int i(threadIdx.x + blockIdx.x * blockDim.x);
int j(threadIdx.y + blockIdx.y * blockDim.y);
if (i >= N || j >= N) {
return;
}
distances[i + N * j] =
(atoms_x[i] - atoms_x[j]) * (atoms_x[i] - atoms_x[j]) +
(atoms_y[i] - atoms_y[j]) * (atoms_y[i] - atoms_y[j]) +
(atoms_z[i] - atoms_z[j]) * (atoms_z[i] - atoms_z[j]);
}