CUDA Dynamic Parallelism, bad performance - c++

We are having performance issues when using CUDA Dynamic Parallelism (CDP). At the moment, CDP is performing at least 3X slower than a traditional approach.
We made the simplest reproducible example to show this issue: increment the value of every element of an array by +1, i.e.,
a[0,0,0,0,0,0,0,.....,0] --> kernel +1 --> a[1,1,1,1,1,1,1,1,1]
The point of this simple example is just to see whether CDP can perform as well as the other approaches, or whether there are serious overheads.
The code is here:
#include <stdio.h>
#include <cuda.h>
#define BLOCKSIZE 512
__global__ void kernel_parent(int *a, int n, int N);
__global__ void kernel_simple(int *a, int n, int N, int offset);
// N is the total array size
// n is the worksize for a kernel (one third of N)
__global__ void kernel_parent(int *a, int n, int N){
cudaStream_t s1, s2;
cudaStreamCreateWithFlags(&s1, cudaStreamNonBlocking);
cudaStreamCreateWithFlags(&s2, cudaStreamNonBlocking);
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid == 0){
dim3 block(BLOCKSIZE, 1, 1);
dim3 grid( (n + BLOCKSIZE - 1)/BLOCKSIZE, 1, 1);
kernel_simple<<< grid, block, 0, s1 >>> (a, n, N, n);
kernel_simple<<< grid, block, 0, s2 >>> (a, n, N, 2*n);
}
a[tid] += 1;
}
__global__ void kernel_simple(int *a, int n, int N, int offset){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int pos = tid + offset;
if(pos < N){
a[pos] += 1;
}
}
int main(int argc, char **argv){
if(argc != 3){
fprintf(stderr, "run as ./prog n method\nn multiple of 32 eg: 1024, 1048576 (1024^2), 4194304 (2048^2), 16777216 (4096^2)\nmethod:\n0 (traditional) \n1 (dynamic parallelism)\n2 (three kernels using unique streams)\n");
exit(EXIT_FAILURE);
}
int N = atoi(argv[1])*3;
int method = atoi(argv[2]);
// init array as 0
int *ah, *ad;
printf("genarray of 3*N = %i.......", N); fflush(stdout);
ah = (int*)malloc(sizeof(int)*N);
for(int i=0; i<N; ++i){
ah[i] = 0;
}
printf("done\n"); fflush(stdout);
// malloc and copy array to gpu
printf("cudaMemcpy:Host->Device..........", N); fflush(stdout);
cudaMalloc(&ad, sizeof(int)*N);
cudaMemcpy(ad, ah, sizeof(int)*N, cudaMemcpyHostToDevice);
printf("done\n"); fflush(stdout);
// kernel launch (timed)
cudaStream_t s1, s2, s3;
cudaStreamCreateWithFlags(&s1, cudaStreamNonBlocking);
cudaStreamCreateWithFlags(&s2, cudaStreamNonBlocking);
cudaStreamCreateWithFlags(&s3, cudaStreamNonBlocking);
cudaEvent_t start, stop;
float rtime = 0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);
printf("Kernel...........................", N); fflush(stdout);
if(method == 0){
// CLASSIC KERNEL LAUNCH
dim3 block(BLOCKSIZE, 1, 1);
dim3 grid( (N + BLOCKSIZE - 1)/BLOCKSIZE, 1, 1);
cudaEventRecord(start, 0);
kernel_simple<<< grid, block >>> (ad, N, N, 0);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
}
else if(method == 1){
// DYNAMIC PARALLELISM
dim3 block(BLOCKSIZE, 1, 1);
dim3 grid( (N/3 + BLOCKSIZE - 1)/BLOCKSIZE, 1, 1);
cudaEventRecord(start, 0);
kernel_parent<<< grid, block, 0, s1 >>> (ad, N/3, N);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
}
else{
// THREE CONCURRENT KERNEL LAUNCHES USING STREAMS
dim3 block(BLOCKSIZE, 1, 1);
dim3 grid( (N/3 + BLOCKSIZE - 1)/BLOCKSIZE, 1, 1);
cudaEventRecord(start, 0);
kernel_simple<<< grid, block, 0, s1 >>> (ad, N/3, N, 0);
kernel_simple<<< grid, block, 0, s2 >>> (ad, N/3, N, N/3);
kernel_simple<<< grid, block, 0, s3 >>> (ad, N/3, N, 2*(N/3));
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
}
printf("done\n"); fflush(stdout);
printf("cudaMemcpy:Device->Host..........", N); fflush(stdout);
cudaMemcpy(ah, ad, sizeof(int)*N, cudaMemcpyDeviceToHost);
printf("done\n"); fflush(stdout);
printf("checking result.................."); fflush(stdout);
for(int i=0; i<N; ++i){
if(ah[i] != 1){
fprintf(stderr, "bad element: a[%i] = %i\n", i, ah[i]);
exit(EXIT_FAILURE);
}
}
printf("done\n"); fflush(stdout);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&rtime, start, stop);
printf("rtime: %f ms\n", rtime); fflush(stdout);
return EXIT_SUCCESS;
}
The code can be compiled with:
nvcc -arch=sm_35 -rdc=true -lineinfo -lcudadevrt -use_fast_math main.cu -o prog
This example can compute the result with 3 methods:
Simple Kernel: Just a single classic kernel +1 pass on the array.
Dynamic Parallelism: from main(), call a parent kernel which does +1 on the range [0, N/3) and also launches two child kernels. The first child does +1 on the range [N/3, 2*N/3), the second child on the range [2*N/3, N). The children are launched in different streams so they can run concurrently.
Three Streams from Host: this one just launches three kernels from main(), each in its own non-blocking stream, one for each third of the array.
I get profiler timelines for method 0 (simple kernel), method 1 (dynamic parallelism), and method 2 (three streams from host); the profiler screenshots are not reproduced here.
The running times are as follows:
➜ simple-cdp git:(master) ✗ ./prog 16777216 0
genarray of 3*N = 50331648.......done
cudaMemcpy:Host->Device..........done
Kernel...........................done
cudaMemcpy:Device->Host..........done
checking result..................done
rtime: 1.140928 ms
➜ simple-cdp git:(master) ✗ ./prog 16777216 1
genarray of 3*N = 50331648.......done
cudaMemcpy:Host->Device..........done
Kernel...........................done
cudaMemcpy:Device->Host..........done
checking result..................done
rtime: 5.790048 ms
➜ simple-cdp git:(master) ✗ ./prog 16777216 2
genarray of 3*N = 50331648.......done
cudaMemcpy:Host->Device..........done
Kernel...........................done
cudaMemcpy:Device->Host..........done
checking result..................done
rtime: 1.011936 ms
The main problem, visible from the profiles, is that in the Dynamic Parallelism method the parent kernel takes an excessive amount of time to finish after the two child kernels have completed, which is what makes it 3X to 4X slower. Even in the worst case, with all three kernels (parent and two children) running serially, it should take much less: each kernel does N/3 of the work, so the whole parent kernel should take roughly as long as three child kernels back to back, which is far less than what is measured. Is there a way to solve this problem?
EDIT: The serialization of the child kernels, as well as of the kernels in method 2, has been explained by Robert Crovella in the comments (many thanks). The fact that the kernels ran serially does not invalidate the problem described above (at least not for now).

Calls into the device runtime are "expensive", just like calls into the host runtime are expensive. In this case, it seems that you are calling into the device runtime to create streams for every thread, even though this code only requires them for thread 0.
By modifying your code to only request the stream creation for thread 0, we can produce timing parity between the case where we are using separate streams for the child kernel launch, and the case where we are not using separate streams for the child kernel launch:
$ cat t370.cu
#include <stdio.h>
#define BLOCKSIZE 512
__global__ void kernel_parent(int *a, int n, int N);
__global__ void kernel_simple(int *a, int n, int N, int offset);
// N is the total array size
// n is the worksize for a kernel (one third of N)
__global__ void kernel_parent(int *a, int n, int N){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if(tid == 0){
dim3 block(BLOCKSIZE, 1, 1);
dim3 grid( (n + BLOCKSIZE - 1)/BLOCKSIZE, 1, 1);
#ifdef USE_STREAMS
cudaStream_t s1, s2;
cudaStreamCreateWithFlags(&s1, cudaStreamNonBlocking);
cudaStreamCreateWithFlags(&s2, cudaStreamNonBlocking);
kernel_simple<<< grid, block, 0, s1 >>> (a, n, N, n);
kernel_simple<<< grid, block, 0, s2 >>> (a, n, N, 2*n);
#else
kernel_simple<<< grid, block >>> (a, n, N, n);
kernel_simple<<< grid, block >>> (a, n, N, 2*n);
#endif
// these next 2 lines add noticeably to the overall timing
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) printf("oops1: %d\n", (int)err);
}
a[tid] += 1;
}
__global__ void kernel_simple(int *a, int n, int N, int offset){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int pos = tid + offset;
if(pos < N){
a[pos] += 1;
}
}
int main(int argc, char **argv){
if(argc != 3){
fprintf(stderr, "run as ./prog n method\nn multiple of 32 eg: 1024, 1048576 (1024^2), 4194304 (2048^2), 16777216 (4096^2)\nmethod:\n0 (traditional) \n1 (dynamic parallelism)\n2 (three kernels using unique streams)\n");
exit(EXIT_FAILURE);
}
int N = atoi(argv[1])*3;
int method = atoi(argv[2]);
// init array as 0
int *ah, *ad;
printf("genarray of 3*N = %i.......", N); fflush(stdout);
ah = (int*)malloc(sizeof(int)*N);
for(int i=0; i<N; ++i){
ah[i] = 0;
}
printf("done\n"); fflush(stdout);
// malloc and copy array to gpu
printf("cudaMemcpy:Host->Device..........", N); fflush(stdout);
cudaMalloc(&ad, sizeof(int)*N);
cudaMemcpy(ad, ah, sizeof(int)*N, cudaMemcpyHostToDevice);
printf("done\n"); fflush(stdout);
// kernel launch (timed)
cudaStream_t s1, s2, s3;
cudaStreamCreateWithFlags(&s1, cudaStreamNonBlocking);
cudaStreamCreateWithFlags(&s2, cudaStreamNonBlocking);
cudaStreamCreateWithFlags(&s3, cudaStreamNonBlocking);
cudaEvent_t start, stop;
float rtime = 0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);
printf("Kernel...........................", N); fflush(stdout);
if(method == 0){
// CLASSIC KERNEL LAUNCH
dim3 block(BLOCKSIZE, 1, 1);
dim3 grid( (N + BLOCKSIZE - 1)/BLOCKSIZE, 1, 1);
cudaEventRecord(start, 0);
kernel_simple<<< grid, block >>> (ad, N, N, 0);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
}
else if(method == 1){
// DYNAMIC PARALLELISM
dim3 block(BLOCKSIZE, 1, 1);
dim3 grid( (N/3 + BLOCKSIZE - 1)/BLOCKSIZE, 1, 1);
cudaEventRecord(start, 0);
kernel_parent<<< grid, block, 0, s1 >>> (ad, N/3, N);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
}
else{
// THREE CONCURRENT KERNEL LAUNCHES USING STREAMS
dim3 block(BLOCKSIZE, 1, 1);
dim3 grid( (N/3 + BLOCKSIZE - 1)/BLOCKSIZE, 1, 1);
cudaEventRecord(start, 0);
kernel_simple<<< grid, block, 0, s1 >>> (ad, N/3, N, 0);
kernel_simple<<< grid, block, 0, s2 >>> (ad, N/3, N, N/3);
kernel_simple<<< grid, block, 0, s3 >>> (ad, N/3, N, 2*(N/3));
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
}
printf("done\n"); fflush(stdout);
printf("cudaMemcpy:Device->Host..........", N); fflush(stdout);
cudaMemcpy(ah, ad, sizeof(int)*N, cudaMemcpyDeviceToHost);
printf("done\n"); fflush(stdout);
printf("checking result.................."); fflush(stdout);
for(int i=0; i<N; ++i){
if(ah[i] != 1){
fprintf(stderr, "bad element: a[%i] = %i\n", i, ah[i]);
exit(EXIT_FAILURE);
}
}
printf("done\n"); fflush(stdout);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&rtime, start, stop);
printf("rtime: %f ms\n", rtime); fflush(stdout);
return EXIT_SUCCESS;
}
$ nvcc -arch=sm_52 -rdc=true -lcudadevrt -o t370 t370.cu
$ ./t370 16777216 1
genarray of 3*N = 50331648.......done
cudaMemcpy:Host->Device..........done
Kernel...........................done
cudaMemcpy:Device->Host..........done
checking result..................done
rtime: 6.925632 ms
$ nvcc -arch=sm_52 -rdc=true -lcudadevrt -o t370 t370.cu -DUSE_STREAMS
$ ./t370 16777216 1
genarray of 3*N = 50331648.......done
cudaMemcpy:Host->Device..........done
Kernel...........................done
cudaMemcpy:Device->Host..........done
checking result..................done
rtime: 6.673568 ms
$
Although not included in the test output above, according to my testing, this also brings the CUDA dynamic parallelism (CDP) case (1) into "approximate parity" with the non-CDP cases (0, 2). Note that we can shave about 1 ms (!) off the above time by forgoing the call to cudaGetLastError() in the parent kernel (which I added to your code).
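For reference, a minimal sketch (my own variant, not part of the timings above) of keeping that check for debug builds only, by guarding it with a hypothetical DEBUG_CDP macro inside kernel_parent:
#ifdef DEBUG_CDP
// device-side error check: only compiled in when -DDEBUG_CDP is passed,
// so release builds skip the ~1 ms device runtime call
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) printf("oops1: %d\n", (int)err);
#endif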

#include <stdio.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
using thrust::host_vector;
using thrust::device_vector;
#define BLOCKSIZE 512
__global__ void child(int* a)
{
if (threadIdx.x == 0 && blockIdx.x == 0)
a[0]++;
}
__global__ void parent(int* a)
{
if (threadIdx.x == 0 && blockIdx.x == 0)
child<<<gridDim, blockDim>>>(a);
}
#define NBLOCKS 1024
#define NTHREADS 1024
#define BENCHCOUNT 1000
template<typename Lambda>
void runBench(Lambda arg, int* rp, const char* name)
{
// "preheat" the GPU
for (int i = 0; i < 100; i++)
child<<<dim3(NBLOCKS,1,1), dim3(NTHREADS,1,1)>>>(rp);
cudaEvent_t start, stop;
float rtime = 0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
for (int i = 0; i < BENCHCOUNT; i++)
arg();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&rtime, start, stop);
printf("=== %s ===\n", name);
printf("time: %f ms\n", rtime/BENCHCOUNT); fflush(stdout);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaDeviceSynchronize();
}
int main(int argc, char **argv)
{
host_vector<int> hv(1);
hv[0] = 0xAABBCCDD;
device_vector<int> dv(1);
dv = hv;
int* rp = thrust::raw_pointer_cast(&dv[0]);
auto benchFun = [&](void) {
child<<<dim3(NBLOCKS,1,1), dim3(NTHREADS,1,1)>>>(rp); };
runBench(benchFun, rp, "Single kernel launch");
auto benchFun2 = [&](void) {
for (int j = 0; j < 2; j++)
child<<<dim3(NBLOCKS,1,1), dim3(NTHREADS,1,1)>>>(rp);
};
runBench(benchFun2, rp, "2x sequential kernel launch");
auto benchFunDP = [&](void) {
parent<<<dim3(NBLOCKS,1,1), dim3(NTHREADS,1,1)>>>(rp); };
runBench(benchFunDP, rp, "Nested kernel launch");
}
To build/run:
Copy/paste code above to dpar.cu
nvcc -arch=sm_52 -rdc=true -std=c++11 -lcudadevrt -o dpar dpar.cu
./dpar
On my p5000 laptop it prints:
=== Single kernel launch ===
time: 0.014297 ms
=== 2x sequential kernel launch ===
time: 0.030468 ms
=== Nested kernel launch ===
time: 0.083820 ms
So the overhead is quite large; in my case it looks like about 43 microseconds.


CUDA kernel stops working when using cooperative groups grid sync() function

I’m trying to write a kernel whose threads iteratively process items in a work queue. My understanding is that I should be able to do this by using atomic operations to manipulate the work queue (i.e., grab work items from the queue and insert new work items into the queue), and using grid synchronization via cooperative groups to ensure all threads are at the same iteration (I ensure the number of thread blocks doesn’t exceed the device capacity for the kernel). However, sometimes I observe that work items are skipped or processed multiple times during an iteration.
The following code is a working example that shows this. In this example, an array of size input_len is created, which holds work items 0 to input_len - 1. The processWorkItems kernel processes these items for max_iter iterations. Each work item can put itself and its previous and next work items into the work queue, and the marked array is used to ensure that during an iteration each work item is added to the work queue at most once. What should happen in the end is that the sum of the values in histogram equals input_len * max_iter, and that no value in histogram is greater than 1. But I observe that occasionally both of these criteria are violated in the output, which implies that I'm not getting atomic operations and/or proper synchronization. I would appreciate it if someone could point out the flaws in my reasoning and/or implementation. My OS is Ubuntu 18.04, the CUDA version is 10.1, and I've run experiments on P100, V100, and RTX 2080 Ti GPUs and observed similar behavior.
The command I use for compiling for RTX 2080 Ti:
nvcc -O3 -o atomicsync atomicsync.cu --gpu-architecture=compute_75 -rdc=true
Some inputs and outputs of runs on RTX 2080 Ti:
./atomicsync 50 1000 1000
Skipped 0.01% of items. 5 extra item processing.
./atomicsync 500 1000 1000
Skipped 0.00% of items. 6 extra item processing.
./atomicsync 5000 1000 1000
Skipped 0.00% of items. 14 extra item processing.
atomicsync.cu:
#include <stdio.h>
#include <cooperative_groups.h>
#define checkCudaErrors(val) check ( (val), #val, __FILE__, __LINE__ )
template< typename T >
void check(T result, char const *const func, const char *const file, int const line)
{
if (result)
{
fprintf(stderr, "CUDA error at %s:%d code=%d(%s) \"%s\" \n", file, line, static_cast<unsigned int>(result), cudaGetErrorString(result), func);
cudaDeviceReset();
exit(EXIT_FAILURE);
}
}
__device__ inline void addWorkItem(int input_len, int item, int item_adder, int iter, int *queue, int *queue_size, int *marked) {
int already_marked = atomicExch(&marked[item], 1);
if(already_marked == 0) {
int idx = atomicAdd(&queue_size[iter + 1], 1);
queue[(iter + 1) * input_len + idx] = item;
}
}
__global__ void processWorkItems(int input_len, int max_iter, int *histogram, int *queue, int *queue_size, int *marked) {
auto grid = cooperative_groups::this_grid();
const int items_per_block = (input_len + gridDim.x - 1) / gridDim.x;
for(int iter = 0; iter < max_iter; ++iter) {
while(true) {
// Grab work item to process
int idx = atomicSub(&queue_size[iter], 1);
--idx;
if(idx < 0) {
break;
}
int item = queue[iter * input_len + idx];
// Keep track of processed work items
++histogram[iter * input_len + item];
// Add previous, self, and next work items to work queue
if(item > 0) {
addWorkItem(input_len, item - 1, item, iter, queue, queue_size, marked);
}
addWorkItem(input_len, item, item, iter, queue, queue_size, marked);
if(item + 1 < input_len) {
addWorkItem(input_len, item + 1, item, iter, queue, queue_size, marked);
}
}
__threadfence_system();
grid.sync();
// Reset marked array for next iteration
for(int i = 0; i < items_per_block; ++i) {
if(blockIdx.x * items_per_block + i < input_len) {
marked[blockIdx.x * items_per_block + i] = 0;
}
}
__threadfence_system();
grid.sync();
}
}
int main(int argc, char* argv[])
{
int input_len = atoi(argv[1]);
int max_iter = atoi(argv[2]);
int num_blocks = atoi(argv[3]);
// A histogram to keep track of work items that have been processed in each iteration
int histogram_host[input_len * max_iter];
memset(histogram_host, 0, sizeof(int) * input_len * max_iter);
int *histogram_device;
checkCudaErrors(cudaMalloc(&histogram_device, sizeof(int) * input_len * max_iter));
checkCudaErrors(cudaMemcpy(histogram_device, histogram_host, sizeof(int) * input_len * max_iter, cudaMemcpyHostToDevice));
// Size of the work queue for each iteration
int queue_size_host[max_iter + 1];
queue_size_host[0] = input_len;
memset(&queue_size_host[1], 0, sizeof(int) * max_iter);
int *queue_size_device;
checkCudaErrors(cudaMalloc(&queue_size_device, sizeof(int) * (max_iter + 1)));
checkCudaErrors(cudaMemcpy(queue_size_device, queue_size_host, sizeof(int) * (max_iter + 1), cudaMemcpyHostToDevice));
// Work queue
int queue_host[input_len * (max_iter + 1)];
for(int i = 0; i < input_len; ++i) {
queue_host[i] = i;
}
memset(&queue_host[input_len], 0, sizeof(int) * input_len * max_iter);
int *queue_device;
checkCudaErrors(cudaMalloc(&queue_device, sizeof(int) * input_len * (max_iter + 1)));
checkCudaErrors(cudaMemcpy(queue_device, queue_host, sizeof(int) * input_len * (max_iter + 1), cudaMemcpyHostToDevice));
// An array used to keep track of work items already added to the work queue to
// avoid multiple additions of a work item in the same iteration
int marked_host[input_len];
memset(marked_host, 0, sizeof(int) * input_len);
int *marked_device;
checkCudaErrors(cudaMalloc(&marked_device, sizeof(int) * input_len));
checkCudaErrors(cudaMemcpy(marked_device, marked_host, sizeof(int) * input_len, cudaMemcpyHostToDevice));
const dim3 threads(1, 1, 1);
const dim3 blocks(num_blocks, 1, 1);
processWorkItems<<<blocks, threads>>>(input_len, max_iter, histogram_device, queue_device, queue_size_device, marked_device);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(histogram_host, histogram_device, sizeof(int) * input_len * max_iter, cudaMemcpyDeviceToHost));
int extra = 0;
double deficit = 0;
for(int i = 0; i < input_len; ++i) {
int cnt = 0;
for(int iter = 0; iter < max_iter; ++iter) {
if(histogram_host[iter * input_len + i] > 1) {
++extra;
}
cnt += histogram_host[iter * input_len + i];
}
deficit += max_iter - cnt;
}
printf("Skipped %.2f%% of items. %d extra item processing.\n", deficit / (input_len * max_iter) * 100, extra);
checkCudaErrors(cudaFree(histogram_device));
checkCudaErrors(cudaFree(queue_device));
checkCudaErrors(cudaFree(queue_size_device));
checkCudaErrors(cudaFree(marked_device));
return 0;
}
You may wish to read how to do a cooperative grid kernel launch in the programming guide or study any of the CUDA sample codes (e.g. reductionMultiBlockCG, among others) that use a grid sync.
You're doing it incorrectly. You cannot launch a cooperative grid with ordinary <<<...>>> launch syntax. Because of that, there is no reason to assume that the grid.sync() in your kernel is working correctly.
It's easy to see the grid sync is not working in your code by running it under cuda-memcheck. When you do that the results will get drastically worse.
When I modify your code to do a proper cooperative launch, I have no issues on Tesla V100:
$ cat t1811.cu
#include <stdio.h>
#include <cooperative_groups.h>
#define checkCudaErrors(val) check ( (val), #val, __FILE__, __LINE__ )
template< typename T >
void check(T result, char const *const func, const char *const file, int const line)
{
if (result)
{
fprintf(stderr, "CUDA error at %s:%d code=%d(%s) \"%s\" \n", file, line, static_cast<unsigned int>(result), cudaGetErrorString(result), func);
cudaDeviceReset();
exit(EXIT_FAILURE);
}
}
__device__ inline void addWorkItem(int input_len, int item, int item_adder, int iter, int *queue, int *queue_size, int *marked) {
int already_marked = atomicExch(&marked[item], 1);
if(already_marked == 0) {
int idx = atomicAdd(&queue_size[iter + 1], 1);
queue[(iter + 1) * input_len + idx] = item;
}
}
__global__ void processWorkItems(int input_len, int max_iter, int *histogram, int *queue, int *queue_size, int *marked) {
auto grid = cooperative_groups::this_grid();
const int items_per_block = (input_len + gridDim.x - 1) / gridDim.x;
for(int iter = 0; iter < max_iter; ++iter) {
while(true) {
// Grab work item to process
int idx = atomicSub(&queue_size[iter], 1);
--idx;
if(idx < 0) {
break;
}
int item = queue[iter * input_len + idx];
// Keep track of processed work items
++histogram[iter * input_len + item];
// Add previous, self, and next work items to work queue
if(item > 0) {
addWorkItem(input_len, item - 1, item, iter, queue, queue_size, marked);
}
addWorkItem(input_len, item, item, iter, queue, queue_size, marked);
if(item + 1 < input_len) {
addWorkItem(input_len, item + 1, item, iter, queue, queue_size, marked);
}
}
__threadfence_system();
grid.sync();
// Reset marked array for next iteration
for(int i = 0; i < items_per_block; ++i) {
if(blockIdx.x * items_per_block + i < input_len) {
marked[blockIdx.x * items_per_block + i] = 0;
}
}
__threadfence_system();
grid.sync();
}
}
int main(int argc, char* argv[])
{
int input_len = atoi(argv[1]);
int max_iter = atoi(argv[2]);
int num_blocks = atoi(argv[3]);
// A histogram to keep track of work items that have been processed in each iteration
int *histogram_host = new int[input_len * max_iter];
memset(histogram_host, 0, sizeof(int) * input_len * max_iter);
int *histogram_device;
checkCudaErrors(cudaMalloc(&histogram_device, sizeof(int) * input_len * max_iter));
checkCudaErrors(cudaMemcpy(histogram_device, histogram_host, sizeof(int) * input_len * max_iter, cudaMemcpyHostToDevice));
// Size of the work queue for each iteration
int queue_size_host[max_iter + 1];
queue_size_host[0] = input_len;
memset(&queue_size_host[1], 0, sizeof(int) * max_iter);
int *queue_size_device;
checkCudaErrors(cudaMalloc(&queue_size_device, sizeof(int) * (max_iter + 1)));
checkCudaErrors(cudaMemcpy(queue_size_device, queue_size_host, sizeof(int) * (max_iter + 1), cudaMemcpyHostToDevice));
// Work queue
int *queue_host = new int[input_len * (max_iter + 1)];
for(int i = 0; i < input_len; ++i) {
queue_host[i] = i;
}
memset(&queue_host[input_len], 0, sizeof(int) * input_len * max_iter);
int *queue_device;
checkCudaErrors(cudaMalloc(&queue_device, sizeof(int) * input_len * (max_iter + 1)));
checkCudaErrors(cudaMemcpy(queue_device, queue_host, sizeof(int) * input_len * (max_iter + 1), cudaMemcpyHostToDevice));
// An array used to keep track of work items already added to the work queue to
// avoid multiple additions of a work item in the same iteration
int marked_host[input_len];
memset(marked_host, 0, sizeof(int) * input_len);
int *marked_device;
checkCudaErrors(cudaMalloc(&marked_device, sizeof(int) * input_len));
checkCudaErrors(cudaMemcpy(marked_device, marked_host, sizeof(int) * input_len, cudaMemcpyHostToDevice));
const dim3 threads(1, 1, 1);
const dim3 blocks(num_blocks, 1, 1);
int dev = 0;
int supportsCoopLaunch = 0;
checkCudaErrors(cudaDeviceGetAttribute(&supportsCoopLaunch, cudaDevAttrCooperativeLaunch, dev));
if (!supportsCoopLaunch) {printf("Cooperative Launch is not supported on this machine configuration. Exiting."); return 0;}
/// This will launch a grid that can maximally fill the GPU, on the default stream with kernel arguments
int numBlocksPerSm = 0;
// Number of threads my_kernel will be launched with
int numThreads = threads.x;
cudaDeviceProp deviceProp;
checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev));
checkCudaErrors(cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, processWorkItems, numThreads, 0));
// launch
void *kernelArgs[] = { &input_len, &max_iter, &histogram_device, &queue_device, &queue_size_device, &marked_device};
dim3 dimBlock = dim3(numThreads,1,1);
num_blocks = min(num_blocks, deviceProp.multiProcessorCount*numBlocksPerSm);
dim3 dimGrid(num_blocks, 1, 1);
printf("launching %d blocks\n", dimGrid.x);
checkCudaErrors(cudaLaunchCooperativeKernel((void*)processWorkItems, dimGrid, dimBlock, kernelArgs));
// processWorkItems<<<blocks, threads>>>(input_len, max_iter, histogram_device, queue_device, queue_size_device, marked_device);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(histogram_host, histogram_device, sizeof(int) * input_len * max_iter, cudaMemcpyDeviceToHost));
int extra = 0;
double deficit = 0;
for(int i = 0; i < input_len; ++i) {
int cnt = 0;
for(int iter = 0; iter < max_iter; ++iter) {
if(histogram_host[iter * input_len + i] > 1) {
++extra;
}
cnt += histogram_host[iter * input_len + i];
}
deficit += max_iter - cnt;
}
printf("Skipped %.2f%% of items. %d extra item processing.\n", deficit / (input_len * max_iter) * 100, extra);
checkCudaErrors(cudaFree(histogram_device));
checkCudaErrors(cudaFree(queue_device));
checkCudaErrors(cudaFree(queue_size_device));
checkCudaErrors(cudaFree(marked_device));
return 0;
}
$ nvcc -o t1811 t1811.cu -arch=sm_70 -std=c++11 -rdc=true
$ cuda-memcheck ./t1811 50 1000 5000
========= CUDA-MEMCHECK
launching 2560 blocks
Skipped 0.00% of items. 0 extra item processing.
========= ERROR SUMMARY: 0 errors
$ cuda-memcheck ./t1811 50 1000 1000
========= CUDA-MEMCHECK
launching 1000 blocks
Skipped 0.00% of items. 0 extra item processing.
========= ERROR SUMMARY: 0 errors
$ ./t1811 50 1000 5000
launching 2560 blocks
Skipped 0.00% of items. 0 extra item processing.
$ ./t1811 50 1000 1000
launching 1000 blocks
Skipped 0.00% of items. 0 extra item processing.
$ ./t1811 50 1000 1000
launching 1000 blocks
Skipped 0.00% of items. 0 extra item processing.
$
I'm not suggesting the above code is defect free or suitable for any particular purpose. It is mostly your code. I've modified it just to demonstrate the concepts mentioned.
As an aside, I changed a few of your large stack-based memory allocations to heap-based ones. I don't recommend trying to create large stack-based arrays such as this:
int histogram_host[input_len * max_iter];
In my opinion it's better to do:
int *histogram_host = new int[input_len * max_iter];
As your input command-line parameters become larger, this may become an issue depending on the machine characteristics. This doesn't have much to do with CUDA, however. I've not tried to address every instance of this pattern in your code.
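A related variant (my addition, not from the original answer) is to use std::vector, which is heap-backed, zero-initialized, and freed automatically:
#include <vector>
// heap allocation with automatic cleanup; pass histogram_host.data() wherever an int* is needed
std::vector<int> histogram_host(input_len * max_iter, 0);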
Although not relevant to this particular question, grid sync has other requirements for successful use as well. These are covered in the programming guide and include, but are not limited to:
platform support (e.g. OS, GPU, etc.)
kernel sizing requirements (total number of threads or threadblocks launched)
The programming guide contains convenient, boiler-plate code that may be used to satisfy these requirements.
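A condensed sketch of that boiler-plate, adapted from the pattern already used in the code above (variable names are mine):
int dev = 0, supportsCoopLaunch = 0;
cudaDeviceGetAttribute(&supportsCoopLaunch, cudaDevAttrCooperativeLaunch, dev); // platform support
int numThreads = 1; // threads per block you intend to launch with
int numBlocksPerSm = 0;
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, processWorkItems, numThreads, 0);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, dev);
int maxCoopBlocks = prop.multiProcessorCount * numBlocksPerSm; // upper bound on grid size for a cooperative launch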

How can I make the GPU process much faster than the CPU process with CUDA 10.0 in Visual Studio 2017?

Smart developer!
I am a beginner at CUDA programming and I have a big problem with my code.
The following code is a sample from Nvidia that I changed a little in order to show the GPU process being much faster than the CPU process. However, after compiling this code, I got an unexpected result: the CPU process is much faster than the GPU process.
This is my laptop GPU info (screenshot not reproduced here).
This is my CUDA code for Visual Studio 2017.
===========================================================================
#include <iostream>
#include <cstdio>
#include <windows.h>      // LARGE_INTEGER, QueryPerformanceCounter/Frequency
#include <cuda_runtime.h>
using namespace std;
#define N 10
This is the add2() function, for the GPU process:
__global__ void add2(int *a, int *b, int *c) {
// GPU block index from the grid
//int tid = blockIdx.x; // checking by block index: with a small N you get a slower result than the CPU, but with a big number this is much faster than the CPU
// GPU thread index
//int tid = threadIdx.x; // Same result as blockIdx.x
// GPU global (grid-wide) index // Same result as above
int tid = threadIdx.x + blockIdx.x*blockDim.x;
if (tid < N) {
c[tid] = a[tid] + b[tid];
}
}
This is the add() function, for the CPU process:
void add(int *a, int *b, int *c) {
int tid = 0;
while (tid < N) {
c[tid] = a[tid] + b[tid];
tid += 1;
}
}
This is the main() function:
int main() {
// Values for time duration
LARGE_INTEGER tFreq, tStart, tEnd;
cudaEvent_t start, stop;
float tms, ms;
int a[N], b[N], c[N]; // CPU values
int *dev_a, *dev_b, *dev_c; // GPU values----------------------------------------------
// Creating alloc for GPU--------------------------------------------------------------
cudaMalloc((void**)&dev_a, N * sizeof(int));
cudaMalloc((void**)&dev_b, N * sizeof(int));
cudaMalloc((void**)&dev_c, N * sizeof(int));
// Fill 'a' and 'b' from CPU
for (int i = 0; i < N; i++) {
a[i] = -i;
b[i] = i * i;
}
// Copy values of CPU to GPU values----------------------------------------------------
cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
//////////////////////////////////////
QueryPerformanceFrequency(&tFreq); // Frequency set
QueryPerformanceCounter(&tStart); // Time count Start
// CPU operation
add(a, b, c);
//////////////////////////////////////
QueryPerformanceCounter(&tEnd); // TIme count End
tms = ((tEnd.QuadPart - tStart.QuadPart) / (float)tFreq.QuadPart) * 1000;
//////////////////////////////////////
// show result of CPU
cout << fixed;
cout.precision(10);
cout << "CPU Time=" << tms << endl << endl;
for (int i = 0; i < N; i++) {
printf("CPU calculate = %d + %d = %d\n", a[i], b[i], c[i]);
}
cout << endl;
///////////////////////////////////////
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// GPU operating---------------------------------------------------------------------
//add2<<<N, 1>>>(dev_a, dev_b, dev_c); // block
//add2<<<1, N>>>(dev_a, dev_b, dev_c); // Thread
add2<<<N/32 + 1, 32>>>(dev_a, dev_b, dev_c); // grid
///////////////////////////////////////
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&ms, start, stop);
///////////////////////////////////////
// show result of GPU
cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);
cout << fixed;
cout.precision(10);
cout << "GPU Time=" << ms << endl << endl;
for (int i = 0; i < N; i++) {
printf("GPU calculate = %d + %d = %d\n", a[i], b[i], c[i]);
}
//Free GPU values
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
This is the result of compiling this code (output screenshot not reproduced here).
I want to make the GPU process much faster than the CPU process.
A GPU is actually generally slower than a CPU for running a single operation. Additionally, it takes time to send data to the GPU and read it back again.
The advantage of the GPU is it can execute many operations in parallel.
As you have defined N to be 10, it probably takes longer to upload and download the data than to execute the computation on the CPU. In order to see the advantage of the GPU, increase your problem size to something much larger. Ideally you want to execute a minimum of a few operations on each GPU core before you start seeing some benefit. For example, with your GPU's 1280 cores you would want to execute something like 4000 operations or more at once to get the benefit of the GPU.
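For illustration only, here is a rough sketch (my own code, not Nvidia's sample; names such as BIGN and add2_big are hypothetical) of the same vector add with a much larger problem size, timed with CUDA events and including the copies:
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>
#define BIGN (1 << 20) // ~1 million elements instead of 10
__global__ void add2_big(const int *a, const int *b, int *c, int n) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if (tid < n) c[tid] = a[tid] + b[tid];
}
int main() {
std::vector<int> a(BIGN), b(BIGN), c(BIGN);
for (int i = 0; i < BIGN; i++) { a[i] = -i; b[i] = 2 * i; } // expected result: c[i] = i
int *dev_a, *dev_b, *dev_c;
cudaMalloc(&dev_a, BIGN * sizeof(int));
cudaMalloc(&dev_b, BIGN * sizeof(int));
cudaMalloc(&dev_c, BIGN * sizeof(int));
cudaEvent_t start, stop;
cudaEventCreate(&start); cudaEventCreate(&stop);
cudaEventRecord(start);
// the copies are timed too, since they are part of the real cost of using the GPU
cudaMemcpy(dev_a, a.data(), BIGN * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b.data(), BIGN * sizeof(int), cudaMemcpyHostToDevice);
add2_big<<<(BIGN + 255) / 256, 256>>>(dev_a, dev_b, dev_c, BIGN);
cudaMemcpy(c.data(), dev_c, BIGN * sizeof(int), cudaMemcpyDeviceToHost);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float ms = 0.0f;
cudaEventElapsedTime(&ms, start, stop);
printf("GPU time including copies = %f ms, c[12345] = %d\n", ms, c[12345]);
cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);
return 0;
}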

cublasSdot is working slower than cublasSgemm

In my toy example I first multiply matrices of size 32x32, 100,000 times, and after that I calculate the scalar product of two vectors of size 1024, again 100,000 times. For the first I used cublasSgemm, for the second cublasSdot.
As a result, the time for the first calculation is 530 ms and for the second 10,000 ms. However, in order to multiply the matrices we need to perform 32^3 (multiply-add) operations, while the scalar product needs just 1024 = 32^2.
So why am I getting such a result? Here is the code:
__device__ float res;
void randomInit(float *data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
int main(){
cublasHandle_t handle;
float out;
cudaError_t cudaerr;
cudaEvent_t start1, stop1,start2,stop2;
cublasStatus_t stat;
int size = 32;
int num = 100000;
float *h_A = new float[size*size];
float *h_B = new float[size*size];
float *h_C = new float[size*size];
float *d_A, *d_B, *d_C;
const float alpha = 1.0f;
const float beta = 0.0f;
randomInit(h_A, size*size);
randomInit(h_B, size*size);
cudaMalloc((void **)&d_A, size *size *sizeof(float));
cudaMalloc((void **)&d_B, size *size * sizeof(float));
cudaMalloc((void **)&d_C, size *size * sizeof(float));
stat = cublasCreate(&handle);
cudaEventCreate(&start1);
cudaEventCreate(&stop1);
cudaEventCreate(&start2);
cudaEventCreate(&stop2);
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, size, size, size, &alpha, d_A, size,
d_B, size, &beta, d_C, size);
cudaEventRecord(start1, NULL);
cudaMemcpy(d_A, h_A, size *size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size *size * sizeof(float), cudaMemcpyHostToDevice);
for (int i = 0; i < num; i++){
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, size, size, size, &alpha, d_A,
size, d_B, size, &beta, d_C, size);
}
cudaMemcpy(h_C, d_C, size*size*sizeof(float), cudaMemcpyDeviceToHost);
cudaEventRecord(stop1, NULL);
cudaEventSynchronize(stop1);
float msecTotal1 = 0.0f;
cudaEventElapsedTime(&msecTotal1, start1, stop1);
std::cout <<"total time for MAtMul:" << msecTotal1 << "\n";
cudaEventRecord(start2, NULL);
cudaMemcpy(d_A, h_A, size *size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size *size * sizeof(float), cudaMemcpyHostToDevice);
for (int i = 0; i < num; i++){
cublasSdot(handle, 1024, d_A , 1, d_B , 1, &res);
}
cudaEventRecord(stop2, NULL);
cudaEventSynchronize(stop2);
float msecTotal2 = 0.0f;
cudaEventElapsedTime(&msecTotal2, start2, stop2);
std::cout << "total time for dotVec:" << msecTotal2 << "\n";
cublasDestroy(handle);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
delete[] h_A;
delete[] h_B;
delete[] h_C;
return 1;
}
Update: I also tried to perform the dot product with cublasSgemm by treating the vectors as a 1 by 1024 matrix. The result is 3550 ms, which is better, but still 7 times more than the first calculation.
One problem is that you're not handling the pointer mode correctly for the call to cublasSdot.
You'll want to read this section of the manual.
Furthermore this:
cublasSdot(handle, 1024, d_A , 1, d_B , 1, &res);
^^^^
is illegal under any circumstances. It is not legal in CUDA to take the address of a device variable in host code. You can certainly do it, but the results are garbage.
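As an aside (my addition, not part of the fix below): if you did want to keep the __device__ float res; symbol, the legal way to obtain its device address from host code is cudaGetSymbolAddress, combined with the device pointer mode, along the lines of:
float *dres;
// returns the device address of the __device__ variable res
cudaGetSymbolAddress((void **)&dres, res);
cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_DEVICE);
cublasSdot(handle, 1024, d_A, 1, d_B, 1, dres); // result is written on the device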
When I modify your code as follows:
cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_DEVICE);
float *dres;
cudaMalloc(&dres, sizeof(float));
cudaEventRecord(start2, NULL);
cudaMemcpy(d_A, h_A, size *size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size *size * sizeof(float), cudaMemcpyHostToDevice);
for (int i = 0; i < num; i++){
if(cublasSdot(handle, 1024, d_A , 1, d_B , 1, dres) != CUBLAS_STATUS_SUCCESS) {std::cout << ".";}
}
I get about a 2:1 ratio of execution time for cublasSdot to cublasSgemm which may be plausible, particularly for these sizes. Under the hood, the dot operation implies a parallel reduction. 1024 threads can compute the partial results, but then a 1024-thread-wide parallel reduction is required. The gemm does not need a parallel reduction, and so may be quicker. 1024 threads can be assigned to produce the 1024 results each in a single thread. For a memory-bound algorithm, the difference between 32^2 and 32^3 operations may not be that significant, but the parallel reduction implies significant additional operations. When I then change size in your program from 32 to 128, I see the ratio reverse, and the matrix multiply does indeed become 3x longer than the dot product.
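As a rough back-of-envelope (my own numbers, not taken from the measurements above): the 32x32 sgemm performs about 2*32^3 = 65,536 flops while touching roughly 3*32^2*4 bytes = 12 KB, whereas the 1024-element dot performs only 2*1024 = 2,048 flops while reading 2*1024*4 bytes = 8 KB and still pays for a full-width reduction plus a result write-back on every call, so at these sizes the per-call overhead and the reduction, rather than the raw flop counts, dominate the timing.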

Counting 3's in CUDA is very slow

I have written a small program in CUDA that counts how many 3's are in a C array and prints the count.
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <cstdlib>
__global__ void incrementArrayOnDevice(int *a, int N, int *count)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
//__shared__ int s_a[512]; // one for each thread
//s_a[threadIdx.x] = a[id];
if( id < N )
{
//if( s_a[threadIdx.x] == 3 )
if( a[id] == 3 )
{
atomicAdd(count, 1);
}
}
}
int main(void)
{
int *a_h; // host memory
int *a_d; // device memory
int N = 16777216;
// allocate array on host
a_h = (int*)malloc(sizeof(int) * N);
for(int i = 0; i < N; ++i)
a_h[i] = (i % 3 == 0 ? 3 : 1);
// allocate arrays on device
cudaMalloc(&a_d, sizeof(int) * N);
// copy data from host to device
cudaMemcpy(a_d, a_h, sizeof(int) * N, cudaMemcpyHostToDevice);
// do calculation on device
int blockSize = 512;
int nBlocks = N / blockSize + (N % blockSize == 0 ? 0 : 1);
printf("number of blocks: %d\n", nBlocks);
int count;
int *devCount;
cudaMalloc(&devCount, sizeof(int));
cudaMemset(devCount, 0, sizeof(int));
incrementArrayOnDevice<<<nBlocks, blockSize>>> (a_d, N, devCount);
// retrieve result from device
cudaMemcpy(&count, devCount, sizeof(int), cudaMemcpyDeviceToHost);
printf("%d\n", count);
free(a_h);
cudaFree(a_d);
cudaFree(devCount);
}
The result I get is:
real 0m3.025s
user 0m2.989s
sys 0m0.029s
When I run it on the CPU with 4 threads I get:
real 0m0.101s
user 0m0.100s
sys 0m0.024s
Note that the GPU is an old one - I don't know the exact model because I do not have root access to it, but the OpenGL version it runs is 1.2 using the MESA driver.
Am I doing something wrong? What can I do to make it run faster?
Note: I have tried using buckets for each block (so the atomicAdd()s would be reduced for each one) but I get exactly the same performance.
I have also tried copying the 512 integers that are assigned to this block to a shared block of memory (you can see it in the comments) and the time is the same again.
This is in response to your question "What can I do to make it run faster?" As I mentioned in the comments, there are issues (probably) with the timing methodology, and the main suggestion I have for speed improvement is to use a "classical parallel reduction" algorithm. The following code implements a better (in my opinion) timing measurement, and also converts your kernel to a reduction style kernel:
#include <stdio.h>
#include <assert.h>
#include <cstdlib>
#define N (1<<24)
#define nTPB 512
#define NBLOCKS 32
__global__ void incrementArrayOnDevice(int *a, int n, int *count)
{
__shared__ int lcnt[nTPB];
int id = blockIdx.x * blockDim.x + threadIdx.x;
int lcount = 0;
while (id < n) {
if (a[id] == 3) lcount++;
id += gridDim.x * blockDim.x;
}
lcnt[threadIdx.x] = lcount;
__syncthreads();
int stride = blockDim.x;
while(stride > 1) {
// assume blockDim.x is a power of 2
stride >>= 1;
if (threadIdx.x < stride) lcnt[threadIdx.x] += lcnt[threadIdx.x + stride];
__syncthreads();
}
if (threadIdx.x == 0) atomicAdd(count, lcnt[0]);
}
int main(void)
{
int *a_h; // host memory
int *a_d; // device memory
cudaEvent_t gstart1,gstart2,gstop1,gstop2,cstart,cstop;
float etg1, etg2, etc;
cudaEventCreate(&gstart1);
cudaEventCreate(&gstart2);
cudaEventCreate(&gstop1);
cudaEventCreate(&gstop2);
cudaEventCreate(&cstart);
cudaEventCreate(&cstop);
// allocate array on host
a_h = (int*)malloc(sizeof(int) * N);
for(int i = 0; i < N; ++i)
a_h[i] = (i % 3 == 0 ? 3 : 1);
// allocate arrays on device
cudaMalloc(&a_d, sizeof(int) * N);
int blockSize = nTPB;
int nBlocks = NBLOCKS;
printf("number of blocks: %d\n", nBlocks);
int count;
int *devCount;
cudaMalloc(&devCount, sizeof(int));
cudaMemset(devCount, 0, sizeof(int));
// copy data from host to device
cudaEventRecord(gstart1);
cudaMemcpy(a_d, a_h, sizeof(int) * N, cudaMemcpyHostToDevice);
cudaMemset(devCount, 0, sizeof(int));
cudaEventRecord(gstart2);
// do calculation on device
incrementArrayOnDevice<<<nBlocks, blockSize>>> (a_d, N, devCount);
cudaEventRecord(gstop2);
// retrieve result from device
cudaMemcpy(&count, devCount, sizeof(int), cudaMemcpyDeviceToHost);
cudaEventRecord(gstop1);
printf("GPU count = %d\n", count);
int hostCount = 0;
cudaEventRecord(cstart);
for (int i=0; i < N; i++)
if (a_h[i] == 3) hostCount++;
cudaEventRecord(cstop);
printf("CPU count = %d\n", hostCount);
cudaEventSynchronize(cstop);
cudaEventElapsedTime(&etg1, gstart1, gstop1);
cudaEventElapsedTime(&etg2, gstart2, gstop2);
cudaEventElapsedTime(&etc, cstart, cstop);
printf("GPU total time = %fs\n", (etg1/(float)1000) );
printf("GPU compute time = %fs\n", (etg2/(float)1000));
printf("CPU time = %fs\n", (etc/(float)1000));
free(a_h);
cudaFree(a_d);
cudaFree(devCount);
}
When I run this on a reasonably fast GPU (a Quadro 5000, a little slower than a Tesla M2050) I get the following:
number of blocks: 32
GPU count = 5592406
CPU count = 5592406
GPU total time = 0.025714s
GPU compute time = 0.000793s
CPU time = 0.017332s
We see that the GPU is substantially faster than this (naive, single-threaded) CPU implementation for the compute portion. When we add in the cost to transfer the data, the GPU version is slower but is not 30x slower.
By way of comparison, when I timed your original algorithm, I got numbers like this:
GPU total time = 0.118131s
GPU compute time = 0.093213s
My system config for this was Xeon X5560 CPU, RHEL 5.5, CUDA 5.0, Quadro5000 GPU.
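As a side note (my addition, not part of the answer above), the same counting reduction can be expressed with a single library call; a minimal sketch using Thrust on the existing device pointer might look like this:
#include <thrust/count.h>
#include <thrust/execution_policy.h>
// counts elements equal to 3 directly on the device array a_d;
// a raw device pointer is fine with the thrust::device execution policy
int count = (int)thrust::count(thrust::device, a_d, a_d + N, 3);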

Finding maximum and minimum with CUBLAS

I'm having problems grasping why my function that finds maximum and minimum in a range of doubles using CUBLAS doesn't work properly.
The code is as follows:
void findMaxAndMinGPU(double* values, int* max_idx, int* min_idx, int n)
{
double* d_values;
cublasHandle_t handle;
cublasStatus_t stat;
safecall( cudaMalloc((void**) &d_values, sizeof(double) * n), "cudaMalloc (d_values) in findMaxAndMinGPU");
safecall( cudaMemcpy(d_values, values, sizeof(double) * n, cudaMemcpyHostToDevice), "cudaMemcpy (h_values > d_values) in findMaxAndMinGPU");
cublasCreate(&handle);
stat = cublasIdamax(handle, n, d_values, sizeof(double), max_idx);
if (stat != CUBLAS_STATUS_SUCCESS)
printf("Max failed\n");
stat = cublasIdamin(handle, n, d_values, sizeof(double), min_idx);
if (stat != CUBLAS_STATUS_SUCCESS)
printf("min failed\n");
cudaFree(d_values);
cublasDestroy(handle);
}
Here values holds the values to search within, and max_idx and min_idx are the indices of the found numbers in values.
The results from the CUBLAS calls seem rather random and give the wrong indices.
Anyone with a golly good answer to my problem? I am a tad sad at the moment :(
One of the arguments to both your cublasIdamax and cublasIdamin calls is wrong. The incx argument in BLAS level 1 calls should always be the stride of the input in words, not bytes. So I suspect that you want something more like:
stat = cublasIdamax(handle, n, d_values, 1, max_idx);
if (stat != CUBLAS_STATUS_SUCCESS)
printf("Max failed\n");
stat = cublasIdamin(handle, n, d_values, 1, min_idx);
if (stat != CUBLAS_STATUS_SUCCESS)
printf("min failed\n");
By using sizeof(double) you are telling the routines to use a stride of 8, which makes the calls overrun the allocated storage of the input array into uninitialised memory. I presume you actually have a stride of 1 in d_values.
Edit: Here is a complete runnable example which works correctly. Note I switched the code to single precision because I don't presently have access to double precision capable hardware:
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cstdio>
#include <cstdlib>
#include <sys/time.h>
typedef float Real;
void findMaxAndMinGPU(Real* values, int* max_idx, int* min_idx, int n)
{
Real* d_values;
cublasHandle_t handle;
cublasStatus_t stat;
cudaMalloc((void**) &d_values, sizeof(Real) * n);
cudaMemcpy(d_values, values, sizeof(Real) * n, cudaMemcpyHostToDevice);
cublasCreate(&handle);
stat = cublasIsamax(handle, n, d_values, 1, max_idx);
if (stat != CUBLAS_STATUS_SUCCESS)
printf("Max failed\n");
stat = cublasIsamin(handle, n, d_values, 1, min_idx);
if (stat != CUBLAS_STATUS_SUCCESS)
printf("min failed\n");
cudaFree(d_values);
cublasDestroy(handle);
}
int main(void)
{
const int vmax=1000, nvals=10000;
float vals[nvals];
srand ( time(NULL) );
for(int j=0; j<nvals; j++) {
vals[j] = float(rand() % vmax);
}
int minIdx, maxIdx;
findMaxAndMinGPU(vals, &maxIdx, &minIdx, nvals);
int cmin = 0, cmax=0;
for(int i=1; i<nvals; i++) {
cmin = (vals[i] < vals[cmin]) ? i : cmin;
cmax = (vals[i] > vals[cmax]) ? i : cmax;
}
fprintf(stdout, "%d %d %d %d\n", minIdx, cmin, maxIdx, cmax);
return 0;
}
which when compiled and run gives this:
$ g++ -I/usr/local/cuda/include -L/usr/local/cuda/lib cublastest.cc -lcudart -lcublas
$ ./a.out
273 272 85 84
Note that CUBLAS follows the FORTRAN convention and uses 1-based indexing rather than zero-based indexing, which is why there is a difference of 1 between the CUBLAS and CPU results.
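So if you want C-style indices, a sketch of the adjustment (my addition) is simply:
// CUBLAS returns 1-based positions; subtract 1 before indexing the C array
int maxIdx0 = maxIdx - 1;
int minIdx0 = minIdx - 1;
fprintf(stdout, "max value = %f, min value = %f\n", vals[maxIdx0], vals[minIdx0]);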
From the documentation, cublasIdamax finds the element of maximum magnitude:
http://docs.nvidia.com/cuda/cublas/index.html#topic_6_1
So if you have { 1, 2, 3, -33, 22, 11 }, the result will be 4, not 5, because abs(-33) > 22.
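To illustrate the magnitude semantics, here is a minimal sketch of my own (single precision, using cublasIsamax as in the example above):
#include <cstdio>
#include <cuda_runtime.h>
#include <cublas_v2.h>
int main(void)
{
float h[6] = {1.f, 2.f, 3.f, -33.f, 22.f, 11.f};
float *d;
cudaMalloc(&d, sizeof(h));
cudaMemcpy(d, h, sizeof(h), cudaMemcpyHostToDevice);
cublasHandle_t handle;
cublasCreate(&handle);
int idx = 0;
cublasIsamax(handle, 6, d, 1, &idx); // default (host) pointer mode: result returned to host
printf("Isamax index (1-based): %d\n", idx); // expected 4, because |-33| is the largest magnitude
cublasDestroy(handle);
cudaFree(d);
return 0;
}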