CUDA: XOR single bitset with array of bitsets - C++

I want to XOR a single bitset with a bunch of other bitsets (~100k) and count the set bits of every XOR result. The size of a single bitset is around 20k bits.
The bitsets are already converted to arrays of unsigned int so that the intrinsic __popc() function can be used. The 'bunch' already resides contiguously in device memory.
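For context, such a conversion can be done with boost::dynamic_bitset, for example; the following is only an illustrative sketch, not necessarily the code actually used:
#include <boost/dynamic_bitset.hpp>
#include <cstdint>
#include <vector>

// Sketch: copy the 32-bit blocks underlying a dynamic_bitset into a plain
// vector<uint32_t> that can then be cudaMemcpy'd to the device.
std::vector<uint32_t> to_uint_blocks(const boost::dynamic_bitset<uint32_t>& bs) {
    std::vector<uint32_t> blocks(bs.num_blocks());
    boost::to_block_range(bs, blocks.begin());
    return blocks;
}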
My current kernel code looks like this:
// Grid/Blocks used for kernel invocation
dim3 block(32);
dim3 grid((bunch_size / 31) + 32);
__global__ void kernelXOR(uint * bitset, uint * bunch, int * set_bits, int bitset_size, int bunch_size) {
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    if (tid < bunch_size){ // 1 Thread for each bitset in the 'bunch'
        int sum = 0;
        uint xor_res = 0;
        for (int i = 0; i < bitset_size; ++i){ // Iterate through every uint-block of the bitsets
            xor_res = bitset[i] ^ bunch[bitset_size * tid + i];
            sum += __popc(xor_res);
        }
        set_bits[tid] = sum;
    }
}
However, compared to a parallelized C++/Boost version, I see no benefit from using CUDA.
Is there any potential in optimizing this kernel?

Is there any potential in optimizing this kernel?
I see two problems here (and they are the two classic primary optimization objectives for any CUDA programmer):
You want to try to efficiently use global memory. Your accesses to bitset and bunch are not coalesced. (efficiently use the memory subsystems)
The use of 32 threads per block is generally not recommended and could limit your overall occupancy. One thread per bitset is also potentially problematic. (expose enough parallelism)
Whether addressing those issues will meet your definition of benefit is impossible to say without a comparison test case. Furthermore, simple memory-bound problems like this are rarely interesting in CUDA when considered by themselves. However, we can (probably) improve the performance of your kernel.
We'll use a laundry list of ideas:
have each block handle a bitset, rather than each thread, to enable coalescing
use shared memory to load the comparison bitset, and reuse it
use just enough blocks to saturate the GPU, along with striding loops
use const ... __restrict__ style decoration to possibly benefit from RO cache
Here's a worked example:
$ cat t1649.cu
#include <iostream>
#include <cstdlib>
const int my_bitset_size = 20000/(32);
const int my_bunch_size = 100000;
typedef unsigned uint;
//using one thread per bitset in the bunch
__global__ void kernelXOR(uint * bitset, uint * bunch, int * set_bits, int bitset_size, int bunch_size) {
int tid = blockIdx.x*blockDim.x + threadIdx.x;
if (tid < bunch_size){ // 1 Thread for each bitset in the 'bunch'
int sum = 0;
uint xor_res = 0;
for (int i = 0; i < bitset_size; ++i){ // Iterate through every uint-block of the bitsets
xor_res = bitset[i] ^ bunch[bitset_size * tid + i];
sum += __popc(xor_res);
}
set_bits[tid] = sum;
}
}
const int nTPB = 256;
// one block per bitset, multiple bitsets per block
__global__ void kernelXOR_imp(const uint * __restrict__ bitset, const uint * __restrict__ bunch, int * __restrict__ set_bits, int bitset_size, int bunch_size) {
__shared__ uint sbitset[my_bitset_size]; // could also be dynamically allocated for varying bitset sizes
__shared__ int ssum[nTPB];
// load shared, block-stride loop
for (int idx = threadIdx.x; idx < bitset_size; idx += blockDim.x) sbitset[idx] = bitset[idx];
__syncthreads();
// stride across all bitsets in bunch
for (int bidx = blockIdx.x; bidx < bunch_size; bidx += gridDim.x){
int my_sum = 0;
for (int idx = threadIdx.x; idx < bitset_size; idx += blockDim.x) my_sum += __popc(sbitset[idx] ^ bunch[bidx*bitset_size + idx]);
// block level parallel reduction
ssum[threadIdx.x] = my_sum;
for (int ridx = nTPB>>1; ridx > 0; ridx >>=1){
__syncthreads();
if (threadIdx.x < ridx) ssum[threadIdx.x] += ssum[threadIdx.x+ridx];}
if (!threadIdx.x) set_bits[bidx] = ssum[0];}
}
int main(){
// data setup
uint *d_cbitset, *d_bitsets, *h_cbitset, *h_bitsets;
int *d_r, *h_r, *h_ri;
h_cbitset = new uint[my_bitset_size];
h_bitsets = new uint[my_bitset_size*my_bunch_size];
h_r = new int[my_bunch_size];
h_ri = new int[my_bunch_size];
for (int i = 0; i < my_bitset_size*my_bunch_size; i++){
h_bitsets[i] = rand();
if (i < my_bitset_size) h_cbitset[i] = rand();}
cudaMalloc(&d_cbitset, my_bitset_size*sizeof(uint));
cudaMalloc(&d_bitsets, my_bitset_size*my_bunch_size*sizeof(uint));
cudaMalloc(&d_r, my_bunch_size*sizeof(int));
cudaMemcpy(d_cbitset, h_cbitset, my_bitset_size*sizeof(uint), cudaMemcpyHostToDevice);
cudaMemcpy(d_bitsets, h_bitsets, my_bitset_size*my_bunch_size*sizeof(uint), cudaMemcpyHostToDevice);
// original
// Grid/Blocks used for kernel invocation
dim3 block(32);
dim3 grid((my_bunch_size / 31) + 32);
kernelXOR<<<grid, block>>>(d_cbitset, d_bitsets, d_r, my_bitset_size, my_bunch_size);
cudaMemcpy(h_r, d_r, my_bunch_size*sizeof(int), cudaMemcpyDeviceToHost);
// improved
dim3 iblock(nTPB);
dim3 igrid(640);
kernelXOR_imp<<<igrid, iblock>>>(d_cbitset, d_bitsets, d_r, my_bitset_size, my_bunch_size);
cudaMemcpy(h_ri, d_r, my_bunch_size*sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < my_bunch_size; i++)
if (h_r[i] != h_ri[i]) {std::cout << "mismatch at i: " << i << " was: " << h_ri[i] << " should be: " << h_r[i] << std::endl; return 0;}
std::cout << "Results match." << std::endl;
return 0;
}
$ nvcc -o t1649 t1649.cu
$ cuda-memcheck ./t1649
========= CUDA-MEMCHECK
Results match.
========= ERROR SUMMARY: 0 errors
$ nvprof ./t1649
==18868== NVPROF is profiling process 18868, command: ./t1649
Results match.
==18868== Profiling application: ./t1649
==18868== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 97.06% 71.113ms 2 35.557ms 2.3040us 71.111ms [CUDA memcpy HtoD]
2.26% 1.6563ms 1 1.6563ms 1.6563ms 1.6563ms kernelXOR(unsigned int*, unsigned int*, int*, int, int)
0.59% 432.68us 1 432.68us 432.68us 432.68us kernelXOR_imp(unsigned int const *, unsigned int const *, int*, int, int)
0.09% 64.770us 2 32.385us 31.873us 32.897us [CUDA memcpy DtoH]
API calls: 78.20% 305.44ms 3 101.81ms 11.373us 304.85ms cudaMalloc
18.99% 74.161ms 4 18.540ms 31.554us 71.403ms cudaMemcpy
1.39% 5.4121ms 4 1.3530ms 675.30us 3.3410ms cuDeviceTotalMem
1.26% 4.9393ms 388 12.730us 303ns 530.95us cuDeviceGetAttribute
0.11% 442.37us 4 110.59us 102.61us 125.59us cuDeviceGetName
0.03% 128.18us 2 64.088us 21.789us 106.39us cudaLaunchKernel
0.01% 35.764us 4 8.9410us 2.9670us 18.982us cuDeviceGetPCIBusId
0.00% 8.3090us 8 1.0380us 540ns 1.3870us cuDeviceGet
0.00% 5.9530us 3 1.9840us 310ns 3.9900us cuDeviceGetCount
0.00% 2.8800us 4 720ns 574ns 960ns cuDeviceGetUuid
$
In this case, on my Tesla V100, for your problem size, I witness about a 4x improvement in kernel performance. However, the kernel time here is tiny compared to the cost of data movement, so it's unlikely that this sort of optimization would make a significant difference in your comparison test case, if this is the only thing you are doing on the GPU.
The code above uses striding loops at both the block level and the grid level, which means it should behave correctly for almost any choice of threadblock size (a multiple of 32, please) as well as grid size. That doesn't mean all choices will perform equally. The threadblock size is chosen to allow the possibility of nearly full occupancy (so don't choose 32). The grid size is chosen as the number of blocks that achieves full occupancy per SM, times the number of SMs. These should be nearly optimal choices, but in my testing, for example, a larger number of blocks doesn't really reduce performance, and performance should be roughly constant for nearly any threadblock size (except 32), assuming the number of blocks is calculated accordingly.
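As a footnote, rather than hard-coding 640, the grid size could be computed at runtime from the occupancy API; the following is only a sketch of that idea (it assumes the kernelXOR_imp and nTPB definitions from the code above):
int dev = 0, num_sms = 0, blocks_per_sm = 0;
cudaGetDevice(&dev);
cudaDeviceGetAttribute(&num_sms, cudaDevAttrMultiProcessorCount, dev);
// how many resident blocks of nTPB threads fit on one SM for this kernel
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&blocks_per_sm, kernelXOR_imp, nTPB, 0);
dim3 igrid(blocks_per_sm * num_sms);   // just enough blocks to saturate the GPU
kernelXOR_imp<<<igrid, nTPB>>>(d_cbitset, d_bitsets, d_r, my_bitset_size, my_bunch_size);
On a V100 this should reproduce the 640 blocks used above, assuming the kernel achieves full occupancy.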

Related

2D tiled convolution taking more time than untiled version

I'm writing code that performs a 2D convolution on a float matrix, in both a tiled and an untiled version. I'm assuming the width of the tile to be
BLOCK_SIZE - MASK_WIDTH + 1
, using halo cells.
But for a 1024x1024 matrix and masks varying from 3 to 9, the untiled version performs better:
(benchmark screenshots: untiled version vs. tiled version)
Both the matrix and the mask are defined deterministically and identically for the tiled and untiled versions; no random values or sizes are used.
I guess I'm making a wrong assumption about the tile size, but even after doing some research the implementation seems legitimate.
#define MATRIX_SIZE 1024
#define BLOCK_WIDTH 32
Here's the kernel code for the tiled version
__global__ void convolution_2D_tiled(float* in, const float* __restrict__ mask, float* out, size_t mask_width, size_t w, size_t h) {
    float outputPixel = 0; //minimize write to global memory: stored in register
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int tile_width = BLOCK_WIDTH - mask_width + 1; //since BLOCK_WIDTH = TILE_WIDTH + MASK_WIDTH - 1
    int col = blockIdx.x * tile_width + tx;
    int row = blockIdx.y * tile_width + ty;
    //picking the starting indexes of input matrix inside the mask
    //(TOP-LEFT of the mask)
    int inputRow = row - (mask_width / 2);
    int inputCol = col - (mask_width / 2);
    __shared__ float tile[BLOCK_WIDTH][BLOCK_WIDTH];
    // Load tile elements
    if (inputRow >= 0 && inputRow < h && inputCol >= 0 && inputCol < w)
        tile[ty][tx] = in[inputRow * w + inputCol];
    else
        tile[ty][tx] = 0.0;
    // Wait until all tile elements are loaded
    __syncthreads();
    //some thread won't write any outputs, only need to calculate tile_width elements
    if (col < w && row < h && ty < tile_width && tx < tile_width) {
        //get the neighbour in the mask
        for (int i = 0; i < mask_width; ++i) {
            for (int j = 0; j < mask_width; ++j) { //(Mask_Width^2) access for each thread in block -> for each block (Mask_Width^2) * (Block_width^2)
                outputPixel += tile[i + ty][j + tx] * mask[i * mask_width + j];
            }
        }
        out[(row * w) + col] = (float)(outputPixel);
    }
}
Here is the main, with the matrix generation and the size assumptions:
void errorCheck(unsigned int line){
cudaError_t cudaError = cudaGetLastError();
// if error code wasn't a code describing success
if (cudaError != cudaSuccess)
{
// output that there has been a CUDA error in the line of the CUDA function call
// and exit the program
printf("CUDA error in line %u in file %s: %s\n", line - 1, __FILE__, cudaGetErrorString(cudaError));
exit(EXIT_FAILURE);
}}
int main(int argc, char const* argv[]){
for (size_t mask_width = 3; mask_width <= 9; mask_width += 2) {
printf("Testing with mask size = %d\n\n", mask_width);
float* a;
float* b;
float* c;
cudaMallocManaged((void **) &a, sizeof(float)*MATRIX_SIZE*MATRIX_SIZE);
cudaMallocManaged((void **) &b, sizeof(int)*mask_width*mask_width);
cudaMallocManaged((void **) &c, sizeof(int)*MATRIX_SIZE*MATRIX_SIZE);
// initialize matrix A
for (int i = 0; i < MATRIX_SIZE; ++i) {
for (int j = 0; j < MATRIX_SIZE; ++j) {
a[i * MATRIX_SIZE + j] = (float)(1 +(3 * j % 20));
}
}
// initialize matrix B
for (int i = 0; i < mask_width; ++i) {
for (int j = 0; j < mask_width; ++j) {
b[i * mask_width + j] = (float)(1 + (((2 * i) + j) % mask_width));
}
}
float naive_gpu_elapsed_time_ms;
// some events to count the execution time
//clock_t st, end;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int tile_width = BLOCK_WIDTH - mask_width + 1;
dim3 dimGrid(MATRIX_SIZE / tile_width, MATRIX_SIZE / tile_width);
dim3 dimBlock(BLOCK_WIDTH, BLOCK_WIDTH);
errorCheck(__LINE__);
cudaEventRecord(start, 0);
convolution_2D_tiled <<<dimGrid, dimBlock >>> (a, b, c, mask_width, MATRIX_SIZE, MATRIX_SIZE);
errorCheck(__LINE__);
cudaThreadSynchronize();
//time counting terminate
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
//compute time elapsed on GPU computing
cudaEventElapsedTime(&naive_gpu_elapsed_time_ms, start, stop);
printf("Time elapsed on naive GPU convolution 2d tiled ( %d ) block %f ms.\n\n", BLOCK_WIDTH, naive_gpu_elapsed_time_ms);
//free memory
cudaFree(a);
cudaFree(b);
cudaFree(c);
printf("________________________________________________________________________\n\n");
}
return 0;
}
I'm using Google Colab with a Tesla T4 GPU, and no CUDA error is thrown.
I also tried bigger masks (11, 15, ...), but there was no change in the comparison between tiled and untiled.
You are making inefficient use of managed memory, as discussed here and here.
Nearly all of your ~2ms of execution time is used in inefficient demand-paged copying of data from host to device. As a result, your ability to resolve the difference in performance in the two cases due to the device code changes is almost completely obscured.
If you add these 3 lines of code immediately before float naive_gpu_elapsed_time_ms;, you will observe that your reported execution times decrease dramatically, and you should be able to better judge the performance difference between the shared memory tiled version and the non-tiled version:
cudaMemPrefetchAsync(a, sizeof(float)*MATRIX_SIZE*MATRIX_SIZE, 0);
cudaMemPrefetchAsync(b, sizeof(int)*mask_width*mask_width, 0);
cudaMemPrefetchAsync(c, sizeof(int)*MATRIX_SIZE*MATRIX_SIZE, 0);
You haven't shown your non-tiled code, so I can't demonstrate that for you. Here's an example profiling output using a non-tiled convolution code that I wrote, comparing to your tiled kernel, and including the cudaMemPrefetchAsync() statements:
$ nvprof ./t2140
Testing with mask size = 3
==13236== NVPROF is profiling process 13236, command: ./t2140
Time elapsed on naive GPU convolution 2d tiled ( 32 ) block 0.032832 ms.
________________________________________________________________________
Testing with mask size = 5
Time elapsed on naive GPU convolution 2d tiled ( 32 ) block 0.061120 ms.
________________________________________________________________________
Testing with mask size = 7
Time elapsed on naive GPU convolution 2d tiled ( 32 ) block 0.086080 ms.
________________________________________________________________________
Testing with mask size = 9
Time elapsed on naive GPU convolution 2d tiled ( 32 ) block 0.118688 ms.
________________________________________________________________________
==13236== Profiling application: ./t2140
==13236== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 52.59% 311.69us 4 77.922us 41.089us 119.08us convolution_2D(float*, float const *, float*, unsigned long, unsigned long, unsigned long)
47.41% 280.97us 4 70.241us 28.449us 114.28us convolution_2D_tiled(float*, float const *, float*, unsigned long, unsigned long, unsigned long)
API calls: 96.10% 365.32ms 12 30.443ms 12.906us 365.10ms cudaMallocManaged
1.32% 5.0301ms 4 1.2575ms 586.91us 3.2433ms cuDeviceTotalMem
0.66% 2.4917ms 404 6.1670us 320ns 268.82us cuDeviceGetAttribute
0.56% 2.1277ms 12 177.31us 8.3020us 578.90us cudaMemPrefetchAsync
0.50% 1.9035ms 4 475.88us 295.08us 549.01us cudaDeviceSynchronize
0.49% 1.8594ms 12 154.95us 75.533us 328.85us cudaFree
0.14% 526.53us 4 131.63us 42.014us 220.14us cudaEventSynchronize
0.11% 399.28us 4 99.820us 61.310us 210.74us cuDeviceGetName
0.09% 351.52us 8 43.940us 11.426us 116.52us cudaLaunchKernel
0.01% 45.911us 8 5.7380us 4.1870us 10.243us cudaEventRecord
0.01% 25.946us 8 3.2430us 935ns 10.182us cudaEventCreate
0.01% 21.643us 4 5.4100us 3.1450us 8.6700us cuDeviceGetPCIBusId
0.00% 10.304us 8 1.2880us 430ns 5.0980us cuDeviceGet
0.00% 9.6790us 4 2.4190us 1.9560us 3.7180us cudaEventElapsedTime
0.00% 3.3390us 3 1.1130us 617ns 1.6520us cuDeviceGetCount
0.00% 3.2480us 4 812ns 700ns 1.0470us cuDeviceGetUuid
0.00% 3.1420us 8 392ns 229ns 1.2110us cudaGetLastError
==13236== Unified Memory profiling result:
Device "Tesla V100-PCIE-32GB (0)"
Count Avg Size Min Size Max Size Total Size Total Time Name
12 1.3346MB 4.0000KB 2.0000MB 16.01563MB 1.405760ms Host To Device
Total CPU Page faults: 52
$
You can see that in each case, the tiled/shared memory kernel is faster.
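For reference, a minimal untiled kernel for this kind of comparison could look like the sketch below (my illustration only; the exact untiled kernel profiled above is not shown):
__global__ void convolution_2D(float* in, const float* __restrict__ mask, float* out,
                               size_t mask_width, size_t w, size_t h) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col < (int)w && row < (int)h) {
        float outputPixel = 0;
        int r0 = row - (int)(mask_width / 2);   // top-left corner of the mask footprint
        int c0 = col - (int)(mask_width / 2);
        for (int i = 0; i < (int)mask_width; ++i)
            for (int j = 0; j < (int)mask_width; ++j) {
                int r = r0 + i, c = c0 + j;
                if (r >= 0 && r < (int)h && c >= 0 && c < (int)w)   // zero padding at the borders
                    outputPixel += in[r * w + c] * mask[i * mask_width + j];
            }
        out[row * w + col] = outputPixel;
    }
}
It would be launched over the full matrix with BLOCK_WIDTH x BLOCK_WIDTH blocks, i.e. a (MATRIX_SIZE/BLOCK_WIDTH) x (MATRIX_SIZE/BLOCK_WIDTH) grid.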

Why is this simple CUDA kernel getting a wrong result?

I am a newbie with CUDA. I'm learning some basic things because I want to use CUDA in another project. I have written this code in order to add up all the elements of an 8x8 square matrix which has been filled with 1's, so the result must be 64.
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
const int SIZE = 64;
__global__ void add_matrix_values(int* matrix, int sum, int c)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.x * blockDim.x;
sum += matrix[i*c+j];
}
int main()
{
int* device_matrix;
int* host_matrix;
int c = 8; //Squared matrix cxc
int device_c = 8;
int device_sum = 0;
int host_sum = 0;
//Allocate host memory
host_matrix = (int*)malloc(sizeof(int)*SIZE);
//Fill the matrix values with 1's
for(auto i = 0; i < SIZE; i++)
host_matrix[i] = 1;
//Allocate device memory
cudaMalloc((void**) &device_matrix,sizeof(int)*SIZE);
cudaMalloc((void**) &device_sum, sizeof(int));
cudaMalloc((void**) &device_c,sizeof(int));
//Fill device_matrix with host_matrix values
cudaMemcpy(&device_matrix,&host_matrix,sizeof(int)*SIZE,cudaMemcpyHostToDevice);
//Initialize device_sum with a 0
cudaMemcpy(&device_sum,&host_sum,sizeof(int),cudaMemcpyHostToDevice);
//Initialize device_c with the correct value
cudaMemcpy(&device_c,&c,sizeof(int),cudaMemcpyHostToDevice);
//4 blocks with 16 threads every single block ¿Is this correct?
add_matrix_values<<<4,16>>>(device_matrix, device_sum,device_c);
cudaMemcpy(&host_sum,&device_sum,sizeof(int),cudaMemcpyDeviceToHost);
std::cout<<"The value is: "<<host_sum<<std::endl;
cudaFree(device_matrix);
free(host_matrix);
return 0;
}
The result must be 64 but I'm getting wrong numbers.
migue#migue  ~/Escritorio  ./program
The value is: 32762
migue#migue  ~/Escritorio  ./program
The value is: 32608
migue#migue  ~/Escritorio  ./program
The value is: 32559
I don't know what I'm doing wrong. Could it be the grid size and the block size? Or could it be the i and j computation in the CUDA kernel?
I don't understand those terms very well.
There are a number of issues:
You are creating a 1-D grid and a 1-D block configuration, so your 2-D indexing in the kernel code (i, j, or x, y) doesn't make sense.
You are passing sum by value. You cannot retrieve a result that way. Changes in the kernel to sum won't be reflected in the calling environment. This is a C++ concept, not specific to CUDA. Use a properly allocated pointer instead.
In a CUDA multithreading environment, you cannot have multiple threads update the same location/value without any control. CUDA does not sort out that kind of access for you. You must use a parallel reduction technique, and a simplistic approach here could be to use atomics. You can find many questions here on the cuda tag discussing parallel reductions.
You're generally confusing pass by value and pass by pointer. Items passed by value can be ordinary host variables. You generally don't need a cudaMalloc allocation for those. You also don't use cudaMalloc on any kind of variable except a pointer.
Your use of cudaMemcpy is incorrect. There is no need to take the address of the pointers.
The following code has the above items addressed:
$ cat t135.cu
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
const int SIZE = 64;
__global__ void add_matrix_values(int* matrix, int *sum, int c)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
atomicAdd(sum, matrix[i]);
}
int main()
{
int* device_matrix;
int* host_matrix;
int device_c = 8;
int *device_sum;
int host_sum = 0;
//Allocate host memory
host_matrix = (int*)malloc(sizeof(int)*SIZE);
//Fill the matrix values with 1's
for(auto i = 0; i < SIZE; i++)
host_matrix[i] = 1;
//Allocate device memory
cudaMalloc((void**) &device_matrix,sizeof(int)*SIZE);
cudaMalloc((void**) &device_sum, sizeof(int));
//Fill device_matrix with host_matrix values
cudaMemcpy(device_matrix,host_matrix,sizeof(int)*SIZE,cudaMemcpyHostToDevice);
//Initialize device_sum with a 0
cudaMemcpy(device_sum,&host_sum,sizeof(int),cudaMemcpyHostToDevice);
//4 blocks with 16 threads every single block ¿Is this correct?
add_matrix_values<<<4,16>>>(device_matrix, device_sum,device_c);
cudaMemcpy(&host_sum,device_sum,sizeof(int),cudaMemcpyDeviceToHost);
std::cout<<"The value is: "<<host_sum<<std::endl;
cudaFree(device_matrix);
free(host_matrix);
return 0;
}
$ nvcc -o t135 t135.cu
$ cuda-memcheck ./t135
========= CUDA-MEMCHECK
The value is: 64
========= ERROR SUMMARY: 0 errors
$

What is the total thread count (executed over time, not in parallel) for CUDA?

I need to execute a function about 10^11 times. The function is self-contained and requires one integer as input, let's call it f(n). The range of n is in fact 0 < n < 10^11. We can ignore inclusion of endpoints, I just need the concept about running something of this magnitude in terms of indexes on CUDA.
I want to run this function using CUDA, but I have trouble conceptually. Namely, I know how to simulate my n, mentioned above, using the block and thread indexes, as shown in slide 40 of the NVIDIA tutorial. But what happens when n > TotalNumberOfThreadsPer_CUDA_Call?
Essentially, does the thread count and block count reset for every call I make to run functions on CUDA? If so, is there a simple way to simulate n, as described earlier, for arbitrarily large n?
Thanks.
A common pattern when you want to process more elements than there are threads is to simply loop over your data in grid-sized chunks:
__global__ void kernel(int* data, size_t size) {
    for (size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
         idx < size;
         idx += gridDim.x * blockDim.x) {
        // do something with data[idx] ...
    }
}
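A host-side launch for this pattern could look like the following (values and names are illustrative, not taken from any particular code):
int threads = 256;
int blocks = 1024;                       // any grid size works with a grid-stride loop;
                                         // a few blocks per SM is a common choice
kernel<<<blocks, threads>>>(d_data, N);  // assumes d_data holds N ints on the device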
Another option is to launch several consecutive kernels with a start offset:
__global__ void kernel(int* data, size_t size, size_t offset) {
    size_t idx = blockIdx.x * blockDim.x + threadIdx.x + offset;
    if (idx < size) {
        // do something with data[idx] ...
    }
}
// Host code
dim3 gridSize = ...;
dim3 blockSize = ...;
for (size_t offset = 0; offset < totalWorkSize; offset += (size_t)gridSize.x * blockSize.x) {
    kernel<<<gridSize, blockSize>>>(data, totalWorkSize, offset);
}
In both cases, you can process an "arbitrarily large" number of elements. You're still limited by size_t, so for 10^11 elements you will need to compile your code for 64 bits.
If you have to store the data instead of just computing it, you will need to do it in an iterative method. 10^11 values of any type are not going to fit in GPU memory.
I haven't compiled this code, but hopefully you'll get the gist.
__device__ double my_function(size_t value);

__global__ void my_kernel(double* data, size_t offset, size_t count) {
    size_t idx = blockIdx.x*blockDim.x + threadIdx.x;
    size_t stride = gridDim.x*blockDim.x;
    while (idx < count){
        data[idx] = my_function(idx + offset);
        idx += stride;
    }
}

void runKernel(size_t num_values){
    unsigned block_size = 128;
    unsigned grid_size = 1024;
    size_t free_mem, total_mem;
    cudaMemGetInfo(&free_mem, &total_mem);
    // number of doubles per chunk; leave some headroom rather than using all free memory
    size_t chunk_size = free_mem / (2*sizeof(double));
    double *data;
    cudaMalloc(&data, chunk_size*sizeof(double));
    for (size_t i = 0; i < num_values; i += chunk_size){
        // the last chunk may be smaller than chunk_size
        size_t count = (num_values - i < chunk_size) ? (num_values - i) : chunk_size;
        my_kernel<<<grid_size, block_size>>>(data, i, count);
        //copy to host and process
        //or call another kernel on device to process further
    }
    cudaFree(data);
}

How to efficiently repeat a vector to form a matrix in CUDA?

I want to repeat a vector to form a matrix in CUDA, avoiding too many memory copies. Both the vector and the matrix are allocated on the GPU.
For example:
I have a vector:
a = [1 2 3 4]
expand it to a matrix:
b = [1 2 3 4;
1 2 3 4;
.......
1 2 3 4]
What I have tried is to assign each element of b individually, but this involves a lot of GPU-to-GPU memory copies.
I know this is easy in MATLAB (using repmat), but how can I do it efficiently in CUDA? I didn't find any suitable routine in CUBLAS.
EDIT: based on the comments, I've updated the code to a version that will handle either row-major or column-major underlying storage.
Something like this should be reasonably fast:
// for row_major, blocks*threads should be a multiple of vlen
// for column_major, blocks should be equal to vlen
template <typename T>
__global__ void expand_kernel(const T* vector, const unsigned vlen, T* matrix, const unsigned mdim, const unsigned col_major=0){
  if (col_major){
    int idx = threadIdx.x+blockIdx.x*mdim;
    T myval = vector[blockIdx.x];
    while (idx < ((blockIdx.x+1)*mdim)){
      matrix[idx] = myval;
      idx += blockDim.x;
    }
  }
  else{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    T myval = vector[idx%vlen];
    while (idx < mdim*vlen){
      matrix[idx] = myval;
      idx += gridDim.x*blockDim.x;
    }
  }
}
This assumes your matrix is of dimensions mdim rows x vlen columns (seems to be what you have outlined in the question.)
You can tune the grid and block dimensions to find out what works fastest for your particular GPU. For the row-major case, start with 256 or 512 threads per block, and set the number of blocks equal to or greater than 4 times the number of SMs in your GPU. Choose the product of grid and block dimensions to be equal to an integer multiple of your vector length vlen. If this is difficult, choosing an arbitrary, but "large" threadblock size, such as 250 or 500, should not result in much lost efficiency.
For the column-major case, choose 256 or 512 threads per block, and choose the number of blocks equal to vlen, the vector length. If vlen > 65535, you will need to compile this for compute capability 3.0 or higher. If vlen is small, perhaps less than 32, the efficiency of this method may be significantly reduced. Some mitigation will be found if you increase the threads per block to the maximum for your GPU, either 512 or 1024. There may be other "expand" realizations that may be better suited to the column-major "narrow" matrix case. For example, a straightforward modification to the column-major code would allow two blocks per vector element, or four blocks per vector element, and the total launched blocks would then be 2*vlen or 4*vlen, for example.
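A sketch of that multi-block-per-element variant might look like this (my illustration, untested); it would be launched with nbpv*vlen blocks, where nbpv is the number of blocks cooperating on each vector element:
template <typename T>
__global__ void expand_kernel_col_multi(const T* vector, const unsigned vlen, T* matrix,
                                        const unsigned mdim, const unsigned nbpv){
  unsigned vec_idx = blockIdx.x / nbpv;   // which vector element this block replicates
  unsigned sub     = blockIdx.x % nbpv;   // which slice of the column this block fills
  T myval = vector[vec_idx];
  // stride through this block's share of the mdim-long column (column-major storage)
  for (unsigned row = sub*blockDim.x + threadIdx.x; row < mdim; row += nbpv*blockDim.x)
    matrix[vec_idx*mdim + row] = myval;
}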
Here's a fully worked example, along with a run of bandwidthTest, to demonstrate that the above code achieves ~90% of the throughput indicated by bandwidthTest:
$ cat t546.cu
#include <stdio.h>
#define W 512
#define H (512*1024)
// for row_major, blocks*threads should be a multiple of vlen
// for column_major, blocks should be equal to vlen
template <typename T>
__global__ void expand_kernel(const T* vector, const unsigned vlen, T* matrix, const unsigned mdim, const unsigned col_major=0){
if (col_major){
int idx = threadIdx.x+blockIdx.x*mdim;
T myval = vector[blockIdx.x];
while (idx < ((blockIdx.x+1)*mdim)){
matrix[idx] = myval;
idx += blockDim.x;
}
}
else{
int idx = threadIdx.x + blockDim.x * blockIdx.x;
T myval = vector[idx%vlen];
while (idx < mdim*vlen){
matrix[idx] = myval;
idx += gridDim.x*blockDim.x;
}
}
}
template <typename T>
__global__ void check_kernel(const T* vector, const unsigned vlen, T* matrix, const unsigned mdim, const unsigned col_major=0){
unsigned i = 0;
while (i<(vlen*mdim)){
unsigned idx = (col_major)?(i/mdim):(i%vlen);
if (matrix[i] != vector[idx]) {printf("mismatch at offset %d\n",i); return;}
i++;}
}
int main(){
int *v, *m;
cudaMalloc(&v, W*sizeof(int));
cudaMalloc(&m, W*H*sizeof(int));
int *h_v = (int *)malloc(W*sizeof(int));
for (int i = 0; i < W; i++)
h_v[i] = i;
cudaMemcpy(v, h_v, W*sizeof(int), cudaMemcpyHostToDevice);
// test row-major
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
expand_kernel<<<44, W>>>(v, W, m, H);
cudaEventRecord(stop);
float et;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&et, start, stop);
printf("row-majortime: %fms, bandwidth: %.0fMB/s\n", et, W*H*sizeof(int)/(1024*et));
check_kernel<<<1,1>>>(v, W, m, H);
cudaDeviceSynchronize();
// test col-major
cudaEventRecord(start);
expand_kernel<<<W, 256>>>(v, W, m, H, 1);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&et, start, stop);
printf("col-majortime: %fms, bandwidth: %.0fMB/s\n", et, W*H*sizeof(int)/(1024*et));
check_kernel<<<1,1>>>(v, W, m, H, 1);
cudaDeviceSynchronize();
return 0;
}
$ nvcc -arch=sm_20 -o t546 t546.cu
$ ./t546
row-majortime: 13.066944ms, bandwidth: 80246MB/s
col-majortime: 12.806720ms, bandwidth: 81877MB/s
$ /usr/local/cuda/samples/bin/x86_64/linux/release/bandwidthTest
[CUDA Bandwidth Test] - Starting...
Running on...
Device 0: Quadro 5000
Quick Mode
Host to Device Bandwidth, 1 Device(s)
PINNED Memory Transfers
Transfer Size (Bytes) Bandwidth(MB/s)
33554432 5864.2
Device to Host Bandwidth, 1 Device(s)
PINNED Memory Transfers
Transfer Size (Bytes) Bandwidth(MB/s)
33554432 6333.1
Device to Device Bandwidth, 1 Device(s)
PINNED Memory Transfers
Transfer Size (Bytes) Bandwidth(MB/s)
33554432 88178.6
Result = PASS
$
CUDA 6.5, RHEL 5.5
This can also be implemented using a CUBLAS Rank-1 update function but it will be considerably slower than the above method.
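For completeness, that rank-1 approach would look roughly like the following sketch (my illustration; it assumes a column-major mdim x vlen matrix, a cuBLAS handle created elsewhere, and a device vector d_ones of mdim ones):
#include <cublas_v2.h>

// matrix = 1.0 * ones * vec^T + matrix, i.e. every row of the (zeroed) matrix becomes vec
void expand_with_cublas(cublasHandle_t handle, const float *d_vec, const float *d_ones,
                        float *d_mat, int vlen, int mdim){
  const float alpha = 1.0f;
  cudaMemset(d_mat, 0, sizeof(float)*(size_t)mdim*vlen);   // A must start at zero
  cublasSger(handle, mdim, vlen, &alpha, d_ones, 1, d_vec, 1, d_mat, mdim);
}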

Unable to find the simple sum of the numbers 1 to 100 in CUDA?

I am working on an image processing algorithm using CUDA. In my algorithm I want to find the sum of all pixels of an image using a CUDA kernel. So I wrote a kernel to compute the sum of all pixels of a 16-bit grayscale image, but I got the wrong answer.
To narrow it down, I made a simple program in CUDA to find the sum of the numbers 1 to 100, and my code is below.
I get the exact sum of the numbers 1 to 100 on the CPU, but not on the GPU. So what have I done wrong in this code?
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <conio.h>
#include <malloc.h>
#include <limits>
#include <math.h>
using namespace std;
__global__ void computeMeanValue1(double *pixels,double *sum){
int x = threadIdx.x;
sum[0] = sum[0] + (pixels[(x)]);
__syncthreads();
}
int main(int argc, char **argv)
{
double *data;
double *dev_data;
double *dev_total;
double *total;
data=new double[(100) * sizeof(double)];
total=new double[(1) * sizeof(double)];
double cpuSum=0.0;
for(int i=0;i<100;i++){
data[i]=i+1;
cpuSum=cpuSum+data[i];
}
cout<<"CPU total = "<<cpuSum<<std::endl;
cudaMalloc( (void**)&dev_data, 100 * sizeof(double));
cudaMalloc( (void**)&dev_total, 1 * sizeof(double));
cudaMemcpy(dev_data, data, 100 * sizeof(double), cudaMemcpyHostToDevice);
computeMeanValue1<<<1,100>>>(dev_data,dev_total);
cudaDeviceSynchronize();
cudaMemcpy(total, dev_total, 1* sizeof(double), cudaMemcpyDeviceToHost);
cout<<"GPU total = "<<total[0]<<std::endl;
cudaFree(dev_data);
cudaFree(dev_total);
free(data);
free(total);
getch();
return 0;
}
All your threads are writing to the same memory location at the same time.
sum[0] = sum[0] + (pixels[(x)]);
You can't do this and expect to get the correct result. Your kernel needs to take a different approach to avoid writing to the same memory from different threads. The pattern usually employed for doing this is reduction. Simply put, with a reduction each thread is responsible for summing a block of elements within the array and then storing the result. By employing a series of these reduction operations, it's possible to sum the entire contents of the array.
__global__ void block_sum(const float *input,
                          float *per_block_results,
                          const size_t n)
{
    extern __shared__ float sdata[];
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    // load input into __shared__ memory
    float x = 0;
    if(i < n)
    {
        x = input[i];
    }
    sdata[threadIdx.x] = x;
    __syncthreads();
    // contiguous range pattern
    for(int offset = blockDim.x / 2;
        offset > 0;
        offset >>= 1)
    {
        if(threadIdx.x < offset)
        {
            // add a partial sum upstream to our own
            sdata[threadIdx.x] += sdata[threadIdx.x + offset];
        }
        // wait until all threads in the block have
        // updated their partial sums
        __syncthreads();
    }
    // thread 0 writes the final result
    if(threadIdx.x == 0)
    {
        per_block_results[blockIdx.x] = sdata[0];
    }
}
Each thread writes to a different location, sdata[threadIdx.x], so there is no race condition. Threads are free to access other elements in sdata because they only read from them, so there are no race conditions. Note the use of __syncthreads() to ensure that the operations to load data into sdata are complete before the threads start to read the data, and the second call to __syncthreads() to ensure that all the summation operations have completed before copying the final result from sdata[0]. Note that only thread 0 writes its result to per_block_results[blockIdx.x], so there is no race condition there either.
You can find the complete sample code for the above on Google Code (I did not write this). This slide deck has a reasonable summary of reductions in CUDA. It includes diagrams which really help in understanding how the interleaved memory reads and writes do not conflict with each other.
You can find lots of other material on efficient implementations of reduction on GPUs. Ensuring that your implementation makes most efficient use of memory is key to getting the best performance out of a memory bound operation like reduction.
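For reference, on newer GPUs the last stages of such a reduction are often done with warp shuffle intrinsics instead of shared memory; here is a sketch of that variant (my illustration; it assumes CUDA 9 or later and a block size that is a multiple of 32, up to 1024):
__inline__ __device__ float warp_sum(float v){
    // reduce 32 lane values to lane 0 using register shuffles
    for (int offset = 16; offset > 0; offset >>= 1)
        v += __shfl_down_sync(0xffffffff, v, offset);
    return v;
}

__global__ void block_sum_shfl(const float *input, float *per_block_results, const size_t n)
{
    __shared__ float warp_partials[32];           // one slot per warp in the block
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    float x = (i < n) ? input[i] : 0.0f;
    x = warp_sum(x);                              // each warp reduces its own 32 values
    if ((threadIdx.x & 31) == 0)
        warp_partials[threadIdx.x >> 5] = x;      // lane 0 of each warp stores its partial
    __syncthreads();
    if (threadIdx.x < 32)                         // the first warp reduces the warp partials
    {
        unsigned int nwarps = blockDim.x >> 5;
        float y = (threadIdx.x < nwarps) ? warp_partials[threadIdx.x] : 0.0f;
        y = warp_sum(y);
        if (threadIdx.x == 0)
            per_block_results[blockIdx.x] = y;
    }
}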
In GPU code, we have multiple threads executing in parallel. If all of those threads attempt to update the same location in memory, we have undefined behavior, unless we use special operations, called atomics to do the update.
In your case, since sum is updated by all threads, and sum is a double quantity, we can use the special custom atomic function described in the programming guide to accomplish this.
If I replace your kernel code with the following:
__device__ double atomicAdd(double* address, double val)
{
    unsigned long long int* address_as_ull =
        (unsigned long long int*)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val +
                            __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}

__global__ void computeMeanValue1(double *pixels, double *sum){
    int x = threadIdx.x;
    atomicAdd(sum, pixels[x]);
}
And initialize the sum value to zero before the kernel:
double gpuSum = 0.0;
cudaMemcpy(dev_total, &gpuSum, sizeof(double), cudaMemcpyHostToDevice);
Then I think you'll get matching results.
As @AdeMiller pointed out, the faster way to perform parallel sums like this is via a classical parallel reduction.
There is a CUDA sample code that demonstrates this and an accompanying presentation that covers the methodology.