I'm new to CUDA. I want to add two 2D arrays into a third array.
I use the following code:
cudaMallocPitch((void**)&device_a, &pitch, 2*sizeof(int),2);
cudaMallocPitch((void**)&device_b, &pitch, 2*sizeof(int),2);
cudaMallocPitch((void**)&device_c, &pitch, 2*sizeof(int),2);
Now my problem is that I don't want to use these arrays as flattened 2D arrays. In my kernel code I want to use two for loops and put the result in the third array, like this:
__global__ void add(int *dev_a, int *dev_b, int *dev_c)
{
for(int i=0;i<2;i++)
{
for(int j=0;j<2;j++)
{
dev_c[i][j]=dev_a[i][j]+dev_b[i][j];
}
}
}
How can I do this in CUDA?
Please tell me how to use a 2D array in this way.
What should the kernel call be for using a 2D array?
If possible, please explain with code samples.
The short answer is: you can't. The cudaMallocPitch() function does exactly what its name implies: it allocates pitched linear memory, where the pitch is chosen to be optimal for the GPU memory controller and texture hardware.
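Pitched allocations are still linear memory; rows are reached with byte-level pointer arithmetic rather than a second subscript. As a minimal sketch (not part of the original code, and assuming all three allocations return the same pitch, which they will for identical row widths), the intended access pattern looks like this:
__global__ void add_pitched(int *dev_a, int *dev_b, int *dev_c, size_t pitch)
{
for(int i=0;i<2;i++)
{
// step to row i by pitch *bytes*, not by element count
int *row_a = (int *)((char *)dev_a + i * pitch);
int *row_b = (int *)((char *)dev_b + i * pitch);
int *row_c = (int *)((char *)dev_c + i * pitch);
for(int j=0;j<2;j++)
row_c[j] = row_a[j] + row_b[j];
}
}
// launched e.g. as add_pitched<<<1,1>>>(device_a, device_b, device_c, pitch);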
If you wanted to use arrays of pointers in the kernel, the kernel code would have to look like this:
__global__ void add(int *dev_a[], int *dev_b[], int *dev_c[])
{
for(int i=0;i<2;i++) {
for(int j=0;j<2;j++) {
dev_c[i][j]=dev_a[i][j]+dev_b[i][j];
}
}
}
and then you would need nested cudaMalloc calls on the host side to construct the array of pointers and copy it to device memory. For your rather trivial 2x2 example, the code to allocate a single array would look like this:
int ** h_a = (int **)malloc(2 * sizeof(int *));
cudaMalloc((void**)&h_a[0], 2*sizeof(int));
cudaMalloc((void**)&h_a[1], 2*sizeof(int));
int **d_a;
cudaMalloc((void ***)&d_a, 2 * sizeof(int *));
cudaMemcpy(d_a, h_a, 2*sizeof(int *), cudaMemcpyHostToDevice);
That would leave the allocated device array of pointers in d_a, which you would then pass to your kernel.
For code complexity and performance reasons you really don't want to do that: using arrays of pointers in CUDA code is both harder and slower than the alternative using linear memory.
To show what folly using arrays of pointers is in CUDA, here is a complete working example of your sample problem which combines the two ideas above:
#include <cstdio>
__global__ void add(int * dev_a[], int * dev_b[], int * dev_c[])
{
for(int i=0;i<2;i++)
{
for(int j=0;j<2;j++)
{
dev_c[i][j]=dev_a[i][j]+dev_b[i][j];
}
}
}
inline void GPUassert(cudaError_t code, const char * file, int line, bool Abort=true)
{
if (code != cudaSuccess) {
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code),file,line);
if (Abort) exit(code);
}
}
#define GPUerrchk(ans) { GPUassert((ans), __FILE__, __LINE__); }
int main(void)
{
const int aa[2][2]={{1,2},{3,4}};
const int bb[2][2]={{5,6},{7,8}};
int cc[2][2];
int ** h_a = (int **)malloc(2 * sizeof(int *));
for(int i=0; i<2;i++){
GPUerrchk(cudaMalloc((void**)&h_a[i], 2*sizeof(int)));
GPUerrchk(cudaMemcpy(h_a[i], &aa[i][0], 2*sizeof(int), cudaMemcpyHostToDevice));
}
int **d_a;
GPUerrchk(cudaMalloc((void ***)&d_a, 2 * sizeof(int *)));
GPUerrchk(cudaMemcpy(d_a, h_a, 2*sizeof(int *), cudaMemcpyHostToDevice));
int ** h_b = (int **)malloc(2 * sizeof(int *));
for(int i=0; i<2;i++){
GPUerrchk(cudaMalloc((void**)&h_b[i], 2*sizeof(int)));
GPUerrchk(cudaMemcpy(h_b[i], &bb[i][0], 2*sizeof(int), cudaMemcpyHostToDevice));
}
int ** d_b;
GPUerrchk(cudaMalloc((void ***)&d_b, 2 * sizeof(int *)));
GPUerrchk(cudaMemcpy(d_b, h_b, 2*sizeof(int *), cudaMemcpyHostToDevice));
int ** h_c = (int **)malloc(2 * sizeof(int *));
for(int i=0; i<2;i++){
GPUerrchk(cudaMalloc((void**)&h_c[i], 2*sizeof(int)));
}
int ** d_c;
GPUerrchk(cudaMalloc((void ***)&d_c, 2 * sizeof(int *)));
GPUerrchk(cudaMemcpy(d_c, h_c, 2*sizeof(int *), cudaMemcpyHostToDevice));
add<<<1,1>>>(d_a,d_b,d_c);
GPUerrchk(cudaPeekAtLastError());
for(int i=0; i<2;i++){
GPUerrchk(cudaMemcpy(&cc[i][0], h_c[i], 2*sizeof(int), cudaMemcpyDeviceToHost));
}
for(int i=0;i<2;i++) {
for(int j=0;j<2;j++) {
printf("(%d,%d):%d\n",i,j,cc[i][j]);
}
}
return cudaDeviceReset(); // cudaThreadExit() is deprecated; cudaDeviceReset() is its modern replacement
}
I recommend you study it until you understand what it does, and why it is such a poor idea compared to using linear memory.
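For contrast, here is a minimal sketch (mine, not from the example above) of the same 2x2 addition using a single linear allocation per matrix: one cudaMalloc and one cudaMemcpy each, and plain index arithmetic in the kernel:
__global__ void add_linear(int *a, int *b, int *c)
{
for(int i=0;i<2;i++)
for(int j=0;j<2;j++)
c[i*2+j] = a[i*2+j] + b[i*2+j];
}
// host side, per matrix:
int *d_a;
GPUerrchk(cudaMalloc((void **)&d_a, 4 * sizeof(int)));
GPUerrchk(cudaMemcpy(d_a, &aa[0][0], 4 * sizeof(int), cudaMemcpyHostToDevice));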
You don't need to use for loops inside the kernel; launch one thread per element instead. In the code below, blockIdx.x selects the row and threadIdx.x the column, so each of the N*N threads adds a single element (this mapping requires N <= 1024, the per-block thread limit). Try this code.
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <time.h>
#define N 800
__global__ void matrixAdd(float* A, float* B, float* C){
int i = threadIdx.x;
int j = blockIdx.x;
C[N*j+i] = A[N*j+i] + B[N*j+i];
}
int main (void) {
clock_t start = clock();
static float a[N][N], b[N][N], c[N][N]; // static: three 800x800 float arrays (~7.3 MB) would likely overflow the stack as locals
float *dev_a, *dev_b, *dev_c;
cudaMalloc((void **)&dev_a, N * N * sizeof(float));
cudaMalloc((void **)&dev_b, N * N * sizeof(float));
cudaMalloc((void **)&dev_c, N * N * sizeof(float));
for (int i = 0; i < N; i++){
for (int j = 0; j < N; j++){
a[i][j] = rand() % 10;
b[i][j] = rand() % 10;
}
}
cudaMemcpy(dev_a, a, N * N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, N * N * sizeof(float), cudaMemcpyHostToDevice);
matrixAdd <<<N,N>>> (dev_a, dev_b, dev_c);
cudaMemcpy(c, dev_c, N * N * sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++){
for (int j = 0; j < N; j++){
printf("[%d, %d ]= %f + %f = %f\n",i,j, a[i][j], b[i][j], c[i][j]);
}
}
printf("Time elapsed: %f\n", ((double)clock() - start) / CLOCKS_PER_SEC);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
Here I'm trying to access a dynamically allocated array in CUDA. However, after running, the output is c[0][0] = 0. Am I accessing the allocated array correctly? I think the way I'm copying the arrays is probably correct, and for some reason the value of C has not been changed on the device.
#include<iostream>
using namespace std;
__global__ void add_matrix(float *A, float *B, float *C, int n) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < n) && (j < n)){
C[i*n+j] = A[i*n+j] + B[i*n+j];
}
}
int main(){
const size_t N = 1024;
const size_t size = N * N * sizeof(float);
float *A, *B, *C;
A = (float*) malloc(size);
B = (float*) malloc(size);
C = (float*) malloc(size);
for (size_t i=0; i<N*N; i++){
A[i] = 5.0;
B[i] = 6.0;
}
float *A_d, *B_d, *C_d;
cudaMalloc((void**)&A_d, size);
cudaMalloc((void**)&B_d, size);
cudaMalloc((void**)&C_d, size);
auto code = cudaMemcpy(A_d, A, size, cudaMemcpyHostToDevice);
if (code != cudaSuccess){
cout << "Error copying A to device" << endl;
}
code = cudaMemcpy(B_d, B, size, cudaMemcpyHostToDevice);
if (code != cudaSuccess){
cout << "Error copying B to device" << endl;
}
dim3 threads(N, N);
dim3 blocks(1,1);
add_matrix<<<blocks, threads>>>(A_d, B_d, C_d, N);
code = cudaMemcpy(C, C_d, size, cudaMemcpyDeviceToHost);
if (code != cudaSuccess){
cout << "Error copying C from device" << endl;
}
std::cout << "C[0][0] : " << C[0] << std::endl;
free(A); free(B); free(C);
cudaFree(A_d); cudaFree(B_d); cudaFree(C_d);
return 0;
}
The problem was the arrangement of the blocks. I totally forgot that each block can have only a limited number of threads. We can obtain the maximum threads per block by reading the maxThreadsPerBlock attribute with cudaDeviceGetAttribute. It seems the Colab GPU supports 1024 threads in each block, so I changed the arrangement this way:
dim3 threads(32,32);
dim3 blocks(32,32);
And it worked.
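For reference, a minimal sketch of querying that limit at runtime (device 0 assumed):
int maxThreadsPerBlock = 0;
cudaDeviceGetAttribute(&maxThreadsPerBlock, cudaDevAttrMaxThreadsPerBlock, 0);
printf("max threads per block: %d\n", maxThreadsPerBlock); // prints 1024 here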
I have a problem calling the cudaMemcpyFrom(To)Symbol functions in the CUDA C++ API. Alternative ideas for storing some parameters between blocks are welcome. Below I've provided some (example) code that doesn't work correctly.
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cstdlib>
#include <ctime>
int avgHost(int*, int);
cudaError_t cudaError;
__device__ int getGlobalIdx()
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x;
return threadId;
}
__device__ int avg;
__device__ int count;
__device__ int sum;
__global__ void avgKernel(const int *arr)
{
auto idx = getGlobalIdx();
count++;
sum += arr[idx];
avg = sum / count;
}
int main(int argc, char ** argv)
{
const int arraySize = 128;
auto arr1 = (int*)malloc(arraySize * sizeof(int)); // plain malloc; the original __vcrt_malloc_normal is an MSVC-internal symbol
srand(time(NULL));
for (size_t i = 0; i < arraySize; i++)
{
arr1[i] = rand() % 100;
}
auto hostAvg = avgHost(arr1, arraySize);
fprintf_s(stdout, "AVG: %d", hostAvg);
free(arr1);
return 0;
}
int avgHost(int* arr, int arraySize)
{
int *dArray = nullptr;
cudaSetDevice(0);
cudaError = cudaMemcpyToSymbol((void *)count, (void*)0, sizeof(int), 0, cudaMemcpyHostToDevice);
if (cudaError)
{
fprintf_s(stderr, "%s\t%d\n", cudaGetErrorString(cudaError), __LINE__);
return -1;
}
cudaMalloc((void**)&dArray, arraySize * sizeof(int));
cudaMemcpy(dArray, arr, arraySize * sizeof(int), cudaMemcpyKind::cudaMemcpyHostToDevice);
avgKernel <<<1, arraySize>>> (dArray);
cudaDeviceSynchronize();
int hostResult = -1;
cudaError = cudaMemcpyFromSymbol(&hostResult, (void *)avg, sizeof(int), 0, cudaMemcpyDeviceToHost);
if (cudaError)
{
fprintf_s(stderr, "%s\t%d\n", cudaGetErrorString(cudaError), __LINE__);
}
cudaFree(dArray);
return hostResult;
}
When I launch the code, the system prints:
invalid device symbol 55 AVG: -1
C:\Users\Administrator\source\repos\CudaTests\x64\Debug\cudabase.exe
(process 18152) exited with code 0.
Any ideas?
Remove these lines, because the device variable "count" is always 0 before the kernel launch:
cudaError = cudaMemcpyToSymbol((void *)count, (void*)0, sizeof(int), 0, cudaMemcpyHostToDevice);
if (cudaError)
{
fprintf_s(stderr, "%s\t%d\n", cudaGetErrorString(cudaError), __LINE__);
return -1;
}
EDIT
cudaError = cudaMemcpyFromSymbol(&hostResult, (void *)avg, sizeof(int), 0, cudaMemcpyDeviceToHost);
TO
cudaError = cudaMemcpyFromSymbol(&hostResult, avg, sizeof(int), 0, cudaMemcpyDeviceToHost);
(i.e. remove the cast from the parameter "avg"). IntelliSense reports an error, but the code compiles and runs successfully.
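For completeness: if you did want to reset count from the host before a launch, the correct form passes the symbol itself and a host pointer. A minimal sketch:
int zero = 0;
cudaError = cudaMemcpyToSymbol(count, &zero, sizeof(int), 0, cudaMemcpyHostToDevice);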
I have read this post, Allocate 2D array with cudaMallocPitch and copying with cudaMemcpy2D, among many others, including the NVIDIA docs, and I can't get cudaMallocPitch to work together with cudaMemcpy2D.
I need to copy a very big matrix in array format (Matrix[width*height]), along with a simple array, to perform matrix * vector operations. Using cudaMallocPitch is not optional for me; I need it to avoid conflicts and get better performance.
So I started by just trying to copy the matrix (a vector in my case) to the device and check whether it was copied correctly, but my code does not print anything. If I use cudaMalloc and cudaMemcpy everything works fine, but I do not know what to do with cudaMallocPitch and cudaMemcpy2D.
What can I do to fix this?
#include <stdio.h>
__global__ void kernel(size_t mpitch, double * A, int N)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
while (idx < N)
{
double e = *(double *)(((char *) A + idx * mpitch) + N);
printf("(%f)", e);
}
}
int main()
{
int N = 1500;
double * A = new double[N], * d_A;
size_t pitch;
for (int i = 0; i < N; ++i)
{
A[i] = i;
}
cudaMallocPitch(&d_A, &pitch, sizeof(double) * N, 1);
cudaMemcpy2D(d_A, pitch, A, N * sizeof(double), sizeof(double) * N, 1, cudaMemcpyHostToDevice);
unsigned int blocksize = 1024;
unsigned int nblocks = (N + blocksize - 1) / blocksize;
kernel <<<nblocks, blocksize>>>(pitch, d_A, N);
cudaFree(d_A);
delete [] A;
return 0;
}
Error checking can make a big difference in debugging. You should always use it before coming here.
It wasn't clear whether you wanted a row or a column vector, i.e. a matrix of [1xN] or [Nx1].
I've added an explanation following talonmies' suggestion, but first the 'working slabs of code'.
Here's [Nx1]:
#include <cstdio>
#include <iostream>
#include <cuda.h>
using namespace std;
__global__ void kernel(size_t mpitch, double * A, int N)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx>=N) return;
double e = *(double *)(((char *) A + idx * mpitch));
printf("(%f)", e);
}
int main()
{
int N = 15;
double * A = new double[N], * d_A;
size_t pitch;
for (int i = 0; i < N; ++i)
{
A[i] = i;
}
cudaError_t err = cudaMallocPitch(&d_A, &pitch, sizeof(double), N);
if(err!=cudaSuccess) cout<<"err0:"<<cudaGetErrorString(err)<<endl;
err = cudaMemcpy2D(d_A, pitch, A, sizeof(double), sizeof(double), N, cudaMemcpyHostToDevice);
if(err!=cudaSuccess) cout<<"err1:"<<cudaGetErrorString(err)<<endl;
unsigned int blocksize = 1024;
unsigned int nblocks = (N + blocksize - 1) / blocksize;
kernel <<<nblocks, blocksize>>>(pitch, d_A, N);
cudaDeviceSynchronize();
err = cudaGetLastError();
if(err!=cudaSuccess) cout<<"err2:"<<cudaGetErrorString(err)<<endl;
cudaFree(d_A);
delete [] A;
return 0;
}
[1xN]:
#include <cstdio>
#include <iostream>
#include <cuda.h>
using namespace std;
__global__ void kernel(size_t mpitch, double * A, int N)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx>=N) return;
int row=0;//only one row
double *row_ptr = (double *)( (char *) (A + mpitch * row) );
double e = row_ptr[idx];
printf("(%f)", e);
}
int main()
{
int N = 15;
double * A = new double[N], * d_A;
size_t pitch;
for (int i = 0; i < N; ++i)
{
A[i] = i;
}
cudaError_t err = cudaMallocPitch(&d_A, &pitch, sizeof(double)*N, 1);
if(err!=cudaSuccess) cout<<"err0:"<<cudaGetErrorString(err)<<endl;
err = cudaMemcpy2D(d_A, pitch, A, sizeof(double)*N, sizeof(double)*N, 1, cudaMemcpyHostToDevice);
if(err!=cudaSuccess) cout<<"err1:"<<cudaGetErrorString(err)<<endl;
unsigned int blocksize = 1024;
unsigned int nblocks = (N + blocksize - 1) / blocksize;
kernel <<<nblocks, blocksize>>>(pitch, d_A, N);
cudaDeviceSynchronize();
err = cudaGetLastError();
if(err!=cudaSuccess) cout<<"err2:"<<cudaGetErrorString(err)<<endl;
cudaFree(d_A);
delete [] A;
return 0;
}
Explanation
Firstly, error handling:
Considering how easy error handling is in CUDA, there isn't a good excuse not to put it in.
cudaError_t err = cudaMallocPitch(&d_A, &pitch, sizeof(double)*N, 1);
if(err!=cudaSuccess) cout<<"err0:"<<cudaGetErrorString(err)<<endl;
Second, you didn't specify whether you wanted a column vector or a row vector. Since a row vector is simply a 1D array in linear memory and you don't need pitched memory for that, I will assume for this explanation that you meant a column vector.
The recurring problem you were having was "misaligned address" in the kernel. This indicates that the problem is book-keeping, so let's walk through the three major steps of handling an aligned 2D array (even though our arrays will be either a column or a row vector).
Allocating:
Your allocation was written out as
cudaMallocPitch(&d_A, &pitch, sizeof(double) * N, 1);
This is correct for the row vector, as the API is cudaMallocPitch(void** pointer, size_t* pitch_return, size_t row_width_in_bytes, size_t count_of_rows). However, if we would like a column vector, the correct call is:
cudaMallocPitch(&d_A, &pitch, sizeof(double), N);
Accessing:
For accessing, you were mixing up stepping to a row with accessing an element within the row.
double e = *(double *)(((char *) A + idx * mpitch) + N);
Once again, stick to the documentation. The API documentation for cudaMallocPitch includes:
T* pElement = (T*)((char*)BaseAddress + Row * pitch) + Column;
for us this translates into:
int column = 0;
double element = *((double*)((char*)A + idx * mpitch) + column);
I've used column = 0 for completeness, since we do not have more than one column.
Copying:
cudaMemcpy2D(d_A, pitch, A, N * sizeof(double), sizeof(double) * N, 1, cudaMemcpyHostToDevice);
For this case this is correct. The API for cudaMemcpy2D is:
cudaMemcpy2D(void* destination, size_t destination_pitch_bytes, const void* source, size_t source_pitch_bytes, size_t width_in_bytes, size_t row_count, enum cudaMemcpyKind kind);
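For the column-vector ([Nx1]) case, the matching copy (used in the code above) transfers one element per row over N rows:
err = cudaMemcpy2D(d_A, pitch, A, sizeof(double), sizeof(double), N, cudaMemcpyHostToDevice);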
Here is my issue:
I have a 3D array of float3 on my device:
int size[3] = {416,464,512};
cudaExtent extent = make_cudaExtent(size[0]*sizeof(float3),size[1],size[2]);
cudaPitchedPtr renderedVolume;
int ret = cudaMalloc3D(&renderedVolume, extent);
size_t pitch = renderedVolume.pitch; //pitch = 5,120 (416 * sizeof(float3) = 4,992 bytes, padded up for alignment)
size_t slicePitch = pitch * size[1]; //slicePitch = 2,375,680
Then I work with it and fill it with my data.
After that I wish to copy it to 1D linear memory on my host:
float *host_memory = (float*)malloc(size[0]*size[1]*size[2]*sizeof(float3));
cudaMemcpy3DParms p = {0};
p.srcPtr = renderedVolume;
p.dstPtr = make_cudaPitchedPtr(host_memory,size[0]*sizeof(float3),size[0],size[1]);
p.extent = make_cudaExtent(size[0]*sizeof(float3),size[1],size[2]);
p.srcPos = make_cudaPos(0,0,0);
p.dstPos = make_cudaPos(0,0,0);
p.kind=cudaMemcpyDeviceToHost;
cudaMemcpy3D(&p);
I am comparing the result in host_memory with the data I initially wrote to renderedVolume (myData) and with the data I read from my 3D memory, slice by slice:
float* test1 = (float*)malloc(size[0]*size[1]*sizeof(float3));
cudaMemcpy(test1, myData, size[0]*size[1]*sizeof(float3) , cudaMemcpyDeviceToHost);
float* test2 = (float*)malloc(size[0]*size[1]*sizeof(float3));
cudaMemcpy(test2,(char*)renderedVolume.ptr + slicePitch * i,size[0]*size[1]*sizeof(float3), cudaMemcpyDeviceToHost);
Problem:
The first slice (i=0) is fine: I have the same data in host_memory, test1 and test2.
In the second slice, I have the same data in test1 and test2. However, I should find this data at host_memory+579072 (the number of floats per slice, which is also height * pitch of the destination pitched pointer) and I actually find it at host_memory+577504. It is off by 1568 floats, which corresponds to nothing I am aware of, so I would very much appreciate any idea of what the problem in my code might be.
This is a late answer provided to remove this question from the unanswered list.
Below, I'm providing a full code example showing how to allocate 3D memory with cudaMalloc3D, move host-allocated 1D memory to 3D device memory with cudaMemcpy3D, perform some operations on the 3D device data with the test_kernel_3D __global__ function, and move the 3D result data back to 1D host memory, again with cudaMemcpy3D.
The __global__ function test_kernel_3D squares each element of the 3D device memory. In particular, each thread of a 2D grid performs a for loop along the "depth" dimension.
#include<stdio.h>
#include<cuda.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
#include<conio.h>
#define BLOCKSIZE_x 16
#define BLOCKSIZE_y 16
#define N 128
#define M 64
#define W 16
/*****************/
/* CUDA MEMCHECK */
/*****************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) { getch(); exit(code); }
}
}
/*******************/
/* iDivUp FUNCTION */
/*******************/
int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); }
/******************/
/* TEST KERNEL 3D */
/******************/
__global__ void test_kernel_3D(cudaPitchedPtr devPitchedPtr)
{
int tidx = blockIdx.x*blockDim.x+threadIdx.x;
int tidy = blockIdx.y*blockDim.y+threadIdx.y;
char* devPtr = (char*) devPitchedPtr.ptr;
size_t pitch = devPitchedPtr.pitch;
size_t slicePitch = pitch * N;
for (int w = 0; w < W; w++) {
char* slice = devPtr + w * slicePitch;
float* row = (float*)(slice + tidy * pitch);
row[tidx] = row[tidx] * row[tidx];
}
}
/********/
/* MAIN */
/********/
int main()
{
float a[N][M][W];
for (int i=0; i<N; i++)
for (int j=0; j<M; j++)
for (int w=0; w<W; w++) {
a[i][j][w] = 3.f;
//printf("row %i column %i depth %i value %f \n",i,j,w,a[i][j][w]);
}
// --- 3D pitched allocation and host->device memcopy
cudaExtent extent = make_cudaExtent(M * sizeof(float), N, W);
cudaPitchedPtr devPitchedPtr;
gpuErrchk(cudaMalloc3D(&devPitchedPtr, extent));
cudaMemcpy3DParms p = { 0 };
p.srcPtr.ptr = a;
p.srcPtr.pitch = M * sizeof(float);
p.srcPtr.xsize = M;
p.srcPtr.ysize = N;
p.dstPtr.ptr = devPitchedPtr.ptr;
p.dstPtr.pitch = devPitchedPtr.pitch;
p.dstPtr.xsize = M;
p.dstPtr.ysize = N;
p.extent.width = M * sizeof(float);
p.extent.height = N;
p.extent.depth = W;
p.kind = cudaMemcpyHostToDevice;
gpuErrchk(cudaMemcpy3D(&p));
dim3 GridSize(iDivUp(M,BLOCKSIZE_x),iDivUp(N,BLOCKSIZE_y));
dim3 BlockSize(BLOCKSIZE_y,BLOCKSIZE_x);
test_kernel_3D<<<GridSize,BlockSize>>>(devPitchedPtr);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
p.srcPtr.ptr = devPitchedPtr.ptr;
p.srcPtr.pitch = devPitchedPtr.pitch;
p.dstPtr.ptr = a;
p.dstPtr.pitch = M * sizeof(float);
p.kind = cudaMemcpyDeviceToHost;
gpuErrchk(cudaMemcpy3D(&p));
for (int i=0; i<N; i++)
for (int j=0; j<M; j++)
for (int w=0; w<W; w++)
printf("row %i column %i depth %i value %f\n",i,j,w,a[i][j][w]);
getch();
return 0;
}
The following is a CUDA programming example, which is basically C but with NVIDIA CUDA functions in it. I've been trying to interpret this code example and figure out what it is trying to do. My question is this: the program compiles just fine, but what arguments does it take? For example, this CUDA program is being run in a Linux emulator, however upon running ./program it returns:
Usage: ./program number
Segmentation fault
What are the program's input arguments? Thank you.
#include <assert.h>
#include <stdio.h>
//#define N 100000
__host__ void saxpy_host(int length, float alpha, float * x, float * y)
{
for (int i = 0; i < length; ++i)
y[i] = alpha*x[i] + y[i];
}
__global__ void saxpy (int length, float alpha, float * x, float * y)
{
int i;
i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < length) y[i] = alpha*x[i]+y[i];
__syncthreads();
}
int main(int argc, char* argv[]) {
if (argc != 2) {
printf("Usage: %s number\n", argv[0]);
return -1;
}
int N = atoi(argv[1]);
// host data
float alpha = 0.5;
float x[N], xback[N];
float y[N], yback[N];
int size;
int i;
int blocks;
// determining size
size = sizeof(float)*N;
// device data
float * dxp, * dyp;
// fill host data
for (i = 0; i < N; i++) {
x[i] = (float) (rand () % 128);
y[i] = (float) (rand () % 256);
}
// Allocating and Moving data to device
cudaMalloc((void**) &dxp, size);
cudaMalloc((void**) &dyp, size);
cudaMemcpy (dxp, x, size, cudaMemcpyHostToDevice);
cudaMemcpy (dyp, y, size, cudaMemcpyHostToDevice);
// size of thread blocks
blocks = (N + 31)/32;
saxpy <<< blocks, 32 >>> (N, alpha, dxp, dyp);
// bring back data
cudaMemcpy (xback, dxp, size, cudaMemcpyDeviceToHost);
cudaMemcpy (yback, dyp, size, cudaMemcpyDeviceToHost);
// Calculating host SAXPY
saxpy_host (N, alpha, (float *) &x, (float *) &y);
// checking computation on host matches computation on GPU
for (i = 0; i < N; i++) {
assert (yback[i] == y[i]) ;
//printf ("%i %f %f \n", i, yback[i], y[i]);
}
// free device data
cudaFree(dxp); cudaFree(dyp);
return 0;
}
int N = atoi(argv[1]);
The program takes a single integer as a command line argument. (Try calling it as ./program 5, for example.)
It then calculates a SAXPY (an old term originating from early BLAS implementations, but it stuck; it means "single-precision (i.e. float) real alpha x plus y", that is y = alpha*x + y) with vectors of dimension N.