I have a question about a CUDA program I wrote. It lets me enter the size of the matrix (columns and rows). If I enter, say, ~1124, it computes fine. However, if I enter 1149 it segfaults AFTER computing on the device (I think it's segfaulting during the copy back). And if I enter 2000 it segfaults BEFORE computing on the device (I think during the copy over). I think my issue is all with memory management. If you guys could point me in the right direction I'd appreciate it.
I updated the code to show how it is called. In the new edit (at the bottom) the call contains: sumMatrix (a blank matrix of size eleCount1, the number of elements in the entire matrix), matrixOne (the first matrix), matrixTwo (the second matrix, allocated the same way as matrixOne), and eleCount1 (the total size of the matrix). Both matrixOne and matrixTwo are read in from a file.
Wasn't sure if someone needed to see this stuff about my GPU:
Total amount of constant memory: 65536 bytes
Total amount of shared memory per block: 49152 bytes
Total number of registers available per block: 32768
Warp size: 32
Maximum number of threads per block: 1024
Maximum sizes of each dimension of a block: 1024 x 1024 x 64
Maximum sizes of each dimension of a grid: 65535 x 65535 x 65535
The code is:
__global__ void addKernel(float *c, float *a, float *b)
{
int i = threadIdx.x;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
c[idx] = a[idx] + b[idx];
}
cudaError_t addWithCuda(float *c, float *a, float *b, size_t size)
{
float *dev_a = 0;
float *dev_b = 0;
float *dev_c = 0;
cudaError_t cudaStatus;
int blocksNeeded = (size / MAXTHREADS) + 1;
int threadsPerBlock = MAXTHREADS/blocksNeeded+1;
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
addKernel<<<blocksNeeded, size>>>(dev_c, dev_a, dev_b);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
//Edit: added how the matrices are allocated
float* matrixOne = (float*)malloc(sizeof(float)*file1size);
int matrixIndex = 0;
readFromFile(fd,byte, matrixOneWidth, matrixOneHeight, matrixOne);
//matrixOneHeight--;
eleCount1 = matrixOneHeight*matrixOneWidth;
matrixOne= (float*)realloc(matrixOne,eleCount1*sizeof(float));
//Edit: Added how the addWithCuda is called.
cudaStatus = addWithCuda(sumMatrix, matrixOne,matrixTwo,eleCount1);
//sumMatrix is created after we know how large the matrices are.
float sumMatrix[eleCount1];
You are not testing the bounds of your computation inside the kernel. If the total amount of work does not divide evenly by the block size, some threads will try to write to indices that are outside the output array. I suggest you pass the size as a parameter to the kernel and introduce the check:
__global__ void addKernel(float *c, float *a, float *b, int size)
{
int i = threadIdx.x;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx < size) c[idx] = a[idx] + b[idx];
}
I see that you are indexing into arrays a, b and c in your kernel, but you do not check to make sure that the index is within the array bounds. You are therefore writing into memory that you do not own, causing seg faults in random places.
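Note also that the question's launch, addKernel<<<blocksNeeded, size>>>(...), passes the total element count as the threads-per-block argument, which exceeds the 1024-thread-per-block limit once the matrices get large. A minimal host-side sketch pairing the bounds-checked kernel with a ceiling-division grid (MAXTHREADS is assumed to be the asker's block-size macro, e.g. 1024):
// Sketch: fixed block size plus a ceiling-division grid, to pair with the
// bounds-checked kernel above. MAXTHREADS is assumed to be e.g. 1024.
int threadsPerBlock = MAXTHREADS;
int blocksNeeded = (size + threadsPerBlock - 1) / threadsPerBlock; // round up
addKernel<<<blocksNeeded, threadsPerBlock>>>(dev_c, dev_a, dev_b, size);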
I'm wanting to find a way to dynamically calculate the necessary grid and block size for a calculation. The problem I want to handle is simply too large to process in a single run of the GPU, from a thread-limit perspective. Here is a sample kernel setup which runs into the error that I am having:
__global__ void populateMatrixKernel(char * outMatrix, const int pointsToPopulate)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < pointsToPopulate)
{
outMatrix[i] = 'A';
}
}
cudaError_t populateMatrixCUDA(char * outMatrix, const int pointsToPopulate, cudaDeviceProp &deviceProp)
{
//Device arrays to be used
char * dev_outMatrix = 0;
cudaError_t cudaStatus;
//THIS IS THE CODE HERE I'M WANTING TO REPLACE
//Calculate the block and grid parameters
auto gridDiv = div(pointsToPopulate, deviceProp.maxThreadsPerBlock);
auto gridX = gridDiv.quot;
if (gridDiv.rem != 0)
gridX++; //Round up if we have straggling points to populate
auto blockSize = deviceProp.maxThreadsPerBlock;
int gridSize = min(16 * deviceProp.multiProcessorCount, gridX);
//END REPLACE CODE
//Allocate GPU buffers
cudaStatus = cudaMalloc((void**)&dev_outMatrix, pointsToPopulate * sizeof(char));
if (cudaStatus != cudaSuccess)
{
cerr << "cudaMalloc failed!" << endl;
goto Error;
}
populateMatrixKernel<<<gridSize, blockSize>>>(dev_outMatrix, pointsToPopulate);
//Check for errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
cerr << "Population launch failed: " << cudaGetErrorString(cudaStatus) << endl;
goto Error;
}
//Wait for threads to finish
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
cerr << "cudaDeviceSynchronize returned error code " << cudaStatus << " after launching visit and bridger analysis kernel!" << endl;
cout << "Cuda failure " << __FILE__ << ":" << __LINE__ << " '" << cudaGetErrorString(cudaStatus);
goto Error;
}
//Copy output to host memory
cudaStatus = cudaMemcpy(outMatrix, dev_outMatrix, pointsToPopulate * sizeof(char), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
cerr << "cudaMemcpy failed!" << endl;
goto Error;
}
Error:
cudaFree(dev_outMatrix);
return cudaStatus;
}
Now, when I test this code using the following testing setup:
//Make sure we can use the graphics card (This calculation would be unreasonable otherwise)
if (cudaSetDevice(0) != cudaSuccess) {
cerr << "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?" << endl;
}
cudaDeviceProp deviceProp;
cudaError_t cudaResult;
cudaResult = cudaGetDeviceProperties(&deviceProp, 0);
if (cudaResult != cudaSuccess)
{
cerr << "cudaGetDeviceProperties failed!" << endl;
}
int pointsToPopulate = 250000 * 300;
auto gpuMatrix = new char[pointsToPopulate];
fill(gpuMatrix, gpuMatrix + pointsToPopulate, 'B');
populateMatrixCUDA(gpuMatrix, pointsToPopulate, deviceProp);
for (int i = 0; i < pointsToPopulate; ++i)
{
if (gpuMatrix[i] != 'A')
{
cout << "ERROR: " << i << endl;
cin.get();
}
}
I get an error at i=81920. Moreover, if I check the memory before and after the execution, all of the memory values after 81920 go from 'B' to null. It seems that this error is originating from this line in the kernel execution parameter code:
int gridSize = min(16 * deviceProp.multiProcessorCount, gridX);
For my graphics card (GTX 980M) I get a value for deviceProp.multiProcessorCount of 5, and if I multiply this by 16 and by 1024 (the maximum number of threads per block), I get 81920. It seems that, while I am fine on the memory side of things, I am getting choked by how many threads I can run. Now, this 16 is just an arbitrary value (taken from some example code my friend made). I was wondering if there is a way to actually calculate "what the 16 should be" based on the GPU's properties instead of setting it arbitrarily. I want to write iterative code that determines the maximum number of calculations that can be performed at one point in time, and then fills the matrix piece by piece accordingly, but I need to know the maximum calculation value to do this. Does anyone know of a way to calculate these parameters? If any more information is needed, I'm happy to oblige. Thank you!
There is fundamentally nothing wrong with the code you have posted. It is probably close to best practice. But it isn't compatible with the design idiom of your kernel.
As you can see here, your GPU is capable of running 2^31 - 1 or 2147483647 blocks. So you could change the code in question to this:
unsigned int gridSize = min(2147483647u, gridX);
and it should probably work. Better still, don't change that code at all, but change your kernel to something like this:
__global__ void populateMatrixKernel(char * outMatrix, const int pointsToPopulate)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
for(; i < pointsToPopulate; i += blockDim.x * gridDim.x)
{
outMatrix[i] = 'A';
}
}
That way your kernel will emit multiple outputs per thread and everything should just work as intended.
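If you still want to compute "what the 16 should be" from device properties rather than hard-coding it, the runtime occupancy API (available since CUDA 6.5) can report how many blocks of a given size each multiprocessor can hold concurrently. A sketch, assuming the grid-stride version of the kernel above:
// Sketch: derive the grid size from the occupancy API instead of an
// arbitrary multiplier. Works together with the grid-stride kernel.
int blockSize = deviceProp.maxThreadsPerBlock; // 1024 on the GTX 980M
int blocksPerSM = 0;
// Ask the runtime how many blocks of this size fit on one SM at once.
cudaOccupancyMaxActiveBlocksPerMultiprocessor(&blocksPerSM, populateMatrixKernel, blockSize, 0);
// A grid that exactly fills the device; the grid-stride loop covers the rest.
int gridSize = blocksPerSM * deviceProp.multiProcessorCount;
populateMatrixKernel<<<gridSize, blockSize>>>(dev_outMatrix, pointsToPopulate);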
I'm very new to CUDA programming and I'm facing a problem which is driving me crazy. What's going on:
I have a very simple program (just for study purposes) where one 16x16 input image and one 16x16 output image are created. The input image is initialized to values from 0..255 and then bound to a texture. The CUDA kernel just copies the input image to the output image. The input image values are obtained by calling tex1Dfetch(), which returns very strange values in some cases. Please see the code below, the comments inside the kernel, and the output of the program. The code is complete and compilable, so you can create a CUDA project in VC and paste the code into the main ".cu" file.
Please help me! What am I doing wrong?
I'm using VS 2013 Community and CUDA SDK 6.5 + CUDA integration for VS 2013.
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
texture<unsigned char> tex;
cudaError_t testMyKernel(unsigned char * inputImg, unsigned char * outputImg, int width, int height);
__global__ void myKernel(unsigned char *outImg, int width)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int idx = row*width + col;
__shared__ unsigned char input;
__shared__ unsigned char input2;
unsigned char *outPix = outImg + idx;
//It fetches a strange value; for example, when idx==0 the input is 51.
//But I expect that input==idx (according to the input image initialization).
input = tex1Dfetch(tex, idx);
printf("Fetched for idx=%d: %d\n", idx, input);
*outPix = input;
//Strangely, when I test the following code, tex1Dfetch() returns correct values.
if (idx == 0)
{
printf("\nKernel test print:\n");
for (int i = 0; i < 256; i++)
{
input2 = tex1Dfetch(tex, i);
printf("%d,", input2);
}
}
}
int main()
{
const int width = 16;
const int height = 16;
const int count = width * height;
unsigned char imgIn[count];
unsigned char imgOut[count];
for (int i = 0; i < count; i++)
{
imgIn[i] = i;
}
cudaError_t cudaStatus = testMyKernel(imgIn, imgOut, width, height);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "testMyKernel failed!");
return 1;
}
printf("\n\nOutput values:\n");
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
printf("%d,", imgOut[i * width + j]);
}
}
printf("\n");
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
getchar();
return 0;
}
cudaError_t testMyKernel(unsigned char * inputImg, unsigned char * outputImg, int width, int height)
{
unsigned char * dev_in;
unsigned char * dev_out;
size_t size = width * height * sizeof(unsigned char);
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// input data
cudaStatus = cudaMalloc((void**)&dev_in, size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_in, inputImg, size, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaBindTexture(NULL, tex, dev_in, size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaBindTexture failed!");
goto Error;
}
// output data
cudaStatus = cudaMalloc((void**)&dev_out, size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
dim3 threadsPerBlock(4, 4);
int blk_x = width / threadsPerBlock.x;
int blk_y = height / threadsPerBlock.y;
dim3 numBlocks(blk_x, blk_y);
// Launch a kernel on the GPU with one thread for each element.
myKernel<<<numBlocks, threadsPerBlock>>>(dev_out, width);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "myKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching myKernel!\n", cudaStatus);
goto Error;
}
//copy output image to host
cudaStatus = cudaMemcpy(outputImg, dev_out, size, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaUnbindTexture(tex);
cudaFree(dev_in);
cudaFree(dev_out);
return cudaStatus;
}
And here is the output of the program (truncated a little bit):
Fetched for idx=0: 51
Fetched for idx=1: 51
Fetched for idx=2: 51
Fetched for idx=3: 51
Fetched for idx=16: 51
Fetched for idx=17: 51
Fetched for idx=18: 51
Fetched for idx=19: 51
Fetched for idx=32: 51
Fetched for idx=33: 51
Fetched for idx=34: 51
Fetched for idx=35: 51
Fetched for idx=48: 51
Fetched for idx=49: 51
Fetched for idx=50: 51
Fetched for idx=51: 51
Fetched for idx=192: 243
Fetched for idx=193: 243
Fetched for idx=194: 243
Fetched for idx=195: 243
Fetched for idx=208: 243
Fetched for idx=209: 243
Fetched for idx=210: 243
Fetched for idx=211: 243
Fetched for idx=224: 243
etc... (output truncated.. see the Output values)
Kernel test print:
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,
30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56
etc...(correct values)
Output values:
51,51,51,51,55,55,55,55,59,59,59,59,63,63,63,63,51,51,51,51,55,55,55,55,59,59,59
,59,63,63,63,63,51,51,51,51,55,55,55,55,59,59,59,59,63,63,63,63,51,51,51,51,55,55,
etc.. (wrong values)
This line of the kernel
input = tex1Dfetch(tex, idx);
is causing a race condition among the threads of a block. All threads in a block try to fetch a value from the texture into the single __shared__ variable input simultaneously, causing undefined behavior. You should allocate a separate shared memory slot for each thread of the block, in the form of a __shared__ array.
For your current case, it may be something like
__shared__ unsigned char input[16]; //4 x 4 block size
The rest of the kernel should look something like:
int idx_local = threadIdx.y * blockDim.x + threadIdx.x; //local id of thread in a block
input[idx_local] = tex1Dfetch(tex, idx);
printf("Fetched for idx=%d: %d\n", idx, input[idx_local]);
*outPix = input[idx_local];
The code inside the condition at the end of the kernel works fine because, due to the if (idx == 0) condition, only the first thread of the first block does all the processing serially while all other threads remain idle, so the problem disappears in the absence of the race condition.
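Putting those fragments together, the corrected kernel might look like this (a sketch with the debug printing stripped out; the 16-element shared array assumes the 4 x 4 block size used in the question):
__global__ void myKernel(unsigned char *outImg, int width)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int idx = row * width + col;
    // One shared slot per thread of the 4 x 4 block: no two threads
    // write to the same shared location, so the race is gone.
    __shared__ unsigned char input[16];
    int idx_local = threadIdx.y * blockDim.x + threadIdx.x;
    input[idx_local] = tex1Dfetch(tex, idx);
    outImg[idx] = input[idx_local];
}
(For a plain copy the shared staging is not needed at all; it is kept here only to mirror the structure of the original kernel.)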
In my toy example I first multiply 32x32 matrices 100,000 times, and after that I calculate scalar products of two vectors of size 1024, again 100,000 times. For the first I used cublasSgemm, for the second cublasSdot.
As a result, the time for the first calculation is 530 msec and for the second 10,000 msec. However, to multiply the matrices we need to perform 32^3 (multiply-add) operations, while the scalar product needs just 1024 = 32^2 operations.
So why am I getting such result? Here is the code:
__device__ float res;
void randomInit(float *data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
int main(){
cublasHandle_t handle;
float out;
cudaError_t cudaerr;
cudaEvent_t start1, stop1,start2,stop2;
cublasStatus_t stat;
int size = 32;
int num = 100000;
float *h_A = new float[size*size];
float *h_B = new float[size*size];
float *h_C = new float[size*size];
float *d_A, *d_B, *d_C;
const float alpha = 1.0f;
const float beta = 0.0f;
randomInit(h_A, size*size);
randomInit(h_B, size*size);
cudaMalloc((void **)&d_A, size *size *sizeof(float));
cudaMalloc((void **)&d_B, size *size * sizeof(float));
cudaMalloc((void **)&d_C, size *size * sizeof(float));
stat = cublasCreate(&handle);
cudaEventCreate(&start1);
cudaEventCreate(&stop1);
cudaEventCreate(&start2);
cudaEventCreate(&stop2);
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, size, size, size, &alpha, d_A, size,
d_B, size, &beta, d_C, size);
cudaEventRecord(start1, NULL);
cudaMemcpy(d_A, h_A, size *size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size *size * sizeof(float), cudaMemcpyHostToDevice);
for (int i = 0; i < num; i++){
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, size, size, size, &alpha, d_A,
size, d_B, size, &beta, d_C, size);
}
cudaMemcpy(h_C, d_C, size*size*sizeof(float), cudaMemcpyDeviceToHost);
cudaEventRecord(stop1, NULL);
cudaEventSynchronize(stop1);
float msecTotal1 = 0.0f;
cudaEventElapsedTime(&msecTotal1, start1, stop1);
std::cout <<"total time for MAtMul:" << msecTotal1 << "\n";
cudaEventRecord(start2, NULL);
cudaMemcpy(d_A, h_A, size *size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size *size * sizeof(float), cudaMemcpyHostToDevice);
for (int i = 0; i < num; i++){
cublasSdot(handle, 1024, d_A , 1, d_B , 1, &res);
}
cudaEventRecord(stop2, NULL);
cudaEventSynchronize(stop2);
float msecTotal2 = 0.0f;
cudaEventElapsedTime(&msecTotal2, start2, stop2);
std::cout << "total time for dotVec:" << msecTotal2 << "\n";
cublasDestroy(handle);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
delete[] h_A;
delete[] h_B;
delete[] h_C;
return 1;
}
Update: I also tried to perform the dot product with cublasSgemm by treating the vector as a 1-by-1024 matrix. The result is 3550 msec, which is better, but still 7 times more than the first calculation.
One problem is that you're not handling the pointer mode correctly for the call to cublasSdot.
You'll want to read this section of the manual.
Furthermore this:
cublasSdot(handle, 1024, d_A , 1, d_B , 1, &res);
^^^^
is illegal under any circumstances. It is not legal in CUDA to take the address of a device variable in host code. The compiler will let you write it, but the pointer you get is garbage.
When I modify your code as follows:
cublasSetPointerMode(handle, CUBLAS_POINTER_MODE_DEVICE);
float *dres;
cudaMalloc(&dres, sizeof(float));
cudaEventRecord(start2, NULL);
cudaMemcpy(d_A, h_A, size *size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size *size * sizeof(float), cudaMemcpyHostToDevice);
for (int i = 0; i < num; i++){
if(cublasSdot(handle, 1024, d_A , 1, d_B , 1, dres) != CUBLAS_STATUS_SUCCESS) {std::cout << ".";}
}
I get about a 2:1 ratio of execution time for cublasSdot to cublasSgemm which may be plausible, particularly for these sizes. Under the hood, the dot operation implies a parallel reduction. 1024 threads can compute the partial results, but then a 1024-thread-wide parallel reduction is required. The gemm does not need a parallel reduction, and so may be quicker. 1024 threads can be assigned to produce the 1024 results each in a single thread. For a memory-bound algorithm, the difference between 32^2 and 32^3 operations may not be that significant, but the parallel reduction implies significant additional operations. When I then change size in your program from 32 to 128, I see the ratio reverse, and the matrix multiply does indeed become 3x longer than the dot product.
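For completeness: with the device pointer mode set, the scalar result lands in device memory, so it has to be copied back explicitly once the loop (and timing) is done. A minimal sketch, reusing the out variable already declared in the question's main:
// Sketch: the dot result was written to device memory (dres); fetch it
// back to the host after the timed loop, then release the allocation.
float out = 0.0f;
cudaMemcpy(&out, dres, sizeof(float), cudaMemcpyDeviceToHost);
std::cout << "dot result: " << out << "\n";
cudaFree(dres);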
I have written a simple sum reduction code which seems to work just fine until I increase the array size to 1 million. What can be the problem?
#define BLOCK_SIZE 128
#define ARRAY_SIZE 10000
cudaError_t addWithCuda(const long *input, long *output, int totalBlocks, size_t size);
__global__ void sumKernel(const long *input, long *output)
{
int tid = threadIdx.x;
int bid = blockDim.x * blockIdx.x;
__shared__ long data[BLOCK_SIZE];
if(bid+tid < ARRAY_SIZE)
data[tid] = input[bid+tid];
else
data[tid] = 0;
__syncthreads();
for(int i = BLOCK_SIZE/2; i >= 1; i >>= 1)
{
if(tid < i)
data[tid] += data[tid + i];
__syncthreads();
}
if(tid == 0)
output[blockIdx.x] = data[0];
}
int main()
{
int totalBlocks = ARRAY_SIZE/BLOCK_SIZE;
if(ARRAY_SIZE % BLOCK_SIZE != 0)
totalBlocks++;
long *input = (long*) malloc(ARRAY_SIZE * sizeof(long) );
long *output = (long*) malloc(totalBlocks * sizeof(long) );
for(int i=0; i<ARRAY_SIZE; i++)
{
input[i] = i+1 ;
}
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(input, output, totalBlocks, ARRAY_SIZE);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
long ans = 0;
for(int i =0 ; i < totalBlocks ;i++)
{
ans = ans + output[i];
}
printf("Final Ans : %ld",ans);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
getchar();
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(const long *input, long *output, int totalBlocks, size_t size)
{
long *dev_input = 0;
long *dev_output = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for two vectors (one input, one output) .
cudaStatus = cudaMalloc((void**)&dev_input, size * sizeof(long));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_output, totalBlocks * sizeof(long));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_input, input, size * sizeof(long), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_output, output, (totalBlocks) * sizeof(long), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
sumKernel<<<totalBlocks, BLOCK_SIZE>>>(dev_input, dev_output);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(output, dev_output, totalBlocks * sizeof(long), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_input);
cudaFree(dev_output);
return cudaStatus;
}
And just for reference, in case it has something to do with my GPU device: my GPU is a GTX 650 Ti.
Here is the info about the GPU:
Maximum number of threads per multiprocessor: 2048
Maximum number of threads per block: 1024
Maximum sizes of each dimension of a block: 1024 x 1024 x 64
Maximum sizes of each dimension of a grid: 2147483647 x 65535 x 65535
Maximum memory pitch: 2147483647 bytes
Texture alignment: 512 bytes
Actually the answer could not fit in a long either (the sum of 1..1,000,000 is 500,000,500,000, which overflows a 32-bit long), so after switching the datatypes to long double this issue was resolved. Thanks all!
One problem in your code is that your last cudaMemcpy is not set up correctly:
cudaMemcpy(output, dev_output, totalBlocks * sizeof(int), cudaMemcpyDeviceToHost);
All of your data is long data, so you should be copying using sizeof(long), not sizeof(int).
Another problem in your code is using the wrong printf format identifier for a long datatype:
printf("\n %d \n",output[i]);
use something like this instead:
printf("\n %ld \n",output[i]);
You may also have a problem with a large block count if you are not compiling for sm_30 architecture. In that case, proper cuda error checking would identify the problem.
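For reference, targeting the newer architecture is just a compile flag; sm_30 matches the GTX 650 Ti's Kepler generation (the file name here is hypothetical):
nvcc -arch=sm_30 -o sum_reduction sum_reduction.cu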
You don't check for errors after sumKernel<<<totalBlocks, BLOCK_SIZE>>>(dev_input, dev_output);. If you checked for the last occurred error, it would give invalid configuration argument. Try adding the following after the sumKernel line.
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "sumKernel failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
See this question for more information about the error.
I'm having problems grasping why my function that finds maximum and minimum in a range of doubles using CUBLAS doesn't work properly.
The code is as follows:
void findMaxAndMinGPU(double* values, int* max_idx, int* min_idx, int n)
{
double* d_values;
cublasHandle_t handle;
cublasStatus_t stat;
safecall( cudaMalloc((void**) &d_values, sizeof(double) * n), "cudaMalloc (d_values) in findMaxAndMinGPU");
safecall( cudaMemcpy(d_values, values, sizeof(double) * n, cudaMemcpyHostToDevice), "cudaMemcpy (h_values > d_values) in findMaxAndMinGPU");
cublasCreate(&handle);
stat = cublasIdamax(handle, n, d_values, sizeof(double), max_idx);
if (stat != CUBLAS_STATUS_SUCCESS)
printf("Max failed\n");
stat = cublasIdamin(handle, n, d_values, sizeof(double), min_idx);
if (stat != CUBLAS_STATUS_SUCCESS)
printf("min failed\n");
cudaFree(d_values);
cublasDestroy(handle);
}
Here values holds the values to search within, and max_idx and min_idx receive the indices of the found numbers in values.
The results from the CUBLAS calls seem rather random and the returned indexes are wrong.
Anyone with a golly good answer to my problem? I am a tad sad at the moment :(
One of your arguments to both the cublasIdamax and cublasIdamin calls is wrong. The incx argument in BLAS level 1 calls should always be the stride of the input in words, not bytes. So I suspect that you want something more like:
stat = cublasIdamax(handle, n, d_values, 1, max_idx);
if (stat != CUBLAS_STATUS_SUCCESS)
printf("Max failed\n");
stat = cublasIdamin(handle, n, d_values, 1, min_idx);
if (stat != CUBLAS_STATUS_SUCCESS)
printf("min failed\n");
By using sizeof(double) you are telling the routines to use a stride of 8, which makes the calls overrun the allocated storage of the input array into uninitialised memory. I presume you actually have a stride of 1 in d_values.
Edit: Here is a complete runnable example which works correctly. Note I switched the code to single precision because I don't presently have access to double precision capable hardware:
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cstdio>
#include <cstdlib>
#include <sys/time.h>
typedef float Real;
void findMaxAndMinGPU(Real* values, int* max_idx, int* min_idx, int n)
{
Real* d_values;
cublasHandle_t handle;
cublasStatus_t stat;
cudaMalloc((void**) &d_values, sizeof(Real) * n);
cudaMemcpy(d_values, values, sizeof(Real) * n, cudaMemcpyHostToDevice);
cublasCreate(&handle);
stat = cublasIsamax(handle, n, d_values, 1, max_idx);
if (stat != CUBLAS_STATUS_SUCCESS)
printf("Max failed\n");
stat = cublasIsamin(handle, n, d_values, 1, min_idx);
if (stat != CUBLAS_STATUS_SUCCESS)
printf("min failed\n");
cudaFree(d_values);
cublasDestroy(handle);
}
int main(void)
{
const int vmax=1000, nvals=10000;
float vals[nvals];
srand ( time(NULL) );
for(int j=0; j<nvals; j++) {
vals[j] = float(rand() % vmax);
}
int minIdx, maxIdx;
findMaxAndMinGPU(vals, &maxIdx, &minIdx, nvals);
int cmin = 0, cmax=0;
for(int i=1; i<nvals; i++) {
cmin = (vals[i] < vals[cmin]) ? i : cmin;
cmax = (vals[i] > vals[cmax]) ? i : cmax;
}
fprintf(stdout, "%d %d %d %d\n", minIdx, cmin, maxIdx, cmax);
return 0;
}
which when compiled and run gives this:
$ g++ -I/usr/local/cuda/include -L/usr/local/cuda/lib cublastest.cc -lcudart -lcublas
$ ./a.out
273 272 85 84
Note that CUBLAS follows the FORTRAN convention and uses 1-based indexing rather than zero-based indexing, which is why there is a difference of 1 between the CUBLAS and CPU results.
From the description in the documentation (http://docs.nvidia.com/cuda/cublas/index.html#topic_6_1), these routines return the index of the element of maximum magnitude. So if you have { 1, 2, 3, -33, 22, 11 }, the result will be 4, not 5, because abs(-33) > 22.
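A quick way to confirm this behaviour, sketched in single precision to mirror the runnable example above:
// Sketch: shows that the amax routines pick the element of maximum
// *magnitude* and return a 1-based index. Host pointer mode (the default).
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cstdio>
int main(void)
{
    float vals[6] = { 1.0f, 2.0f, 3.0f, -33.0f, 22.0f, 11.0f };
    float *d_vals;
    int idx = 0;
    cudaMalloc((void**)&d_vals, 6 * sizeof(float));
    cudaMemcpy(d_vals, vals, 6 * sizeof(float), cudaMemcpyHostToDevice);
    cublasHandle_t handle;
    cublasCreate(&handle);
    cublasIsamax(handle, 6, d_vals, 1, &idx);
    printf("%d\n", idx); // prints 4: 1-based index of -33, since fabs(-33) > 22
    cublasDestroy(handle);
    cudaFree(d_vals);
    return 0;
}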