CUDA tex1Dfetch() wrong behaviour - c++

I'm very new to CUDA programming and I'm facing a problem that is driving me crazy. Here's what's going on:
I have a very simple program (just for study purposes) in which a 16x16 input image and a 16x16 output image are created. The input image is initialized to values from 0 to 255 and then bound to a texture. The CUDA kernel simply copies the input image to the output image. The input image values are obtained by calling tex1Dfetch(), which returns very strange values in some cases. Please see the code below, the comments inside the kernel, and the output of the program. The code is complete and compilable, so you can create a CUDA project in VC and paste the code into the main ".cu" file.
What am I doing wrong?
I'm using VS 2013 Community and CUDA SDK 6.5 + CUDA integration for VS 2013.
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
texture<unsigned char> tex;
cudaError_t testMyKernel(unsigned char * inputImg, unsigned char * outputImg, int width, int height);
__global__ void myKernel(unsigned char *outImg, int width)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
int idx = row*width + col;
__shared__ unsigned char input;
__shared__ unsigned char input2;
unsigned char *outPix = outImg + idx;
//It fetches a strange value; for example, when idx==0 the fetched value is 51.
//But I expect input==idx (according to the input image initialization).
input = tex1Dfetch(tex, idx);
printf("Fetched for idx=%d: %d\n", idx, input);
*outPix = input;
//Strangely, when I run the following code instead, tex1Dfetch() returns correct values.
if (idx == 0)
{
printf("\nKernel test print:\n");
for (int i = 0; i < 256; i++)
{
input2 = tex1Dfetch(tex, i);
printf("%d,", input2);
}
}
}
int main()
{
const int width = 16;
const int height = 16;
const int count = width * height;
unsigned char imgIn[count];
unsigned char imgOut[count];
for (int i = 0; i < count; i++)
{
imgIn[i] = i;
}
cudaError_t cudaStatus = testMyKernel(imgIn, imgOut, width, height);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "testMyKernel failed!");
return 1;
}
printf("\n\nOutput values:\n");
for (int i = 0; i < height; i++)
{
for (int j = 0; j < width; j++)
{
printf("%d,", imgOut[i * width + j]);
}
}
printf("\n");
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
getchar();
return 0;
}
cudaError_t testMyKernel(unsigned char * inputImg, unsigned char * outputImg, int width, int height)
{
unsigned char * dev_in;
unsigned char * dev_out;
size_t size = width * height * sizeof(unsigned char);
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// input data
cudaStatus = cudaMalloc((void**)&dev_in, size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_in, inputImg, size, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaBindTexture(NULL, tex, dev_in, size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaBindTexture failed!");
goto Error;
}
// output data
cudaStatus = cudaMalloc((void**)&dev_out, size);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
dim3 threadsPerBlock(4, 4);
int blk_x = width / threadsPerBlock.x;
int blk_y = height / threadsPerBlock.y;
dim3 numBlocks(blk_x, blk_y);
// Launch a kernel on the GPU with one thread for each element.
myKernel<<<numBlocks, threadsPerBlock>>>(dev_out, width);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "myKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching myKernel!\n", cudaStatus);
goto Error;
}
//copy output image to host
cudaStatus = cudaMemcpy(outputImg, dev_out, size, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaUnbindTexture(tex);
cudaFree(dev_in);
cudaFree(dev_out);
return cudaStatus;
}
And here is the output of the program (truncated a little bit):
Fetched for idx=0: 51
Fetched for idx=1: 51
Fetched for idx=2: 51
Fetched for idx=3: 51
Fetched for idx=16: 51
Fetched for idx=17: 51
Fetched for idx=18: 51
Fetched for idx=19: 51
Fetched for idx=32: 51
Fetched for idx=33: 51
Fetched for idx=34: 51
Fetched for idx=35: 51
Fetched for idx=48: 51
Fetched for idx=49: 51
Fetched for idx=50: 51
Fetched for idx=51: 51
Fetched for idx=192: 243
Fetched for idx=193: 243
Fetched for idx=194: 243
Fetched for idx=195: 243
Fetched for idx=208: 243
Fetched for idx=209: 243
Fetched for idx=210: 243
Fetched for idx=211: 243
Fetched for idx=224: 243
etc... (output truncated.. see the Output values)
Kernel test print:
0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,
30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56
etc...(correct values)
Output values:
51,51,51,51,55,55,55,55,59,59,59,59,63,63,63,63,51,51,51,51,55,55,55,55,59,59,59
,59,63,63,63,63,51,51,51,51,55,55,55,55,59,59,59,59,63,63,63,63,51,51,51,51,55,55,
etc.. (wrong values)

This line of the kernel
input = tex1Dfetch(tex, idx);
is causing a race condition among the threads of a block. All threads in a block try to fetch a value from the texture into the single __shared__ variable input simultaneously, causing undefined behavior. You should allocate a separate shared memory slot for each thread of the block in the form of a __shared__ array.
For your current case, it may be something like:
__shared__ unsigned char input[16]; //4 x 4 block size
The rest of the kernel should look something like:
int idx_local = threadIdx.y * blockDim.x + threadIdx.x; //local id of thread in a block
input[idx_local] = tex1Dfetch(tex, idx);
printf("Fetched for idx=%d: %d\n", idx, input[idx_local]);
*outPix = input[idx_local];
The code inside the condition at the end of the kernel works fine because, due to the if (idx == 0) condition, only the first thread of the first block does all the processing serially while all other threads remain idle, so the problem disappears in the absence of the race condition.
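Putting it together, a minimal sketch of the corrected kernel (assuming the same 4x4 block configuration; note that since no data is actually shared between threads here, a plain per-thread local variable would avoid the race just as well):
__global__ void myKernel(unsigned char *outImg, int width)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int idx = row * width + col;
    //One shared slot per thread of the 4 x 4 block, so no two threads
    //write to the same location and the race condition is gone.
    __shared__ unsigned char input[16];
    int idx_local = threadIdx.y * blockDim.x + threadIdx.x;
    input[idx_local] = tex1Dfetch(tex, idx);
    outImg[idx] = input[idx_local];
}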

Related

C++/CUDA: Calculating maximum gridSize and blockSize dynamically

I want to find a way to dynamically calculate the grid and block size needed for a calculation. I have run into the issue that the problem I want to handle is simply too large to handle in a single run of the GPU, from a thread-limit perspective. Here is a sample kernel setup which runs into the error that I am having:
__global__ void populateMatrixKernel(char * outMatrix, const int pointsToPopulate)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < pointsToPopulate)
{
outMatrix[i] = 'A';
}
}
cudaError_t populateMatrixCUDA(char * outMatrix, const int pointsToPopulate, cudaDeviceProp &deviceProp)
{
//Device arrays to be used
char * dev_outMatrix = 0;
cudaError_t cudaStatus;
//THIS IS THE CODE HERE I'M WANTING TO REPLACE
//Calculate the block and grid parameters
auto gridDiv = div(pointsToPopulate, deviceProp.maxThreadsPerBlock);
auto gridX = gridDiv.quot;
if (gridDiv.rem != 0)
gridX++; //Round up if we have straggling points to populate
auto blockSize = deviceProp.maxThreadsPerBlock;
int gridSize = min(16 * deviceProp.multiProcessorCount, gridX);
//END REPLACE CODE
//Allocate GPU buffers
cudaStatus = cudaMalloc((void**)&dev_outMatrix, pointsToPopulate * sizeof(char));
if (cudaStatus != cudaSuccess)
{
cerr << "cudaMalloc failed!" << endl;
goto Error;
}
populateMatrixKernel<<<gridSize, blockSize>>>(dev_outMatrix, pointsToPopulate);
//Check for errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess)
{
cerr << "Population launch failed: " << cudaGetErrorString(cudaStatus) << endl;
goto Error;
}
//Wait for threads to finish
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
cerr << "cudaDeviceSynchronize returned error code " << cudaStatus << " after launching visit and bridger analysis kernel!" << endl;
cout << "Cuda failure " << __FILE__ << ":" << __LINE__ << " '" << cudaGetErrorString(cudaStatus);
goto Error;
}
//Copy output to host memory
cudaStatus = cudaMemcpy(outMatrix, dev_outMatrix, pointsToPopulate * sizeof(char), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
cerr << "cudaMemcpy failed!" << endl;
goto Error;
}
Error:
cudaFree(dev_outMatrix);
return cudaStatus;
}
Now, when I test this code using the following testing setup:
//Make sure we can use the graphics card (This calculation would be unreasonable otherwise)
if (cudaSetDevice(0) != cudaSuccess) {
cerr << "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?" << endl;
}
cudaDeviceProp deviceProp;
cudaError_t cudaResult;
cudaResult = cudaGetDeviceProperties(&deviceProp, 0);
if (cudaResult != cudaSuccess)
{
cerr << "cudaGetDeviceProperties failed!" << endl;
}
int pointsToPopulate = 250000 * 300;
auto gpuMatrix = new char[pointsToPopulate];
fill(gpuMatrix, gpuMatrix + pointsToPopulate, 'B');
populateMatrixCUDA(gpuMatrix, pointsToPopulate, deviceProp);
for (int i = 0; i < pointsToPopulate; ++i)
{
if (gpuMatrix[i] != 'A')
{
cout << "ERROR: " << i << endl;
cin.get();
}
}
I get an error at i=81920. Moreover, if I check the memory before and after the execution, all of the memory values after 81920 go from 'B' to null. It seems that this error originates from this line in the kernel execution parameter code:
int gridSize = min(16 * deviceProp.multiProcessorCount, gridX);
For my graphics card (GTX 980M) I get a value of 5 for deviceProp.multiProcessorCount, and if I multiply this by 16 and by 1024 (the threads per block) I get 81920. It seems that, while I am fine on the memory side of things, I am getting choked by how many threads I can run. Now, this 16 is just an arbitrary value (chosen after looking at some example code my friend made). I was wondering if there is a way to actually calculate "what 16 should be" based on the GPU's properties instead of setting it arbitrarily. I want to write iterative code that can determine the maximum number of calculations that can be performed at one point in time, and then fill the matrix piece by piece accordingly, but I need to know that maximum value to do this. Does anyone know of a way to calculate these parameters? If any more information is needed, I'm happy to oblige. Thank you!
There is fundamentally nothing wrong with the code you have posted. It is probably close to best practice. But it isn't compatible with the design idiom of your kernel.
As you can see here, your GPU is capable of running 2^31 - 1 or 2147483647 blocks. So you could change the code in question to this:
unsigned int gridSize = min(2147483647u, gridX);
and it should probably work. Better still, don't change that code at all, but change your kernel to something like this:
__global__ void populateMatrixKernel(char * outMatrix, const int pointsToPopulate)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
for(; i < pointsToPopulate; i += blockDim.x * gridDim.x)
{
outMatrix[i] = 'A';
}
}
That way your kernel will emit multiple outputs per thread, and everything should just work as intended.
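To address the "what should 16 be" part of the question directly: since CUDA 6.5 the runtime can propose a launch configuration for you. A minimal sketch using the occupancy API, paired with the grid-stride kernel above (the variable names match the posted code; treat the values as illustrative rather than definitive):
    int blockSize = 0;   // suggested threads per block
    int minGridSize = 0; // smallest grid that can still reach full occupancy
    cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize,
                                       populateMatrixKernel, 0, 0);
    // With a grid-stride loop the grid only has to be large enough to
    // saturate the device; cap the element-count-based grid at that size.
    int gridX = (pointsToPopulate + blockSize - 1) / blockSize;
    int gridSize = min(minGridSize, gridX);
    populateMatrixKernel<<<gridSize, blockSize>>>(dev_outMatrix, pointsToPopulate);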

Varying results from cuBlas

I have implemented the following CUDA code, but I am a little bit confused about its behavior.
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#include "cublas_v2.h"
#include <ctime>
#include <chrono>
#include <string>
#define IDX2F(i,j,ld) ((((j)-1)*(ld))+((i)-1))
void PrintMatrix(float* a, int n)
{
int j, i;
for (j = 1; j <= n; j++)
{
for (i = 1; i <= n; i++)
{
printf("%7.0f", a[IDX2F(i, j, n)]);
}
printf("\n");
}
}
float* CreateMatrix(int n)
{
float* matrix = static_cast<float *>(malloc(n * n * sizeof(float)));
if (!matrix)
{
printf("host memory allocation failed");
return nullptr;
}
for (int j = 1; j <= n; j++)
{
for (int i = 1; i <= n; i++)
{
matrix[IDX2F(i, j, n)] = 2;
}
}
return matrix;
}
long CudaMatrixMultiply(float* matrix, int n)
{
cudaError_t cudaStat;
cublasStatus_t status;
cublasHandle_t handle;
float* deviceMatrix;
cudaStat = cudaMalloc(reinterpret_cast<void**>(&deviceMatrix), n * n * sizeof(float));
if (cudaStat != cudaSuccess)
{
printf("device memory allocation failed");
return EXIT_FAILURE;
}
status = cublasCreate(&handle);
if (status != CUBLAS_STATUS_SUCCESS)
{
printf("CUBLAS initialization failed\n");
return EXIT_FAILURE;
}
status = cublasSetMatrix(n, n, sizeof(float), matrix, n, deviceMatrix, n);
if (status != CUBLAS_STATUS_SUCCESS)
{
printf("data download failed");
cudaFree(deviceMatrix);
cublasDestroy(handle);
return EXIT_FAILURE;
}
float alpha = 1;
float beta = 0;
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n, &alpha, deviceMatrix, n, deviceMatrix, n, &beta, deviceMatrix, n);
status = cublasGetMatrix(n, n, sizeof(float), deviceMatrix, n, matrix, n);
if (status != CUBLAS_STATUS_SUCCESS)
{
printf("data upload failed");
cudaFree(deviceMatrix);
cublasDestroy(handle);
return EXIT_FAILURE;
}
cudaFree(deviceMatrix);
cublasDestroy(handle);
return EXIT_SUCCESS;
}
float* CpuMatrixMultiply(float* matrix, int size)
{
float* result = new float[size * size]();
// Copied from https://msdn.microsoft.com/en-us/library/hh873134.aspx
for (int row = 1; row <= size; row++)
{
for (int col = 1; col <= size; col++)
{
// Multiply the row of A by the column of B to get the row, column of product.
for (int inner = 1; inner <= size; inner++)
{
// result[row][col] += matrix[row][inner] * matrix[inner][col];
result[IDX2F(col, row, size)] += matrix[IDX2F(inner, row, size)] * matrix[IDX2F(col, inner, size)];
}
}
}
free(matrix);
return result;
}
int main(void)
{
// printf("Matrix * Matrix Test\n");
int size = 1000;
int runs = 10;
for (int run = 0; run != runs; run++)
{
printf("=== Test %d (Matrix * Matrix, Size = %d) ===\n\n", run + 1, size);
printf("RAM usage is: %f GB\n", size * size * sizeof(float) / 1000000000.0);
float* cpuMatrix = CreateMatrix(size);
cpuMatrix = CpuMatrixMultiply(cpuMatrix, size);
PrintMatrix(cpuMatrix, 5);
float* gpuMatrix = CreateMatrix(size);
CudaMatrixMultiply(gpuMatrix, size);
PrintMatrix(gpuMatrix, 5);
free(cpuMatrix);
free(gpuMatrix);
}
getchar();
return EXIT_SUCCESS;
}
The output of the CPU version of the matrix multiplication is the following, as expected:
4000 4000 4000 4000 4000
4000 4000 4000 4000 4000
4000 4000 4000 4000 4000
4000 4000 4000 4000 4000
4000 4000 4000 4000 4000
but the GPU-computed result is sometimes the right one (see above) and sometimes a wrong, seemingly random one. The first time the loop is executed, the result is always the right one.
I am not able to find a mistake in my code, and it would be great if you could help me.
Additionally, if I set size (in the main method) to e.g. 16000, my driver crashes and I get an error message. I have written a bug report to NVIDIA for this because my PC crashed twice. But maybe it is a programming fault of mine?
Driver: 364.72 (newest one)
SDK: CUDA Toolkit 7.5
Graphics Card: NVidia GeForce GTX 960 (4GB)
Windows 10 64Bit
Driver Error
Display driver NVIDIA Windows kernel Mode Driver, Version 362.72 stopped responding and has successfully recovered.
Edit: With the help of the community I found out that this is a problem with the watchdog timer. See the answer below.
Regarding the second part of the question, following njuffa's remark, you may change the settings for driver behavior to avoid the error when increasing size. Open NSIGHT Monitor and, in Options, General, Microsoft Display Driver, change the "WDDM TDR enabled" field to False.
From the spec, the card's 32-bit FPU throughput should be around 2.4 TFLOPS in single precision; a GEMM on a 16000-sized matrix costs roughly 2 * 16000^3 ≈ 8.2 TFLOP, so the operation should take at minimum about 3.5 seconds. Hence the driver recovery after 2 seconds, the watchdog's default timeout.
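Independent of the TDR setting, a watchdog timeout would at least be visible if the GEMM call and its completion were checked. A hedged sketch of the checks one might add around the cublasSgemm call in CudaMatrixMultiply (cuBLAS calls are asynchronous, so the error from a killed kernel only surfaces at a synchronization point):
    status = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n,
                         &alpha, deviceMatrix, n, deviceMatrix, n,
                         &beta, deviceMatrix, n);
    if (status != CUBLAS_STATUS_SUCCESS)
    {
        printf("cublasSgemm failed\n");
    }
    // Force completion so an execution error (e.g. a watchdog reset)
    // is reported here instead of silently corrupting the next copy.
    cudaStat = cudaDeviceSynchronize();
    if (cudaStat != cudaSuccess)
    {
        printf("kernel execution failed: %s\n", cudaGetErrorString(cudaStat));
    }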

Cuda not giving correct answer when array size is larger than 1,000,000

I have written a simple sum reduction code which seems to work just fine until I increase the array size to 1 million. What can the problem be?
#define BLOCK_SIZE 128
#define ARRAY_SIZE 10000
cudaError_t addWithCuda(const long *input, long *output, int totalBlocks, size_t size);
__global__ void sumKernel(const long *input, long *output)
{
int tid = threadIdx.x;
int bid = blockDim.x * blockIdx.x;
__shared__ long data[BLOCK_SIZE];
if(bid+tid < ARRAY_SIZE)
data[tid] = input[bid+tid];
else
data[tid] = 0;
__syncthreads();
for(int i = BLOCK_SIZE/2; i >= 1; i >>= 1)
{
if(tid < i)
data[tid] += data[tid + i];
__syncthreads();
}
if(tid == 0)
output[blockIdx.x] = data[0];
}
int main()
{
int totalBlocks = ARRAY_SIZE/BLOCK_SIZE;
if(ARRAY_SIZE % BLOCK_SIZE != 0)
totalBlocks++;
long *input = (long*) malloc(ARRAY_SIZE * sizeof(long) );
long *output = (long*) malloc(totalBlocks * sizeof(long) );
for(int i=0; i<ARRAY_SIZE; i++)
{
input[i] = i+1 ;
}
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(input, output, totalBlocks, ARRAY_SIZE);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
long ans = 0;
for(int i =0 ; i < totalBlocks ;i++)
{
ans = ans + output[i];
}
printf("Final Ans : %ld",ans);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
getchar();
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(const long *input, long *output, int totalBlocks, size_t size)
{
long *dev_input = 0;
long *dev_output = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for two vectors (one input, one output) .
cudaStatus = cudaMalloc((void**)&dev_input, size * sizeof(long));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_output, totalBlocks * sizeof(long));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_input, input, size * sizeof(long), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_output, output, (totalBlocks) * sizeof(long), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
sumKernel<<<totalBlocks, BLOCK_SIZE>>>(dev_input, dev_output);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(output, dev_output, totalBlocks * sizeof(long), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_input);
cudaFree(dev_output);
return cudaStatus;
}
And just for reference, in case it has something to do with my GPU device: my GPU is a GTX 650 Ti.
Here is the info about the GPU:
Maximum number of threads per multiprocessor: 2048
Maximum number of threads per block: 1024
Maximum sizes of each dimension of a block: 1024 x 1024 x 64
Maximum sizes of each dimension of a grid: 2147483647 x 65535 x 65535
Maximum memory pitch: 2147483647 bytes
Texture alignment: 512 bytes
Actually, the answer could not fit in a long either (on Windows, long is only 32 bits, and the sum 1 + 2 + ... + 1,000,000 = 500,000,500,000 overflows it), so after switching the data types to long double this issue was resolved. Thanks all!
One problem in your code is that your last cudaMemcpy is not set up correctly:
cudaMemcpy(output, dev_output, totalBlocks * sizeof(int), cudaMemcpyDeviceToHost);
All of your data is long data, so you should be copying using sizeof(long), not sizeof(int).
Another problem in your code is using the wrong printf format specifier for a long datatype:
printf("\n %d \n",output[i]);
use something like this instead:
printf("\n %ld \n",output[i]);
You may also have a problem with a large block count if you are not compiling for the sm_30 architecture. In that case, proper CUDA error checking would identify the problem.
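For reference, a minimal sketch of selecting that architecture at compile time (assuming the usual nvcc toolchain; the file name is illustrative):
nvcc -arch=sm_30 sum_reduction.cu -o sum_reduction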
You don't check for errors after sumKernel<<<totalBlocks, BLOCK_SIZE>>>(dev_input, dev_output);. Normally, if you checked for the last occurred error, it would report invalid configuration argument. Try adding the following after the sumKernel line.
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "sumKernel failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
See this question for more information about the error.

Finding maximum and minimum with CUBLAS

I'm having problems grasping why my function that finds maximum and minimum in a range of doubles using CUBLAS doesn't work properly.
The code is as follows:
void findMaxAndMinGPU(double* values, int* max_idx, int* min_idx, int n)
{
double* d_values;
cublasHandle_t handle;
cublasStatus_t stat;
safecall( cudaMalloc((void**) &d_values, sizeof(double) * n), "cudaMalloc (d_values) in findMaxAndMinGPU");
safecall( cudaMemcpy(d_values, values, sizeof(double) * n, cudaMemcpyHostToDevice), "cudaMemcpy (h_values > d_values) in findMaxAndMinGPU");
cublasCreate(&handle);
stat = cublasIdamax(handle, n, d_values, sizeof(double), max_idx);
if (stat != CUBLAS_STATUS_SUCCESS)
printf("Max failed\n");
stat = cublasIdamin(handle, n, d_values, sizeof(double), min_idx);
if (stat != CUBLAS_STATUS_SUCCESS)
printf("min failed\n");
cudaFree(d_values);
cublasDestroy(handle);
}
where values holds the values to search within, and max_idx and min_idx receive the indices of the found numbers in values.
The results from the CUBLAS calls seem rather random and contain wrong indices.
Anyone with a golly good answer to my problem? I am a tad sad at the moment :(
One of your arguments to both the cublasIdamax and cublasIdamin calls is wrong. The incx argument in BLAS level 1 calls should always be the stride of the input in words, not bytes. So I suspect that you want something more like:
stat = cublasIdamax(handle, n, d_values, 1, max_idx);
if (stat != CUBLAS_STATUS_SUCCESS)
printf("Max failed\n");
stat = cublasIdamin(handle, n, d_values, 1, min_idx);
if (stat != CUBLAS_STATUS_SUCCESS)
printf("min failed\n");
By using sizeof(double) you are telling the routines to use a stride of 8, which will have the calls overrun the allocated storage of the input array and into uninitialised memory. I presume you actually have a stride of 1 in d_values.
Edit: Here is a complete runnable example which works correctly. Note I switched the code to single precision because I don't presently have access to double precision capable hardware:
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cstdio>
#include <cstdlib>
#include <sys/time.h>
#include <ctime>
typedef float Real;
void findMaxAndMinGPU(Real* values, int* max_idx, int* min_idx, int n)
{
Real* d_values;
cublasHandle_t handle;
cublasStatus_t stat;
cudaMalloc((void**) &d_values, sizeof(Real) * n);
cudaMemcpy(d_values, values, sizeof(Real) * n, cudaMemcpyHostToDevice);
cublasCreate(&handle);
stat = cublasIsamax(handle, n, d_values, 1, max_idx);
if (stat != CUBLAS_STATUS_SUCCESS)
printf("Max failed\n");
stat = cublasIsamin(handle, n, d_values, 1, min_idx);
if (stat != CUBLAS_STATUS_SUCCESS)
printf("min failed\n");
cudaFree(d_values);
cublasDestroy(handle);
}
int main(void)
{
const int vmax=1000, nvals=10000;
float vals[nvals];
srand ( time(NULL) );
for(int j=0; j<nvals; j++) {
vals[j] = float(rand() % vmax);
}
int minIdx, maxIdx;
findMaxAndMinGPU(vals, &maxIdx, &minIdx, nvals);
int cmin = 0, cmax=0;
for(int i=1; i<nvals; i++) {
cmin = (vals[i] < vals[cmin]) ? i : cmin;
cmax = (vals[i] > vals[cmax]) ? i : cmax;
}
fprintf(stdout, "%d %d %d %d\n", minIdx, cmin, maxIdx, cmax);
return 0;
}
which when compiled and run gives this:
$ g++ -I/usr/local/cuda/include -L/usr/local/cuda/lib cublastest.cc -lcudart -lcublas
$ ./a.out
273 272 85 84
Note that CUBLAS follows the FORTRAN convention and uses 1-based indexing rather than zero-based indexing, which is why there is a difference of 1 between the CUBLAS and CPU versions.
From the documentation, Idamax finds the element of the maximum magnitude:
http://docs.nvidia.com/cuda/cublas/index.html#topic_6_1
So if you have { 1, 2, 3, -33, 22, 11 }, the result will be 4, not 5, because abs(-33) > 22.
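If what you actually need is the true (signed) maximum and minimum rather than the elements of largest magnitude, a hedged alternative sketch using Thrust, which ships with the CUDA toolkit (the function name here is made up for illustration):
    #include <thrust/device_vector.h>
    #include <thrust/extrema.h>

    void findTrueMaxAndMinGPU(const double* values, int* max_idx, int* min_idx, int n)
    {
        thrust::device_vector<double> d_values(values, values + n);
        // max_element/min_element compare by value (sign included),
        // unlike Idamax/Idamin, which compare by absolute value.
        *max_idx = (int)(thrust::max_element(d_values.begin(), d_values.end())
                         - d_values.begin()); // zero-based index
        *min_idx = (int)(thrust::min_element(d_values.begin(), d_values.end())
                         - d_values.begin());
    }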

CUDA Matrix Addition Seg faults

I just have a question about my CUDA program that I wrote. It allows me to enter the size of the matrix, columns and rows. Say I enter ~1124 and it computes fine. However, say I enter 1149: it seg faults AFTER computing in the device (I think it's seg faulting during the copy back). But say I enter 2000: it seg faults BEFORE computing in the device (I think it seg faults during the copy over). I think my issue is all with memory management. If you guys could point me in the right direction, I'd appreciate it.
I updated the code with how it is called. In the new edit (at the bottom) it contains: sumMatrix (a blank matrix with the size of eleCount1, which is the size of the entire matrix), matrixOne (the first matrix), matrixTwo (the second matrix, allocated the same way matrixOne is), and eleCount1 (the entire size of the matrix). Both matrixOne and matrixTwo are read in from a file.
Wasn't sure if someone needed to see this stuff about my GPU:
Total amount of constant memory: 65536 bytes
Total amount of shared memory per block: 49152 bytes
Total number of registers available per block: 32768
Warp size: 32
Maximum number of threads per block: 1024
Maximum sizes of each dimension of a block: 1024 x 1024 x 64
Maximum sizes of each dimension of a grid: 65535 x 65535 x 65535
The code is:
__global__ void addKernel(float *c, float *a, float *b)
{
int i = threadIdx.x;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
c[idx] = a[idx] + b[idx];
}
cudaError_t addWithCuda(float *c, float *a, float *b, size_t size)
{
float *dev_a = 0;
float *dev_b = 0;
float *dev_c = 0;
cudaError_t cudaStatus;
blocksNeeded=(size/MAXTHREADS)+1;
int threadsPerBlock = MAXTHREADS/blocksNeeded+1;
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
addKernel<<<blocksNeeded, size>>>(dev_c, dev_a, dev_b);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
//Edit: added how the matrices are allocated
float* matrixOne = (float*)malloc(sizeof(float)*file1size);
int matrixIndex = 0;
readFromFile(fd,byte, matrixOneWidth, matrixOneHeight, matrixOne);
//matrixOneHeight--;
eleCount1 = matrixOneHeight*matrixOneWidth;
matrixOne= (float*)realloc(matrixOne,eleCount1*sizeof(float));
//Edit: Added how the addWithCuda is called.
cudaStatus = addWithCuda(sumMatrix, matrixOne,matrixTwo,eleCount1);
//sumMatrix is created after we know how large the matrices are.
float sumMatrix[eleCount1];
You are not testing the bounds of your computation inside the kernel. If the total amount of work does not divide evenly by the block size, some threads will try to write to indices that are outside the output array. I suggest you also pass the size as a parameter to the kernel and introduce the check:
__global__ void addKernel(float *c, float *a, float *b, int size)
{
int i = threadIdx.x;
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if(idx < size) c[idx] = a[idx] + b[idx];
}
I see that you are indexing into arrays a, b and c in your kernel, but you do not check to make sure that the index is within the array bounds. You are therefore writing into memory that you do not own, causing seg faults in random places.
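It may also be worth noting that the launch itself, addKernel<<<blocksNeeded, size>>>(dev_c, dev_a, dev_b), passes the total element count as the threads-per-block argument, which exceeds this device's 1024-threads-per-block limit once the matrix grows. A minimal sketch of a conventional launch configuration to pair with the size-checked kernel above (the fixed block size of 256 is an assumption, not taken from the posted code):
    // Fixed, legal block size; compute how many blocks cover all elements.
    const int threadsPerBlock = 256;
    int blocksNeeded = ((int)size + threadsPerBlock - 1) / threadsPerBlock;
    addKernel<<<blocksNeeded, threadsPerBlock>>>(dev_c, dev_a, dev_b, (int)size);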