cuda calculations give nan after so many loops - c++

This is my first time working with CUDA. I am running some calculations involving cufft and two simple kernels on an NxNxN mesh (N=128). It seems to work fine until, somewhere between 4040 and 4050 loops, the values of my mesh points become nan. On a smaller mesh it can complete more loops before failing. This makes me think there is a memory leak somewhere. I tried running cuda-memcheck but it returned no errors. Can you spot any problems that could be causing this? I have reduced the code to a minimum but it is still long, my apologies. Thank you for your help.
#define _USE_MATH_DEFINES
#include <iostream>
#include <math.h>
#include <cstdlib>
#include <cuda_runtime.h>
#include <cufft.h>
#include <cufftXt.h>
using namespace std;
__global__ void Cube (cufftComplex *data, cufftComplex *data3, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n){
data3[i].x = pow(data[i].x, 3);
data3[i].y = 0;
}
__syncthreads();
}
__global__ void Spectral (cufftComplex *data, cufftComplex *data3, float *w, float *v, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n){
data[i].x = (w[i] * data[i].x + data3[i].x * v[i]) / n;
data[i].y = 0;
}
__syncthreads();
}
float ran();
int main (int argc, char **argv) {
float QQ, C;
float tmax = 5000;
int N = 128;
int n = N*N*N;
float dn = M_PI/8;
float dt = .075;
float psi0 = -0.175;
float r = -0.1;
tmax *= dt;
//setup cuda complex arrays
int mem_size = sizeof(cufftComplex)*n;
int float_mem_size = sizeof(float)*n;
cufftComplex *h_data = (cufftComplex*)malloc(mem_size);
cufftComplex *d_data;
cudaMalloc((void**)&d_data, mem_size);
cufftComplex *h_data3 = (cufftComplex*)malloc(mem_size);
cufftComplex *d_data3;
cudaMalloc((void**)&d_data3, mem_size);
float * h_w = (float*)malloc(float_mem_size);
float *d_w;
cudaMalloc(&d_w, float_mem_size);
float * h_v = (float*)malloc(float_mem_size);
float *d_v;
cudaMalloc(&d_v, float_mem_size);
for (int i=0; i<n; i++){
h_data[i].x = psi0 + r * ran();
h_data[i].y = 0;
}
int nx, ny, nz;
float B = -4 * M_PI * M_PI / ( pow((N*dn),2));
for (int i=0; i<n; i++){
nx = (i % N);
ny = (i / N) % N;
nz = i / (N * N);
if (nx > (N / 2)) {
nx = (N - nx);
}
if (ny > (N / 2)) {
ny = (N - ny);
}
if (nz > (N / 2)) {
nz = (N - nz);
}
QQ = B * (pow(nx, 2.0) + pow(ny, 2.0) + pow(nz, 2.0));
C = -r - 2.0 * QQ - pow(QQ, 2.0);
h_w[i] = exp(QQ * (1.0 - C) * dt);
h_v[i] = (h_w[i] - 1.0) / (1.0 - C);
}
cudaMemcpy(d_w, h_w, float_mem_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_v, h_v, float_mem_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_data, h_data, mem_size, cudaMemcpyHostToDevice);
cufftHandle plan;
cufftPlan3d(&plan, N, N, N, CUFFT_C2C);
int maxThreads=(N>1024)?1024:N;
int threadsPerBlock = maxThreads;
int numBlocks = n/maxThreads;
for (float t = 0; t < tmax; t += dt) {
Cube <<<numBlocks, threadsPerBlock>>> (d_data, d_data3, n);
cudaDeviceSynchronize();
cufftExecC2C(plan, d_data3, d_data3, CUFFT_FORWARD);
cudaDeviceSynchronize();
cufftExecC2C(plan, d_data, d_data, CUFFT_FORWARD);
cudaDeviceSynchronize();
Spectral <<<numBlocks, threadsPerBlock>>> (d_data, d_data3, d_w, d_v, n);
cudaDeviceSynchronize();
cufftExecC2C(plan, d_data, d_data, CUFFT_INVERSE);
cudaDeviceSynchronize();
}
//check output (should be a number)
cudaMemcpy(h_data, d_data, mem_size, cudaMemcpyDeviceToHost);
cout <<h_data[0].x <<endl;
//clean up
cufftDestroy(plan);
cudaFree(d_data);
cudaFree(d_data3);
cudaFree(d_w);
cudaFree(d_v);
free(h_w);
free(h_v);
free(h_data);
free(h_data3);
return 0;
}
float ran(){ //random in range [-1,1]
float u= float (rand())/(RAND_MAX);
//return round(u);
return 2*u-1;
}

Here is my instrumentation of your code so far. When I enabled the device assert in my_assert, it indicated that the d_data3 input at the nan5 point was failing (i.e. it was nan). That indicated that the cufftExecC2C call on d_data3 immediately prior was producing nan data. If you have invalid inputs, I believe an FFT can produce out-of-range results.
The code is instrumented to allow you to dump the data and look at it. You will have to modify dump_data to display whatever it is you wish to see.
When I run the code below, it eventually prints out:
4850.14
4851.14
4852.14
4853.14
4854.14
4855.14
4856.14
4857.14
4858.14
4859.14
4860.14
d_data3 output nan check failed
$
So the nan first occurs on iteration 4860, and the d_data3 input check did not fail, so the nan occurs in d_data3 as a result of the FFT operation in loop iteration 4860. You'll need to study the input and output data to see if you can determine why. There may be some modification to the d_data3 data in the Cube kernel that is causing this. For example, since you are repetitively cubing the data, doesn't it seem reasonable at some point that it would exceed float range?
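To see how quickly repeated cubing blows up, here is a small stand-alone sketch (my own example, not your simulation) showing that a float with magnitude greater than 1 reaches inf within a handful of cubings; once an inf or nan enters the FFT input, the transform output will generally be nan as well:
#include <cmath>
#include <cstdio>

int main() {
    float v = 1.5f;                     // any magnitude > 1 grows without bound under cubing
    for (int it = 0; it < 10; ++it) {
        v = v * v * v;                  // the same operation the Cube kernel performs
        printf("cubing %d: %g\n", it, v);
        if (std::isinf(v)) { printf("overflowed float range at step %d\n", it); break; }
    }
    return 0;
}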
Here's my instrumented code:
#include <iostream>
#include <math.h>
#include <cstdlib>
#include <cuda_runtime.h>
#include <cufft.h>
#include <cufftXt.h>
#include <assert.h>
#include <stdio.h>
using namespace std;
__host__ __device__ void my_assert(bool cond){
//assert(cond);
}
__global__ void Cube (cufftComplex *data, cufftComplex *data3, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n){
float temp = data[i].x;
if (isnan(temp)) {printf("nan1: %d\n", i); my_assert(0);}
data3[i].x = pow(data[i].x, 3);
if (isnan(data3[i].x)) {printf("nan2: %d %f\n", i, data[i].x); my_assert(0);}
data3[i].y = 0;
}
__syncthreads();
}
__global__ void Spectral (cufftComplex *data, cufftComplex *data3, float *w, float *v, int n) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n){
float temp1 = w[i];
if (isnan(temp1)) {printf("nan3: %d\n", i); my_assert(0);}
float temp2 = data[i].x;
if (isnan(temp2)) {printf("nan4: %d\n", i); my_assert(0);}
float temp3 = data3[i].x;
if (isnan(temp3)) {printf("nan5: %d\n", i); my_assert(0);}
float temp4 = v[i];
if (isnan(temp4)) {printf("nan6: %d\n", i); my_assert(0);}
data[i].x = (w[i] * data[i].x + data3[i].x * v[i]) / n;
if (isnan(data[i].x)) {printf("nan7: %d, %f, %f, %f, %f, %d\n",i, temp1, temp2, temp3, temp4, n); my_assert(0);}
data[i].y = 0;
}
__syncthreads();
}
__global__ void nan_kernel(cufftComplex *d, int len, bool *res){
int idx=threadIdx.x+blockDim.x*blockIdx.x;
if (idx < len)
if (isnan(d[idx].x) || isnan(d[idx].y)) *res = true;
}
bool *d_nan;
bool checknan(cufftComplex *d, int len){
bool h_nan = false;
cudaMemcpy(d_nan, &h_nan, sizeof(bool), cudaMemcpyHostToDevice);
nan_kernel<<<(len/1024)+1, 1024>>>(d, len, d_nan);
cudaMemcpy(&h_nan, d_nan, sizeof(bool), cudaMemcpyDeviceToHost);
return h_nan;
}
void dump_data(cufftComplex *d1, cufftComplex *d2, int len)
{
// add code here to spit out the data however you would like to see it
// perhaps to a file
std::cout << "input: output: " << std::endl;
for (int i = 0; i < len; i++)
std::cout << d1[i].x << "," << d1[i].y << " " << d2[i].x << "," << d2[i].y << std::endl;
};
float ran();
int main (int argc, char **argv) {
float QQ, C;
float tmax = 5000;
int N = 128;
int n = N*N*N;
float dn = M_PI/8;
float dt = .075;
float psi0 = -0.175;
float r = -0.1;
tmax *= dt;
//setup cuda complex arrays
int mem_size = sizeof(cufftComplex)*n;
int float_mem_size = sizeof(float)*n;
cufftComplex *h_data = (cufftComplex*)malloc(mem_size);
cufftComplex *d_data;
cudaMalloc((void**)&d_data, mem_size);
cufftComplex *h_data3 = (cufftComplex*)malloc(mem_size);
cufftComplex *d_data3;
cudaMalloc((void**)&d_data3, mem_size);
float * h_w = (float*)malloc(float_mem_size);
float *d_w;
cudaMalloc(&d_w, float_mem_size);
float * h_v = (float*)malloc(float_mem_size);
float *d_v;
cudaMalloc(&d_v, float_mem_size);
for (int i=0; i<n; i++){
h_data[i].x = psi0 + r * ran();
h_data[i].y = 0;
}
int nx, ny, nz;
float B = -4 * M_PI * M_PI / ( pow((N*dn),2));
for (int i=0; i<n; i++){
nx = (i % N);
ny = (i / N) % N;
nz = i / (N * N);
if (nx > (N / 2)) {
nx = (N - nx);
}
if (ny > (N / 2)) {
ny = (N - ny);
}
if (nz > (N / 2)) {
nz = (N - nz);
}
QQ = B * (pow(nx, 2.0) + pow(ny, 2.0) + pow(nz, 2.0));
C = -r - 2.0 * QQ - pow(QQ, 2.0);
h_w[i] = exp(QQ * (1.0 - C) * dt);
h_v[i] = (h_w[i] - 1.0) / (1.0 - C);
}
cudaMemcpy(d_w, h_w, float_mem_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_v, h_v, float_mem_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_data, h_data, mem_size, cudaMemcpyHostToDevice);
cufftHandle plan;
cufftPlan3d(&plan, N, N, N, CUFFT_C2C);
int maxThreads=(N>1024)?1024:N;
int threadsPerBlock = maxThreads;
int numBlocks = n/maxThreads;
cufftResult res;
cudaMalloc(&d_nan, sizeof(bool));
cufftComplex *i3, *o3;
i3 = (cufftComplex *)malloc(mem_size);
o3 = (cufftComplex *)malloc(mem_size);
std::cout << "start loop" << std::endl;
for (float t = 0; t < tmax; t += dt) {
std::cout << t/dt << std::endl;
Cube <<<numBlocks, threadsPerBlock>>> (d_data, d_data3, n);
cudaDeviceSynchronize();
cudaMemcpy(i3, d_data3, mem_size, cudaMemcpyDeviceToHost);
if (checknan(d_data3, n)) {std::cout << "d_data3 input nan check failed" << std::endl; return -1;}
res = cufftExecC2C(plan, d_data3, d_data3, CUFFT_FORWARD);
if (res != CUFFT_SUCCESS) {std::cout << "cufft1 error: " << (int)res << " , " << t/dt << std::endl; return 1;}
cudaDeviceSynchronize();
if (checknan(d_data3, n)) {std::cout << "d_data3 output nan check failed" << std::endl; cudaMemcpy(o3, d_data3, mem_size, cudaMemcpyDeviceToHost); dump_data(i3, o3, n); return -1;}
res = cufftExecC2C(plan, d_data, d_data, CUFFT_FORWARD);
if (res != CUFFT_SUCCESS) {std::cout << "cufft2 error: " << (int)res << " , " << t/dt << std::endl; return 1;}
cudaDeviceSynchronize();
Spectral <<<numBlocks, threadsPerBlock>>> (d_data, d_data3, d_w, d_v, n);
cudaDeviceSynchronize();
res = cufftExecC2C(plan, d_data, d_data, CUFFT_INVERSE);
if (res != CUFFT_SUCCESS) {std::cout << "cufft3 error: " << (int)res << " , " << t/dt << std::endl; return 1;}
cudaDeviceSynchronize();
}
//check output (should be a number)
cudaMemcpy(h_data, d_data, mem_size, cudaMemcpyDeviceToHost);
cout <<h_data[0].x <<endl;
cudaError_t cres = cudaGetLastError();
if (cres != cudaSuccess) std::cout << "cuda error: " << cudaGetErrorString(cres) << std::endl;
//clean up
cufftDestroy(plan);
cudaFree(d_data);
cudaFree(d_data3);
cudaFree(d_w);
cudaFree(d_v);
free(h_w);
free(h_v);
free(h_data);
free(h_data3);
return 0;
}
float ran(){ //random in range [-1,1]
float u= float (rand())/(RAND_MAX);
//return round(u);
return 2*u-1;
}
EDIT:
After some addition of printout code to dump_data (see modification above) I see this:
...
4859.14
4860.14
d_data3 output nan check failed
input: output:
3.37127e+19,0 nan,nan
3.21072e+19,0 nan,nan
2.76453e+19,0 nan,nan
2.13248e+19,0 nan,nan
1.44669e+19,0 nan,nan
8.37214e+18,0 nan,nan
3.93645e+18,0 nan,nan
1.35501e+18,0 nan,nan
2.55741e+17,0 nan,nan
5.96468e+15,0 nan,nan
-1.36656e+16,0 nan,nan
-2.33688e+17,0 nan,nan
-8.37407e+17,0 nan,nan
-1.79915e+18,0 nan,nan
-2.96302e+18,0 nan,nan
-4.11485e+18,0 nan,nan
-5.03876e+18,0 nan,nan
-5.57617e+18,0 nan,nan
-5.65307e+18,0 nan,nan
-5.28957e+18,0 nan,nan
-4.5872e+18,0 nan,nan
-3.68309e+18,0 nan,nan
...
I'm not an FFT expert, but it might be the case that if you do an FFT on a large array filled with large values, using float precision, that overflow may occur. If you only need to get to 5000 iterations and you're failing at 4860, you might get there if you change all your datatypes to double from float, but I'm not sure about the numerical sense of what you are doing here.
Finally, note that both cufft and fftw perform un-normalized transforms. This may be playing a role in the seeming growth of magnitudes in your data set. As I stated already, I'm not familiar with the arithmetic or algorithm you are trying to implement here.
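For illustration only (a sketch of the general pattern, not a statement about where your existing /n factor belongs): since cuFFT transforms are un-normalized, a forward + inverse C2C pair gains a factor of N*N*N, which is usually removed by an explicit scaling pass such as:
__global__ void Normalize(cufftComplex *data, int n) {
    // n is the total transform size, here N*N*N
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        data[i].x /= n;
        data[i].y /= n;
    }
}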

Is it possible that you have a float underflow happening around iteration 4040? Taking the cube of your data3 would lead me to check out that possibility. It is pretty easy to spiral into an underflow on a float32 if you're not careful. You could throw a check in there to limit your value to some minimum epsilon to prevent this.
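A sketch of the kind of guard meant above (the kernel name and threshold are illustrative, not from the original code); it clamps tiny magnitudes to a floor before cubing:
__global__ void CubeClamped(cufftComplex *data, cufftComplex *data3, int n) {
    const float eps = 1e-20f;                            // assumed floor; tune for your problem
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        float v = data[i].x;
        if (fabsf(v) < eps) v = (v < 0.0f) ? -eps : eps; // keep the value away from underflow
        data3[i].x = v * v * v;
        data3[i].y = 0.0f;
    }
}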

Related

How to access dynamically allocated array in CUDA

Here I'm trying to access a dynamically allocated array in CUDA. However, after running, the output is C[0][0] = 0. Am I accessing the allocated array correctly? I think the way I'm copying the arrays is probably correct, and for some reason the value of C has not been changed on the device.
#include<iostream>
using namespace std;
__global__ void add_matrix(float *A, float *B, float *C, int n) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
if ((i < n) && (j < n)){
C[i*n+j] = A[i*n+j] + B[i*n+j];
}
}
int main(){
const size_t N = 1024;
const size_t size = N * N * sizeof(float);
float *A, *B, *C;
A = (float*) malloc(size);
B = (float*) malloc(size);
C = (float*) malloc(size);
for (size_t i=0; i<N*N; i++){
A[i] = 5.0;
B[i] = 6.0;
}
float *A_d, *B_d, *C_d;
cudaMalloc((void**)&A_d, size);
cudaMalloc((void**)&B_d, size);
cudaMalloc((void**)&C_d, size);
auto code = cudaMemcpy(A_d, A, size, cudaMemcpyHostToDevice);
if (code != cudaSuccess){
cout << "Error copying A to device" << endl;
}
code = cudaMemcpy(B_d, B, size, cudaMemcpyHostToDevice);
if (code != cudaSuccess){
cout << "Error copying B to device" << endl;
}
dim3 threads(N, N);
dim3 blocks(1,1);
add_matrix<<<blocks, threads>>>(A_d, B_d, C_d, N);
code = cudaMemcpy(C, C_d, size, cudaMemcpyDeviceToHost);
if (code != cudaSuccess){
cout << "Error copying C from device" << endl;
}
std::cout << "C[0][0] : " << C[0] << std::endl;
free(A); free(B); free(C);
cudaFree(A_d); cudaFree(B_d); cudaFree(C_d);
return 0;
}
The problem was arranging the blocks. I totally forgot each block can have a limited number of threads. We can obtain the maximum threads per block by reading the maxThreadsPerBlock property with cudaDeviceGetAttribute. It seems the Colab GPU supports 1024 threads in each block, so I changed the arrangement this way:
dim3 threads(32,32);
dim3 blocks(32,32);
And it worked.
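For reference, a sketch of how the limit could be queried at runtime instead of hard-coding it (device 0 assumed, error checking omitted):
int maxThreadsPerBlock = 0;
cudaDeviceGetAttribute(&maxThreadsPerBlock, cudaDevAttrMaxThreadsPerBlock, 0);

dim3 threads(32, 32);                               // 1024 threads, at the limit on many GPUs
dim3 blocks((N + threads.x - 1) / threads.x,        // round up so every element is covered
            (N + threads.y - 1) / threads.y);
if ((int)(threads.x * threads.y) <= maxThreadsPerBlock)
    add_matrix<<<blocks, threads>>>(A_d, B_d, C_d, N);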

cudaMemcpyFromSymbol and cudaMemcpyToSymbol always return cudaErrorInvalidSymbol (13) error

I have a problem calling the cudaMemcpyFrom(To)Symbol functions in the CUDA C++ API. Alternative ideas for storing some parameters between blocks are welcome. Below I've provided some example code that doesn't work correctly.
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cstdlib>
#include <ctime>
int avgHost(int*, int);
cudaError_t cudaError;
__device__ int getGlobalIdx()
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z) + (threadIdx.z * (blockDim.x * blockDim.y)) + (threadIdx.y * blockDim.x) + threadIdx.x;
return threadId;
}
__device__ int avg;
__device__ int count;
__device__ int sum;
__global__ void avgKernel(const int *arr)
{
auto idx = getGlobalIdx();
count++;
sum += arr[idx];
avg = sum / count;
}
int main(int argc, char ** argv)
{
const int arraySize = 128;
auto arr1 = (int*)__vcrt_malloc_normal(arraySize * sizeof(int));
srand(time(NULL));
for (size_t i = 0; i < arraySize; i++)
{
arr1[i] = rand() % 100;
}
auto hostAvg = avgHost(arr1, arraySize);
fprintf_s(stdout, "AVG: %d", hostAvg);
free(arr1);
return 0;
}
int avgHost(int* arr, int arraySize)
{
int *dArray = nullptr;
cudaSetDevice(0);
cudaError = cudaMemcpyToSymbol((void *)count, (void*)0, sizeof(int), 0, cudaMemcpyHostToDevice);
if (cudaError)
{
fprintf_s(stderr, "%s\t%d\n", cudaGetErrorString(cudaError), __LINE__);
return -1;
}
cudaMalloc((void**)&dArray, arraySize * sizeof(int));
cudaMemcpy(dArray, arr, arraySize * sizeof(int), cudaMemcpyKind::cudaMemcpyHostToDevice);
avgKernel <<<1, arraySize>>> (dArray);
cudaDeviceSynchronize();
int hostResult = -1;
cudaError = cudaMemcpyFromSymbol(&hostResult, (void *)avg, sizeof(int), 0, cudaMemcpyDeviceToHost);
if (cudaError)
{
fprintf_s(stderr, "%s\t%d\n", cudaGetErrorString(cudaError), __LINE__);
}
cudaFree(dArray);
return hostResult;
}
When I launch the code, the system prints:
invalid device symbol 55 AVG: -1
C:\Users\Administrator\source\repos\CudaTests\x64\Debug\cudabase.exe
(process 18152) exited with code 0.
Any ideas?
Remove these lines, because the device variable "count" is always 0 before the kernel launch.
cudaError = cudaMemcpyToSymbol((void *)count, (void*)0, sizeof(int), 0, cudaMemcpyHostToDevice);
if (cudaError)
{
fprintf_s(stderr, "%s\t%d\n", cudaGetErrorString(cudaError), __LINE__);
return -1;
}
EDIT
cudaError = cudaMemcpyFromSymbol(&hostResult, (void *)avg, sizeof(int), 0, CudaMemcpyDeviceToHost);
TO
cudaError = cudaMemcpyFromSymbol(&hostResult, avg, sizeof(int), 0, cudaMemcpyDeviceToHost);
(remove the cast from the parameter "avg"). IntelliSense reports an error, but the code compiles and runs successfully.
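For comparison, a minimal sketch of symbol copies done by name (my own example, not the original program): pass the __device__ variable itself rather than a casted value.
#include <cuda_runtime.h>

__device__ int avg;

int readAvg() {
    int zero = 0;
    cudaMemcpyToSymbol(avg, &zero, sizeof(int));      // host -> device symbol
    // ... launch kernels that update avg ...
    int hostAvg = -1;
    cudaMemcpyFromSymbol(&hostAvg, avg, sizeof(int)); // device symbol -> host
    return hostAvg;
}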

2D array CUDA problems

I'm currently struggling to properly work with 2D arrays within my CUDA kernel. 1D was fine, but so far I've had no luck moving on to 2D. Here is my host function and kernel:
__global__ void add_d2D(double *x, double *y,double *z, int n, int m){
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x){
for(int j = blockIdx.y * blockDim.y + threadIdx.y; j < m; j += blockDim.y * gridDim.y){
z[i*m + j] = x[i*m + j] + y[i*m + j];
}
}
}
__host__ void add2D(double *a, double *b, double *result, int N, int M){
double *a_d, *b_d, *c_d;
size_t pitcha;
size_t pitchb;
size_t pitchc;
cudaErrchk(cudaMallocPitch(&a_d,&pitcha, M*sizeof(double),N));
cudaErrchk(cudaMallocPitch(&b_d,&pitchb, M*sizeof(double),N));
cudaErrchk(cudaMallocPitch(&c_d,&pitchc, M*sizeof(double),N));
cudaErrchk(cudaMemcpy2D(a_d,M*sizeof(double), a,pitcha, M*sizeof(double),N, cudaMemcpyHostToDevice));
cudaErrchk(cudaMemcpy2D(b_d,M*sizeof(double), b,pitchb, M*sizeof(double),N, cudaMemcpyHostToDevice));
dim3 threadsPerBlock(2, 2);
dim3 numBlocks(N/threadsPerBlock.x, M/threadsPerBlock.y);
add_d2D<<<numBlocks, threadsPerBlock>>>(a_d, b_d, c_d , N, M);
cudaDeviceSynchronize();
cudaErrchk(cudaMemcpy2D(result,M*sizeof(double), c_d,pitchc, M*sizeof(double),N, cudaMemcpyDeviceToHost));
cudaFree(a_d);
cudaFree(b_d);
cudaFree(c_d);
}
And below is my example to test it. It prints out the first 10 values of C correctly, but all others remain 0. I believe the problem is within the kernel, where it can't find the correct values due to the pitch, but I'm not sure how to solve it correctly.
double a[4][10];
double b[4][10];
double c[4][10];
for (int i = 0; i < 4; i ++){
for (int j = 0; j < 10; j ++){
a[i][j] = 0 + rand() % 10;
b[i][j] = 0 + rand() % 10;
}
}
ertiscuda::add2D((double *)a, (double *)b, (double *)c, 4, 10);
for (int i = 0; i < 4; i ++){
for (int j = 0; j < 10; j ++){
std::cout << a[i][j] << " " << b[i][j] << " " << c[i][j] << std::endl;
}
}
You have two mistakes:
Each thread in the kernel should perform one operation rather than all the operations. (For memory reasons you might want to do more, but we will keep this example simple.)
You had the destination and source pitches switched when loading the data onto the device.
Here is a working version
#include <cuda_runtime.h>
#include <stdlib.h>
#include <iostream>
#include <sstream>
#define CUDASAFECALL( err ) cuda_safe_call(err, __FILE__, __LINE__ )
void cuda_safe_call(const cudaError err, const char *file, const int line)
{
if (cudaSuccess != err)
{
std::stringstream error_msg;
error_msg << "cuda_safe_call() failed at " << file << ":" << line << ":" << cudaGetErrorString(err);
const auto error_msg_str = error_msg.str();
std::cout << error_msg_str << std::endl;
throw std::runtime_error(error_msg_str);
}
}
__global__ void add_d2D(const double *x, const double *y, double *z, int n, int m, int m_pitch_elements)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row< n && col <m )
{
auto idx = row*m_pitch_elements + col;
z[idx] = x[idx] + y[idx];
//z[idx] = idx;
}
}
__host__ void add2D(const double *a,const double *b, double *result, int N, int M) {
double *a_d, *b_d, *c_d;
size_t pitcha,pitchb,pitchc;
CUDASAFECALL(cudaMallocPitch(&a_d, &pitcha, M * sizeof(double), N));
CUDASAFECALL(cudaMallocPitch(&b_d, &pitchb, M * sizeof(double), N));
CUDASAFECALL(cudaMallocPitch(&c_d, &pitchc, M * sizeof(double), N));
CUDASAFECALL(cudaMemcpy2D(a_d, pitcha, a, M * sizeof(double), M * sizeof(double), N, cudaMemcpyHostToDevice));
CUDASAFECALL(cudaMemcpy2D(b_d, pitchb, b, M * sizeof(double), M * sizeof(double), N, cudaMemcpyHostToDevice));
dim3 threadsPerBlock(2, 2);
auto safediv = [](auto a, auto b) {return static_cast<unsigned int>(ceil(a / (b*1.0))); };
dim3 numBlocks(safediv(N, threadsPerBlock.x), safediv( M, threadsPerBlock.y));
//all the pitches should be the same
auto pitch_elements = pitcha / sizeof(double);
add_d2D << <numBlocks, threadsPerBlock >> >(a_d, b_d, c_d, N, M, pitch_elements);
CUDASAFECALL(cudaDeviceSynchronize());
CUDASAFECALL(cudaMemcpy2D(result, M * sizeof(double), c_d, pitchc, M * sizeof(double), N, cudaMemcpyDeviceToHost));
CUDASAFECALL(cudaFree(a_d));
CUDASAFECALL(cudaFree(b_d));
CUDASAFECALL(cudaFree(c_d));
}
int main()
{
double a[4][10];
double b[4][10];
double c[4][10];
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 10; j++) {
a[i][j] = 0 + rand() % 10;
b[i][j] = 0 + rand() % 10;
}
}
add2D((double *)a, (double *)b, (double *)c, 4, 10);
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 10; j++) {
std::cout << a[i][j] << " " << b[i][j] << " " << c[i][j]<< "|"<< a[i][j]+ b[i][j] << std::endl;
}
}
return 0;
}

Fast calculation of many scalar products [closed]

Closed. This question needs to be more focused. It is not currently accepting answers.
Closed 8 years ago.
I have a program that calculates 1-10 million scalar products.
It looks like this. ts and A are arrays of about 1000-10000 3D points (each element is a 3x1 vector). For the moment, with ts.size() = 10,000 and A.size() = 1000, my code takes about 41ms. I have not done any parallelization so far. Will the calculations be much faster, for example, in CUDA? I have no such experience. Or is there any other way? Thanks.
for(int i = 0; i< ts.size(); i++){
for(int j = 0; j< A.size(); j++){
if(abs(scalarProduct(ts.at(i), A.at(j))) <epsilon){
score[i] +=1;
}
}
}
This is my implementation of the scalar product.
double scalarProduct(const Point &p1,const Point &p2)
{
return (p1.getX()*p2.getX() + p1.getY()*p2.getY() + p1.getZ()*p2.getZ()) ;
}
Could I use Lapack or Eigen instead, formulating the problem as matrix multiplication? I've done that in Matlab and it is only 5 times slower. Any speedup would be great. With OpenMP I guess I could be 4x faster.
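For reference, the matrix formulation I have in mind would look roughly like this with Eigen (just a sketch, not benchmarked):
#include <Eigen/Dense>
#include <vector>

// T holds ts as an n x 3 matrix (one point per row), P holds A as an m x 3 matrix.
std::vector<int> scoreAll(const Eigen::MatrixXf &T, const Eigen::MatrixXf &P, float epsilon) {
    Eigen::MatrixXf S = T * P.transpose();             // all n*m scalar products in one GEMM
    std::vector<int> score(T.rows());
    for (int i = 0; i < S.rows(); ++i)
        score[i] = static_cast<int>((S.row(i).array().abs() < epsilon).count());
    return score;
}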
This answer consists of two parts:
Accelerating the calculation of many independent scalar products;
Solving your specific problem.
PART 1
The problem of calculating a large number of independent scalar products is an embarrassingly parallel problem. If you aim at accelerating only the mentioned scalar products, retaining the rest of the computation on the CPU, then I agree with Calvin that most of the time will be spent in the device-to-host transfer of the large N*M result matrix. However, if you purge your timing of that transfer, accelerating the calculations will be worthwhile. This is shown by the code below, tested on an Intel Xeon E5-2650 2.00 GHz eight-core processor equipped with an NVIDIA Kepler K20c card, with the following timings:
CPU: 27ms; GPU (without D2H transaction): 0.08ms; GPU (with D2H transaction): 23ms
#include <stdio.h>
#include <time.h>
#define BLOCKSIZE_X 16
#define BLOCKSIZE_Y 16
/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/*******************/
/* iDivUp FUNCTION */
/*******************/
int iDivUp(int a, int b) { return ((a % b) != 0) ? (a / b + 1) : (a / b); }
/*************************************************/
/* DEVICE FUNCTION PERFORMING THE SCALAR PRODUCT */
/*************************************************/
__host__ __device__ float scalarProduct(float p1x, float p1y, float p1z, float p2x, float p2y, float p2z)
{
return (p1x * p2x + p1y * p2y + p1z * p2z) ;
}
/*******************/
/* KERNEL FUNCTION */
/*******************/
__global__ void kernel(const float* __restrict__ p1x, const float* __restrict__ p1y, const float* __restrict__ p1z,
const float* __restrict__ p2x, const float* __restrict__ p2y, const float* __restrict__ p2z,
float* __restrict__ output, const int N, const int M) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
if ((idx < N) && (idy < M))
output[idy * N + idx] = scalarProduct(p1x[idx], p1y[idx], p1z[idx], p2x[idy], p2y[idy], p2z[idy]);
}
/********/
/* MAIN */
/********/
int main() {
const int N = 10000;
const int M = 1000;
// --- Host side allocations
float *Ax = (float*)malloc(N*sizeof(float));
float *Ay = (float*)malloc(N*sizeof(float));
float *Az = (float*)malloc(N*sizeof(float));
float *Bx = (float*)malloc(M*sizeof(float));
float *By = (float*)malloc(M*sizeof(float));
float *Bz = (float*)malloc(M*sizeof(float));
float *C = (float*)malloc(N*M*sizeof(float));
float *D = (float*)malloc(N*M*sizeof(float));
// --- Device side allocations
float *d_Ax; gpuErrchk(cudaMalloc((void**)&d_Ax, N*sizeof(float)));
float *d_Ay; gpuErrchk(cudaMalloc((void**)&d_Ay, N*sizeof(float)));
float *d_Az; gpuErrchk(cudaMalloc((void**)&d_Az, N*sizeof(float)));
float *d_Bx; gpuErrchk(cudaMalloc((void**)&d_Bx, M*sizeof(float)));
float *d_By; gpuErrchk(cudaMalloc((void**)&d_By, M*sizeof(float)));
float *d_Bz; gpuErrchk(cudaMalloc((void**)&d_Bz, M*sizeof(float)));
float *d_C; gpuErrchk(cudaMalloc((void**)&d_C, N*M*sizeof(float)));
// --- Initialization
srand(time(NULL));
for (int i=0; i<N; i++) {
Ax[i] = rand() / (float)RAND_MAX;
Ay[i] = rand() / (float)RAND_MAX;
Az[i] = rand() / (float)RAND_MAX;
}
for (int i=0; i<M; i++) {
Bx[i] = rand() / (float)RAND_MAX;
By[i] = rand() / (float)RAND_MAX;
Bz[i] = rand() / (float)RAND_MAX;
}
// --- Host side computations
double t1 = clock();
for (int i=0; i<N; i++)
for (int j=0; j<M; j++)
C[i*M + j] = scalarProduct(Ax[i], Ay[i], Az[i], Bx[j], By[j], Bz[j]);
double t2 = clock();
printf("CPU elapsed time: %3.4f ms \n", 1000.*((double)(t2-t1))/CLOCKS_PER_SEC);
// --- Device side computations
dim3 dimBlock(BLOCKSIZE_X, BLOCKSIZE_Y);
dim3 dimGrid(iDivUp(N, BLOCKSIZE_X), iDivUp(M, BLOCKSIZE_Y));
float time;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// --- Host to device memory transfers
gpuErrchk(cudaMemcpy(d_Ax, Ax, N*sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_Ay, Ay, N*sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_Az, Az, N*sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_Bx, Bx, M*sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_By, By, M*sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_Bz, Bz, M*sizeof(float), cudaMemcpyHostToDevice));
// --- Computations
kernel<<<dimGrid, dimBlock>>>(d_Ax, d_Ay, d_Az, d_Bx, d_By, d_Bz, d_C, N, M);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaMemcpy(D, d_C, N*M*sizeof(float), cudaMemcpyDeviceToHost));
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("Elapsed time: %3.4f ms \n", time);
for (int i=0; i<N*M; i++) {
if (D[i] != C[i]) {
printf("Mismatch at i = %i; Host= %f, Device = %f\n", i, C[i], D[i]);
return 1;
}
}
printf("Results match!\n");
cudaDeviceReset();
return 0;
}
PART 2
For solving your specific problem, CUDA will be worthwhile, even considering the D2H memory transfer (which is very cheap). This is confirmed by the code below, tested on the same system as above, with the following timings:
CPU: 46ms; GPU (with D2H transaction): 0.31ms;
#include <stdio.h>
#include <time.h>
#define BLOCKSIZE_X 16
#define BLOCKSIZE_Y 16
/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/*******************/
/* iDivUp FUNCTION */
/*******************/
int iDivUp(int a, int b) { return ((a % b) != 0) ? (a / b + 1) : (a / b); }
/*************************************************/
/* DEVICE FUNCTION PERFORMING THE SCALAR PRODUCT */
/*************************************************/
__host__ __device__ float scalarProduct(float p1x, float p1y, float p1z, float p2x, float p2y, float p2z)
{
return (p1x * p2x + p1y * p2y + p1z * p2z) ;
}
/*******************/
/* KERNEL FUNCTION */
/*******************/
__global__ void kernel(const float* __restrict__ p1x, const float* __restrict__ p1y, const float* __restrict__ p1z,
const float* __restrict__ p2x, const float* __restrict__ p2y, const float* __restrict__ p2z,
float* __restrict__ output, const int N, const int M) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.y * blockDim.y;
if ((idx < N) && (idy < M))
if(abs(scalarProduct(p1x[idx], p1y[idx], p1z[idx], p2x[idy], p2y[idy], p2z[idy])) < 0.01f)
output[idx] = 1.f;
else
output[idx] = 0.f;
}
/********/
/* MAIN */
/********/
int main() {
const int N = 10000;
const int M = 1000;
// --- Host side allocations
float *Ax = (float*)malloc(N*sizeof(float));
float *Ay = (float*)malloc(N*sizeof(float));
float *Az = (float*)malloc(N*sizeof(float));
float *Bx = (float*)malloc(M*sizeof(float));
float *By = (float*)malloc(M*sizeof(float));
float *Bz = (float*)malloc(M*sizeof(float));
float *C = (float*)malloc(N*sizeof(float));
float *D = (float*)malloc(N*sizeof(float));
// --- Device side allocations
float *d_Ax; gpuErrchk(cudaMalloc((void**)&d_Ax, N*sizeof(float)));
float *d_Ay; gpuErrchk(cudaMalloc((void**)&d_Ay, N*sizeof(float)));
float *d_Az; gpuErrchk(cudaMalloc((void**)&d_Az, N*sizeof(float)));
float *d_Bx; gpuErrchk(cudaMalloc((void**)&d_Bx, M*sizeof(float)));
float *d_By; gpuErrchk(cudaMalloc((void**)&d_By, M*sizeof(float)));
float *d_Bz; gpuErrchk(cudaMalloc((void**)&d_Bz, M*sizeof(float)));
float *d_C; gpuErrchk(cudaMalloc((void**)&d_C, N*sizeof(float)));
// --- Initialization
srand(time(NULL));
for (int i=0; i<N; i++) {
Ax[i] = rand() / (float)RAND_MAX;
Ay[i] = rand() / (float)RAND_MAX;
Az[i] = rand() / (float)RAND_MAX;
}
for (int i=0; i<M; i++) {
Bx[i] = rand() / (float)RAND_MAX;
By[i] = rand() / (float)RAND_MAX;
Bz[i] = rand() / (float)RAND_MAX;
}
// --- Host side computations
double t1 = clock();
for (int i=0; i<N; i++)
for (int j=0; j<M; j++)
if(abs(scalarProduct(Ax[i], Ay[i], Az[i], Bx[j], By[j], Bz[j])) < 0.01f)
C[i] = 1.f;
else
C[i] = 0.f;
double t2 = clock();
printf("CPU elapsed time: %3.4f ms \n", 1000.*((double)(t2-t1))/CLOCKS_PER_SEC);
// --- Device side computations
float time;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// --- Host to device memory transfers
gpuErrchk(cudaMemcpy(d_Ax, Ax, N*sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_Ay, Ay, N*sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_Az, Az, N*sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_Bx, Bx, M*sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_By, By, M*sizeof(float), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_Bz, Bz, M*sizeof(float), cudaMemcpyHostToDevice));
// --- Computations
kernel<<<iDivUp(N, BLOCKSIZE_X), BLOCKSIZE_X>>>(d_Ax, d_Ay, d_Az, d_Bx, d_By, d_Bz, d_C, N, M);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
gpuErrchk(cudaMemcpy(D, d_C, N*sizeof(float), cudaMemcpyDeviceToHost));
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
printf("Elapsed time: %3.4f ms \n", time);
for (int i=0; i<N; i++) {
if (D[i] != C[i]) {
printf("Mismatch at i = %i; Host= %f, Device = %f\n", i, C[i], D[i]);
return 1;
}
}
printf("Results match!\n");
cudaDeviceReset();
return 0;
}
Instead of optimising the arithmetic, you should use a better algorithm first.
In most practical situations ts and A are not totally random in each cycle, and you may be able to organise (sort) them spatially and greatly reduce the amount of spatial-metric calculation needed.
If you insist on sticking with the current algorithm, you can enable the compiler to emit SSE code; this should give an instant boost without any programming work.
Since you have to ask this question, the chance that you can further squeeze out cycles by hand-coding with compiler intrinsics is relatively slim.
As for CUDA, for just 10 million dot products the overhead of CPU-RAM to GPU-RAM communication is significant and not worth the trouble.
To parallelize this using MIMD with OpenMP you can do this:
#pragma omp parallel for
for(int i = 0; i< ts.size(); i++){
for(int j = 0; j< A.size(); j++){
if(abs(scalarProduct(ts.at(i), A.at(j))) <epsilon){
score[i] +=1;
}
}
}
You could also consider using SIMD. In that case you should change your data structure and store blocks of points equal to the SIMD width (4 for SSE with floats). Something like
class PointBlock4 {
float x[4];
float y[4];
float z[4];
//
}
Each block has four points. This is obviously more complicated, but it's achievable. You could get a speedup of four as well. Combining SIMD and MIMD you could get a speedup of 16x (with four cores). But for large n your algorithm will become memory bound rather than compute bound, so you will achieve a much lower speedup. In fact your algorithm may already be memory bound, so you might not achieve much with SIMD or MIMD. I would test OpenMP first to see if you gain much.
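For what it's worth, a sketch of how the block-of-four dot product might look with SSE intrinsics (assuming 16-byte-aligned members; the names are illustrative):
#include <xmmintrin.h>

struct PointBlock4 {
    alignas(16) float x[4], y[4], z[4];
};

// Scalar products of the four points in a block against a single point (px, py, pz).
inline __m128 dot4(const PointBlock4 &b, float px, float py, float pz) {
    __m128 r = _mm_mul_ps(_mm_load_ps(b.x), _mm_set1_ps(px));
    r = _mm_add_ps(r, _mm_mul_ps(_mm_load_ps(b.y), _mm_set1_ps(py)));
    r = _mm_add_ps(r, _mm_mul_ps(_mm_load_ps(b.z), _mm_set1_ps(pz)));
    return r;                                          // four results at once
}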

Sparse matrix-vector multiplication in CUDA

I'm trying to implement a matrix-vector multiplication on the GPU (using CUDA).
In my C++ code (CPU), I load the matrix as a dense matrix, and then I perform the matrix-vector multiplication using CUDA. I'm also using shared memory to improve the performance.
How can I load the matrix in an efficient way, knowing that my matrix is a sparse matrix?
Below is my C++ function to load the matrix:
int readMatrix( char* filename, float* &matrix, unsigned int *dim = NULL, int majority = ROW_MAJOR )
{
unsigned int w, h, x, y, num_entries;
float val;
std::ifstream file( filename );
if ( file )
{
file >> h >> w >> num_entries;
cout << w << " " << h << " " << num_entries << "\n";
assert( w == h || w == 1 || h == 1 );
if( dim != NULL ) *dim = std::max( w, h );
matrix = new float[ w * h ];
unsigned int i;
for( i = 0; i < num_entries; i++ ){
if( file.eof() ) break;
file >> y >> x >> val;
if( majority == ROW_MAJOR ){
matrix[ w * y + x ] = val;
} else if( majority == COLUMN_MAJOR ){
matrix[ h * x + y ] = val;
}
}
file.close();
if( i == num_entries )
std::cout << "\nFile read successfully\n";
else
std::cout << "\nFile read successfully but seems defective:\n num entries read = " << i << ", entries epected = " << num_entries << "\n";
// print first few elements
if( w == h ){
for( unsigned int i = 0; i < w; i++ ){
printf("\n");
for( unsigned int j = 0; j < h; j++ ){
printf("%.2f ", matrix[ j + w * i ] );
}
}
}
else{
printf("\n");
for( unsigned int j = 0; j < h; j++ ){
printf("%.2f ", matrix[ j ] );
}
}
} else {
std::cout << "Unable to open file\n";
return false;
}
return true;
}
Below is my CUDA Kernel function that handles the matrix-vector multiplication:
__global__ void
_cl_matrix_vector_( float *A, float *b, float *x, int dim )
{
extern __shared__ float vec[];
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
float temp = 0.0;
int vOffs = 0;
//load vector into shared memory
for (int i = 0; i < (dim/blockDim.x) + 1 ; ++i, vOffs+= blockDim.x) {
vec[vOffs + threadIdx.x] = b[vOffs + threadIdx.x];
}
//make sure all threads are synchronized
__syncthreads();
if (idx < dim) {
temp = 0.0;
//dot product (multiplication)
for (int i = 0; i < dim; i++){
temp += A[idx * dim + i] * vec[i];
}
x[idx] = temp;
}
}
What are the necessary changes that I have to make to my CUDA code to take into account that my matrix is a sparse matrix?
I found out from a forum that we can also use padding to be able to optimize the performance, but this requires me to change the way I read the matrix / sort the matrix. Any ideas how to implement this padding in the way I read the matrix and perform the calculation?
This is a very old post, and I want to highlight that cuSPARSE (for some time now) provides routines for multiplication between sparse matrices, or between a sparse matrix and a dense vector.
For the csr format, the relevant routine for the multiplication between a sparse matrix and a dense vector is cusparse<t>csrmv. Below, a fully worked example showing its use.
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <assert.h>
#include "Utilities.cuh"
#include <cuda_runtime.h>
#include <cusparse_v2.h>
/********/
/* MAIN */
/********/
int main()
{
// --- Initialize cuSPARSE
cusparseHandle_t handle; cusparseSafeCall(cusparseCreate(&handle));
/**************************/
/* SETTING UP THE PROBLEM */
/**************************/
const int N = 4; // --- Number of rows and columns
// --- Host side dense matrices
double *h_A_dense = (double*)malloc(N * N * sizeof(double));
double *h_x_dense = (double*)malloc(N * sizeof(double));
double *h_y_dense = (double*)malloc(N * sizeof(double));
// --- Column-major ordering
h_A_dense[0] = 0.4612; h_A_dense[4] = -0.0006; h_A_dense[8] = 0.3566; h_A_dense[12] = 0.0;
h_A_dense[1] = -0.0006; h_A_dense[5] = 0.4640; h_A_dense[9] = 0.0723; h_A_dense[13] = 0.0;
h_A_dense[2] = 0.3566; h_A_dense[6] = 0.0723; h_A_dense[10] = 0.7543; h_A_dense[14] = 0.0;
h_A_dense[3] = 0.; h_A_dense[7] = 0.0; h_A_dense[11] = 0.0; h_A_dense[15] = 0.1;
// --- Initializing the data and result vectors
for (int k = 0; k < N; k++) {
h_x_dense[k] = 1.;
h_y_dense[k] = 0.;
}
// --- Create device arrays and copy host arrays to them
double *d_A_dense; gpuErrchk(cudaMalloc(&d_A_dense, N * N * sizeof(double)));
double *d_x_dense; gpuErrchk(cudaMalloc(&d_x_dense, N * sizeof(double)));
double *d_y_dense; gpuErrchk(cudaMalloc(&d_y_dense, N * sizeof(double)));
gpuErrchk(cudaMemcpy(d_A_dense, h_A_dense, N * N * sizeof(double), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_x_dense, h_x_dense, N * sizeof(double), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_y_dense, h_y_dense, N * sizeof(double), cudaMemcpyHostToDevice));
// --- Descriptor for sparse matrix A
cusparseMatDescr_t descrA; cusparseSafeCall(cusparseCreateMatDescr(&descrA));
cusparseSafeCall(cusparseSetMatType (descrA, CUSPARSE_MATRIX_TYPE_GENERAL));
cusparseSafeCall(cusparseSetMatIndexBase(descrA, CUSPARSE_INDEX_BASE_ONE));
int nnzA = 0; // --- Number of nonzero elements in dense matrix A
const int lda = N; // --- Leading dimension of dense matrix
// --- Device side number of nonzero elements per row of matrix A
int *d_nnzPerVectorA; gpuErrchk(cudaMalloc(&d_nnzPerVectorA, N * sizeof(*d_nnzPerVectorA)));
cusparseSafeCall(cusparseDnnz(handle, CUSPARSE_DIRECTION_ROW, N, N, descrA, d_A_dense, lda, d_nnzPerVectorA, &nnzA));
// --- Host side number of nonzero elements per row of matrix A
int *h_nnzPerVectorA = (int *)malloc(N * sizeof(*h_nnzPerVectorA));
gpuErrchk(cudaMemcpy(h_nnzPerVectorA, d_nnzPerVectorA, N * sizeof(*h_nnzPerVectorA), cudaMemcpyDeviceToHost));
printf("Number of nonzero elements in dense matrix A = %i\n\n", nnzA);
for (int i = 0; i < N; ++i) printf("Number of nonzero elements in row %i for matrix = %i \n", i, h_nnzPerVectorA[i]);
printf("\n");
// --- Device side sparse matrix
double *d_A; gpuErrchk(cudaMalloc(&d_A, nnzA * sizeof(*d_A)));
int *d_A_RowIndices; gpuErrchk(cudaMalloc(&d_A_RowIndices, (N + 1) * sizeof(*d_A_RowIndices)));
int *d_A_ColIndices; gpuErrchk(cudaMalloc(&d_A_ColIndices, nnzA * sizeof(*d_A_ColIndices)));
cusparseSafeCall(cusparseDdense2csr(handle, N, N, descrA, d_A_dense, lda, d_nnzPerVectorA, d_A, d_A_RowIndices, d_A_ColIndices));
// --- Host side sparse matrices
double *h_A = (double *)malloc(nnzA * sizeof(*h_A));
int *h_A_RowIndices = (int *)malloc((N + 1) * sizeof(*h_A_RowIndices));
int *h_A_ColIndices = (int *)malloc(nnzA * sizeof(*h_A_ColIndices));
gpuErrchk(cudaMemcpy(h_A, d_A, nnzA * sizeof(*h_A), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(h_A_RowIndices, d_A_RowIndices, (N + 1) * sizeof(*h_A_RowIndices), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(h_A_ColIndices, d_A_ColIndices, nnzA * sizeof(*h_A_ColIndices), cudaMemcpyDeviceToHost));
printf("\nOriginal matrix A in CSR format\n\n");
for (int i = 0; i < nnzA; ++i) printf("A[%i] = %f ", i, h_A[i]); printf("\n");
printf("\n");
for (int i = 0; i < (N + 1); ++i) printf("h_A_RowIndices[%i] = %i \n", i, h_A_RowIndices[i]); printf("\n");
printf("\n");
for (int i = 0; i < nnzA; ++i) printf("h_A_ColIndices[%i] = %i \n", i, h_A_ColIndices[i]);
printf("\n");
for (int i = 0; i < N; ++i) printf("h_x[%i] = %f \n", i, h_x_dense[i]); printf("\n");
const double alpha = 1.;
const double beta = 0.;
cusparseSafeCall(cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE, N, N, nnzA, &alpha, descrA, d_A, d_A_RowIndices, d_A_ColIndices, d_x_dense,
&beta, d_y_dense));
gpuErrchk(cudaMemcpy(h_y_dense, d_y_dense, N * sizeof(double), cudaMemcpyDeviceToHost));
printf("\nResult vector\n\n");
for (int i = 0; i < N; ++i) printf("h_y[%i] = %f ", i, h_y_dense[i]); printf("\n");
}
You might want to have a look at the very good CUSP library. They implement sparse matrices in a variety of formats (coo, csr, ellpack, diagonal and a hybrid between ellpack and coo). Each with their own advantages as described in the documentation. Most of them are "standard" sparse matrix formats about which you can find more information online. Not a complete answer to your question perhaps, but it should provide a starting point.
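As a reference point for those formats, here is a small host-side sketch (my own, independent of CUSP or cuSPARSE) of building the CSR representation (values, column indices, and row offsets) from a dense matrix:
#include <vector>

struct CsrMatrix {
    std::vector<float> val;     // nonzero values, stored row by row
    std::vector<int>   colIdx;  // column index of each stored value
    std::vector<int>   rowPtr;  // rowPtr[i]..rowPtr[i+1] delimit row i in val/colIdx
};

CsrMatrix denseToCsr(const float *dense, int rows, int cols) {
    CsrMatrix m;
    m.rowPtr.push_back(0);
    for (int i = 0; i < rows; ++i) {
        for (int j = 0; j < cols; ++j) {
            float v = dense[i * cols + j];
            if (v != 0.0f) {
                m.val.push_back(v);
                m.colIdx.push_back(j);
            }
        }
        m.rowPtr.push_back(static_cast<int>(m.val.size()));
    }
    return m;
}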