Looping over 3 dimensional arrays in CUDA to sum their elements - c++

I'm having some problems understanding how to loop over 3 dimensional arrays with a kernel.
This is the code I have so far:
#include <iostream>
#include <ctime>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
using namespace std;
int main()
{
// Array properties
const int width = 1;
const int height = 1;
const int depth = 1;
// Declaration of arrays
float h_A[width][height][depth];
float h_B[width][height][depth];
float h_C[width][height][depth] = {{{0}}};
// Fill up arrays
srand(time(0));
for(int i = 0; i < width; i++){
for(int j = 0; j < height; j++){
for(int z = 0; z < depth; z++){
h_A[i][j][z] = rand()%1000;
h_B[i][j][z] = rand()%1000;
}
}
}
// Declaration of device pointers
cudaPitchedPtr d_A, d_B, d_C;
// Allocating memory in GPU
cudaExtent extent = make_cudaExtent(width*sizeof(float),height,depth);
cudaMalloc3D(&d_A, extent);
cudaMalloc3D(&d_B, extent);
cudaMalloc3D(&d_C, extent);
// Copying memory from host to device
cudaMemcpy3DParms p;
p.srcPtr = make_cudaPitchedPtr(&h_A, sizeof(float)*width, height, depth);
p.extent = extent;
p.kind = cudaMemcpyHostToDevice;
p.dstPtr = d_A;
cudaMemcpy3D(&p);
p.dstPtr = d_B;
cudaMemcpy3D(&p);
p.dstPtr = d_C;
cudaMemcpy3D(&p);
system("pause");
return 0;
}
How do I make a kernel that loops over each element in the arrays and adds them together?

There is an example on page 21 of the CUDA 4.0 programming guide for looping over a 2D array of floats:
// Host code
int width = 64, height = 64;
float* devPtr;
size_t pitch;
cudaMallocPitch(&devPtr, &pitch,
width * sizeof(float), height);
MyKernel<<<100, 512>>>(devPtr, pitch, width, height);
// Device code
__global__ void MyKernel(float* devPtr, size_t pitch, int width, int height)
{
for (int r = 0; r < height; ++r)
{
float* row = (float*)((char*)devPtr + r * pitch);
for (int c = 0; c < width; ++c)
{
float element = row[c];
}
}
}
Rewriting it to sum up the elements should be easy. Additionally, you can refer to this thread. When efficiency is a concern, you might also look at the parallel reduction approach in CUDA. This is used, for example, when implementing Monte Carlo simulations (see the Multi Monte Carlo example).
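For completeness, here is a minimal sketch of how that 2D pattern could be extended to the 3D pitched allocations in the question. It assumes the goal is an element-wise C = A + B, and it deliberately uses a single thread so the looping structure stays obvious; a real implementation would split the loops across threads (and could finish with a parallel reduction if a single total is wanted).
// Device code: element-wise C = A + B over 3D pitched allocations.
// The single-thread launch and the A + B semantics are assumptions.
__global__ void Add3D(cudaPitchedPtr a, cudaPitchedPtr b, cudaPitchedPtr c,
                      int width, int height, int depth)
{
    size_t pitch = a.pitch;              // same extent => same pitch for a, b and c
    size_t slicePitch = pitch * height;  // bytes per z-slice
    for (int z = 0; z < depth; ++z) {
        char* sliceA = (char*)a.ptr + z * slicePitch;
        char* sliceB = (char*)b.ptr + z * slicePitch;
        char* sliceC = (char*)c.ptr + z * slicePitch;
        for (int y = 0; y < height; ++y) {
            float* rowA = (float*)(sliceA + y * pitch);
            float* rowB = (float*)(sliceB + y * pitch);
            float* rowC = (float*)(sliceC + y * pitch);
            for (int x = 0; x < width; ++x)
                rowC[x] = rowA[x] + rowB[x];
        }
    }
}
// Host code (single thread, for clarity only):
// Add3D<<<1, 1>>>(d_A, d_B, d_C, width, height, depth);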

Related

Pass 2D thrust::device_vector Complex Matrix to CUDA kernel function

I'm new to CUDA and I'm trying to move my existing project to the GPU using CUDA.
My code is based on complex matrices and complex buffers.
As a first step, I tried to move this nested for-loop code to CUDA (the rest will be similar):
typedef thrust::complex<double> smp_t;
uint8_t *binbuffer = (uint8_t*) malloc(8 * bufsize * sizeof(uint8_t));
smp_t *sgbuf = (smp_t*) malloc(8 * bufsize * sizeof(smp_t));
smp_t *cnbuf = (smp_t*) malloc(8 * bufsize * sizeof(smp_t));
// Create matrix.
thrust::complex<double> i_unit(0.0, 1.0);
thrust::host_vector<thrust::host_vector<smp_t>> tw(decfactor);
// Fill the Matrix
for (size_t row = 0; row < 8; row++) {
for (size_t col = 0; col < 8; col++) {
std::complex<double> tmp =
exp(-i_unit * 2.0*M_PI * ((double) col*row) / (double)8);
tw[row].push_back(tmp);
}
}
/* The Code To Move to the GPU processing */
for (unsigned int i = 0; i < bufsize; i++) {
for (size_t ch = 0; ch < 8; ch++)
for (size_t k = 0; k < 8; k++)
cnbuf[ch*bufsize + i] += sgbuf[k*bufsize+i] * tw[ch].at(k);
}
This is the code from the .cu file that will replace the current nested for loop:
__global__ void kernel_func(cuDoubleComplex *cnbuf, cuDoubleComplex *sgbuf, smp_t *tw, size_t block_size) {
unsigned int ch = threadIdx.x;
unsigned int k = blockIdx.x;
for (int x = 0; x < block_size; ++x) {
unsigned int sig_index = k*block_size+x;
unsigned int tw_index = ch*k;
unsigned int cn_index = ch*block_size+x;
cuDoubleComplex temp = cuCmul(sgbuf[sig_index], make_cuDoubleComplex(tw[tw_index].real(), tw[tw_index].imag()));
cnbuf[cn_index] = cuCadd(temp, cnbuf[cn_index]);
}
}
void kernel_wrap(
smp_t *cnbuf,
smp_t *sgbuf,
thrust::host_vector<thrust::host_vector<smp_t>>tw,
size_t buffer_size) {
smp_t *d_sgbuf;
smp_t *d_cnbuf;
thrust::device_vector<smp_t> d_tw(8*8);
thrust::copy(&tw[0][0], &tw[7][7], d_tw.begin());
cudaMalloc((void **)&d_sgbuf, buffer_size);
cudaMalloc((void **)&d_cnbuf, buffer_size);
cudaMemcpy(d_sgbuf, sgbuf, buffer_size, cudaMemcpyDeviceToHost);
cudaMemcpy(d_cnbuf, cnbuf, buffer_size, cudaMemcpyDeviceToHost);
thrust::raw_pointer_cast(d_tw.data());
kernel_func<<<8, 8>>>(
reinterpret_cast<cuDoubleComplex*>(d_cnbuf),
reinterpret_cast<cuDoubleComplex*>(d_sgbuf),
thrust::raw_pointer_cast(d_tw.data()),
buffer_size
);
cudaError_t varCudaError1 = cudaGetLastError();
if (varCudaError1 != cudaSuccess)
{
std::cout << "Failed to launch subDelimiterExamine kernel (error code: " << cudaGetErrorString(varCudaError1) << ")!" << std::endl;
exit(EXIT_FAILURE);
}
cudaMemcpy(sgbuf, d_sgbuf, buffer_size, cudaMemcpyHostToDevice);
cudaMemcpy(cnbuf, d_cnbuf, buffer_size, cudaMemcpyHostToDevice);
}
When I run the code, I get the error:
Failed to launch subDelimiterExamine kernel (error code: invalid argument)!
I think the argument causing the trouble is 'd_tw'.
So, my questions are:
What am I doing wrong with the conversion of thrust::host_vector<thrust::host_vector<smp_t>> to thrust::device_vector<smp_t> (from a 2D matrix to one flattened array)?
Is there a better way to work with 2D complex numbers in CUDA?
The documentation about complex arrays in CUDA is very poor; where can I read about working with CUDA complex matrices?
Thanks!!!!
There were various problems. I will list a few, and probably miss some. So please refer to the example code I have given for additional differences.
The most immediate problem is here:
thrust::copy(&tw[0][0], &tw[7][7], d_tw.begin());
This is what is giving rise to the invalid argument error you are seeing. Underneath the hood, thrust is going to try to use a cudaMemcpyAsync operation for this, because this is inherently a copy from host to device. We will fix this by replacing it with an ordinary cudaMemcpy operation, but to understand how to construct that, it's necessary to understand item 2.
You seem to think that a vector of vectors implies contiguous storage. It does not and that statement is not specific to thrust. Since a thrust::host_vector of vectors (or even std::vector of vectors) does not imply contiguous storage, we can't easily construct a single operation, such as cudaMemcpy or thrust::copy to copy this data. Therefore it will be necessary to explicitly flatten it.
Your directions of copy on the cudaMemcpy operations are universally backward. Where you should have had cudaMemcpyHostToDevice you had cudaMemcpyDeviceToHost, and vice-versa.
The CUDA cuComplex.h header file predates thrust, and was provided for a quick C-style method to work with complex numbers. There is no documentation for it - you have to read the file itself and work out how to use it, as you seem to have already done. However, since you are using thrust::complex<> anyway, it's far simpler just to use that coding paradigm, and write your device code to look almost exactly like your host code.
You had various transfer sizes wrong. cudaMemcpy takes a size in bytes to transfer.
What follows is an example, cobbled together from the pieces you have shown, with a variety of "fixes". I'm not claiming it's in any way perfect or correct, but it avoids the issues I have outlined above. Furthermore, depending on whether you compile with or without a -DUSE_KERNEL define, it will either run your "original" host code and display the output, or the kernel code and display the output. According to my testing, the outputs match.
$ cat t1751.cu
#include <thrust/complex.h>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>
#include <cstdint>
#include <cuComplex.h>
typedef thrust::complex<double> smp_t;
__global__ void kernel_func_old(cuDoubleComplex *cnbuf, cuDoubleComplex *sgbuf, smp_t *tw, size_t block_size) {
unsigned int ch = threadIdx.x;
unsigned int k = blockIdx.x;
for (int x = 0; x < block_size; ++x) {
unsigned int sig_index = k*block_size+x;
unsigned int tw_index = ch*k;
unsigned int cn_index = ch*block_size+x;
cuDoubleComplex temp = cuCmul(sgbuf[sig_index], make_cuDoubleComplex(tw[tw_index].real(), tw[tw_index].imag()));
cnbuf[cn_index] = cuCadd(temp, cnbuf[cn_index]);
}
}
__global__ void kernel_func(smp_t *cnbuf, smp_t *sgbuf, smp_t *tw, size_t block_size) {
unsigned row = blockIdx.x;
unsigned col = threadIdx.x;
unsigned idx = row*block_size+col;
for (int k = 0; k < 8; k++)
cnbuf[idx] += sgbuf[k*block_size+col] * tw[row*block_size+k];
}
void kernel_wrap(
smp_t *cnbuf,
smp_t *sgbuf,
thrust::host_vector<thrust::host_vector<smp_t>>tw,
size_t buffer_size) {
smp_t *d_sgbuf;
smp_t *d_cnbuf;
thrust::device_vector<smp_t> d_tw(8*8);
// thrust::copy(&tw[0][0], &tw[7][7], d_tw.begin());
thrust::host_vector<smp_t> htw(buffer_size*buffer_size);
for (int i = 0; i < buffer_size; i++)
for (int j = 0; j < buffer_size; j++)
htw[i*buffer_size + j] = tw[i][j];
cudaMemcpy(thrust::raw_pointer_cast(d_tw.data()), &htw[0], 8*8*sizeof(smp_t), cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_sgbuf, buffer_size*buffer_size*sizeof(smp_t));
cudaMalloc((void **)&d_cnbuf, buffer_size*buffer_size*sizeof(smp_t));
cudaMemcpy(d_sgbuf, sgbuf, buffer_size*buffer_size*sizeof(smp_t), cudaMemcpyHostToDevice);
cudaMemcpy(d_cnbuf, cnbuf, buffer_size*buffer_size*sizeof(smp_t), cudaMemcpyHostToDevice);
thrust::raw_pointer_cast(d_tw.data());
kernel_func<<<8, 8>>>(d_cnbuf,d_sgbuf,thrust::raw_pointer_cast(d_tw.data()),buffer_size);
cudaError_t varCudaError1 = cudaGetLastError();
if (varCudaError1 != cudaSuccess)
{
std::cout << "Failed to launch subDelimiterExamine kernel (error code: " << cudaGetErrorString(varCudaError1) << ")!" << std::endl;
exit(EXIT_FAILURE);
}
// cudaMemcpy(sgbuf, d_sgbuf, buffer_size*buffer_size*sizeof(smp_t), cudaMemcpyDeviceToHost);
cudaMemcpy(cnbuf, d_cnbuf, buffer_size*buffer_size*sizeof(smp_t), cudaMemcpyDeviceToHost);
for (int i = 0; i < 8; i++)
for (int j = 0; j < 8; j++)
std::cout << cnbuf[i*8+j].real() << "," << cnbuf[i*8+j].imag() << std::endl;
}
int main(){
const int bufsize = 8;
const int decfactor = 8;
uint8_t *binbuffer = (uint8_t*) malloc(8 * bufsize * sizeof(uint8_t));
smp_t *sgbuf = (smp_t*) malloc(8 * bufsize * sizeof(smp_t));
smp_t *cnbuf = (smp_t*) malloc(8 * bufsize * sizeof(smp_t));
memset(cnbuf, 0, 8*bufsize*sizeof(smp_t));
// Create matrix.
thrust::complex<double> i_unit(0.0, 1.0);
#ifndef USE_KERNEL
std::vector<std::vector<smp_t> > tw(decfactor);
#else
thrust::host_vector<thrust::host_vector<smp_t>> tw(decfactor);
#endif
// Fill the Matrix
for (size_t row = 0; row < 8; row++) {
for (size_t col = 0; col < 8; col++) {
std::complex<double> tmp = exp(-i_unit * 2.0*M_PI * ((double) col*row) / (double)8);
tw[row].push_back(tmp);
}
}
thrust::complex<double> test(1.0, 1.0);
for (int i = 0; i < 8*8; i++) sgbuf[i] = test;
#ifndef USE_KERNEL
/* The Code To Move to the GPU processing */
for (unsigned int i = 0; i < bufsize; i++) {
for (size_t ch = 0; ch < 8; ch++)
for (size_t k = 0; k < 8; k++)
cnbuf[ch*bufsize + i] += sgbuf[k*bufsize+i] * tw[ch].at(k);
}
for (int i = 0; i < 8; i++)
for (int j = 0; j < 8; j++)
std::cout << cnbuf[i*8+j].real() << "," << cnbuf[i*8+j].imag() << std::endl;
#else
kernel_wrap(cnbuf,sgbuf,tw,bufsize);
#endif
}
$ nvcc -o t1751 t1751.cu -std=c++11
$ ./t1751 >out_host.txt
$ nvcc -o t1751 t1751.cu -std=c++11 -DUSE_KERNEL
$ ./t1751 >out_device.txt
$ diff out_host.txt out_device.txt
$
Remember, this is mostly your code, I am not claiming it is correct, or defect-free, or suitable for any particular purpose. Use it at your own risk.

How to call existing host function from device function in cuda [closed]

Closed. This question does not meet Stack Overflow guidelines. It is not currently accepting answers.
Closed 8 years ago.
I have seen a similar question here
However, I could not get an exact answer there, and it was written in 2012.
I am trying to call the cublasStatus_t cublasSgbmv(...) function, which is defined in "cublas_v2.h", from a __global__ function. However, I could not get the dynamic parallelism feature to work. I only have one source.cu file. I have read that I should compile it in a way that separates the device and host code, and then link these outputs.
Is there anyone who knows how to do it, or a good source that explains it?
Thanks in advance
Edit: if downvoted, please at least explain the reason so I can learn from my mistake.
Edit 2:
My specific problem is that I'm using the following code in my Source.cu:
#include <iostream>
#include <vector>
#include <cuda.h>
#include <cstdio>
#include <stdio.h>
#include <device_launch_parameters.h>
#include <stdlib.h> //srand(), rand()
#include <time.h>
#include <builtin_types.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#define IDX2C(i ,j , ld ) ((( j )*( ld ))+( i ))
#define HEIGHT 4
#define WIDTH 4
#define V 4
#define KL 2
#define KU 1
#define THREADS_PER_BLOCK 512
#pragma comment(lib, "cublas")
//#pragma comment(lib, "helper_cuda")
using namespace std;
void create_Matrix(int* matrix, int width, int height){
int i, len;
len = height * width;
srand(time(NULL));
for (i = 0; i < len; i++){
matrix[i] = rand() % 10 + 1; //generates number between 1-10
}
}
template <typename T>
void print_vector(T* vector, int len){
for (int i = 0; i < len; i++)
cout << vector[i] << " ";
cout << endl;
}
template <typename T>
void creating_bandedMatrix(T* bandedMatrix, int height, int width, int ku, int kl){
//fill matrix with zeros at the beginning
int i, len;
len = height * width;
for (i = 0; i < len; i++){
bandedMatrix[i] = 0; // fill with zeros
}
srand(time(NULL));
//filling banded diagonal
int start, end;
for (int i = 0; i < height; i++){
start = i - kl;
if (start < 0)
start = 0;
end = i + ku + 1;
if (end > width)
end = width;
for (int j = start; j < end; j++){
*(bandedMatrix + (i*width) + j) = (float)(rand() % (10) + 1); //rand() / (T)RAND_MAX;;
}
}
}
template <typename T>
void print_matrix(T* matrix, int width, int height){
int len = width*height;
cout << "asdsffffff" << endl;
for (int i = 0; i < len; i++){
if (!(i%width))
cout << endl;
cout << i << ":" <<matrix[i] << " ";
}
cout << endl;
}
template <typename T>
void computeMatrixVectorMultiplication(T* bandedMatrix, T* vector2){
T row_sum = 0;
T* bandedHostResult = (T*)malloc(WIDTH * sizeof(T));
for (int i = 0; i < HEIGHT; i++){
row_sum = 0;
for (int j = 0; j < WIDTH; j++){
row_sum += (*(bandedMatrix + i*WIDTH + j)) * vector2[j];
}
bandedHostResult[i] = row_sum;
}
//priting the result
cout << "\n\nBanded Host Result...\n";
print_vector(bandedHostResult, WIDTH);
}
template <typename T>
void fillLapackMatrix(T* lapack_matrix, T* bandedMatrix, int kl, int ku, int banded_w, int banded_h, int lapack_w, int lapack_h){
int i, j, lapack_i;
int len = lapack_h * lapack_w;
for (i = 0; i < len; i++){
lapack_matrix[i] = 0; // fill with zeros
}
for (i = 0; i < banded_w; i++){
for (j = 0; j < banded_h; j++){
lapack_i = ku + i - j;
*(lapack_matrix + lapack_i*lapack_w + j) = *(bandedMatrix + i*banded_w + j);
//lapack_matrix[lapack_i*lapack_w + j] = bandedMatrix[i*bandedMatrix + j];
}
}
}
__global__ void device_cublasSgbmv(int m,int n,int kl, int ku,float* alpha, float* A, int lda ,float* B,int ldb,float*R, int ldr, float* beta){
int index = blockIdx.x * blockDim.x + threadIdx.x;
cublasHandle_t handle;
cublasCreate(&handle);
cublasOperation_t trans = CUBLAS_OP_N;
float* dev_x;
cudaMalloc((void**)&dev_x,sizeof(float) * n);
if(index < ldr){
cublasSgbmv(handle, trans,m, n, kl, ku, alpha, A, m, B+index*n, 1, beta, R+index*n, 1);
index = 0;
}
}
void fillNormalMatrix(float* B,int h,int w){
for(int i = 0; i < h;i++){
for(int j = 0; j < w;j++){
B[i*w + j] = 1;
}
}
}
int main()
{
cublasStatus_t status;
float *A;
float *x, *y;
float *dev_x, *dev_y;
int incx, incy;
float *dev_A = 0;
float alpha = 1.0f;
float beta = 0.0f;
int matrixSize = WIDTH * HEIGHT;
int i, j;
cublasHandle_t handle;
/* Initialize CUBLAS */
status = cublasCreate(&handle);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! CUBLAS initialization error\n");
return EXIT_FAILURE;
}
//Allocate host memory for the matrices
A = (float *)malloc(matrixSize* sizeof(float));
//Allocate memory for host vectors
x = (float *)malloc(WIDTH * sizeof(float));
y = (float*)malloc(WIDTH * sizeof(float));
// Fill the matrices with test data
creating_bandedMatrix(A, WIDTH, HEIGHT, KU, KL);
cout << "Banded Matrix\n";
print_matrix(A, WIDTH, HEIGHT);
//Fill the vectors with random data
for (i = 0; i < WIDTH; i++){
x[i] = 1;// (float)(rand() % (10) + 1);:
y[i] = (float)(rand() % (10) + 1);
}
cout << "\nvector x...\n";
print_vector(x, WIDTH);
//cout << "\nvector y...\n";
//print_vector(y, WIDTH);
//Allocate device memory for the matrix
if (cudaMalloc((void **)&dev_A, matrixSize * sizeof(float)) != cudaSuccess)
{
fprintf(stderr, "!!!! device memory allocation error (allocate A)\n");
return EXIT_FAILURE;
}
//Allocate device memory for vectors
if (cudaMalloc((void**)&dev_x, WIDTH * sizeof(float)) != cudaSuccess){
fprintf(stderr, "Device Vector Allocation PROBLEM\n");
return EXIT_FAILURE;
}
if (cudaMalloc((void**)&dev_y, WIDTH * sizeof(float)) != cudaSuccess){
fprintf(stderr, "Device Vector Allocation PROBLEM\n");
return EXIT_FAILURE;
}
// Initialize the device vectors with the host vectors
status = cublasSetVector(WIDTH, sizeof(float), x, 1, dev_x, 1);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write x vector)\n");
return EXIT_FAILURE;
}
status = cublasSetVector(WIDTH, sizeof(float), y, 1, dev_y, 1);
if (status != CUBLAS_STATUS_SUCCESS)
{
fprintf(stderr, "!!!! device access error (write y vector)\n");
return EXIT_FAILURE;
}
//initialize matrix with lapack format
int lapack_width = WIDTH > HEIGHT ? HEIGHT : WIDTH;
int lapack_height = KL + KU + 1;
int lapackSize = lapack_height * lapack_width;
float* lapack_matrix = (float*)malloc(lapackSize * sizeof(float));
fillLapackMatrix(lapack_matrix, A, KL, KU, WIDTH, HEIGHT, lapack_width, lapack_height);
cout << "\n\nLAPACK MAtrix\n";
print_matrix(lapack_matrix, lapack_width, lapack_height);
//convert to column column matrix
float* col = (float*)malloc(lapackSize * sizeof(float));
for (i = 0; i < WIDTH; i++){
for (j = 0; j < HEIGHT; j++){
col[i + WIDTH*j] = lapack_matrix[WIDTH*i + j];
}
}
cout << "Lapack Column Based Matrix\n";
print_matrix(col,HEIGHT-1,WIDTH);
//status = cublasSetVector(lapackSize, sizeof(float), A, 1, dev_A, 1);
cublasSetMatrix(HEIGHT, WIDTH, sizeof(float), col, HEIGHT, dev_A, HEIGHT);
cublasOperation_t trans = CUBLAS_OP_N;
incy = incx = 1;
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////// Banded Matrix Matrix Multipllicatio ///////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
float* B,*dev_B,*dev_R,*R;
B = (float*)malloc(WIDTH*HEIGHT*sizeof(float));
R = (float*)malloc(WIDTH*HEIGHT*sizeof(float));
fillNormalMatrix(B,WIDTH,HEIGHT);
cudaMalloc((void**)&dev_B,matrixSize*sizeof(*B));
cudaMalloc((void**)&dev_R,matrixSize*sizeof(*R));
cublasSetMatrix(HEIGHT, WIDTH, sizeof(*B), B, HEIGHT, dev_B, HEIGHT);
cout << "Matrix B\n";
print_matrix(B,HEIGHT,WIDTH);
cout << "gfsdf\n";
device_cublasSgbmv<<<1,4>>>(HEIGHT, WIDTH, KL, KU, &alpha, dev_A, WIDTH, dev_B, HEIGHT, dev_R, HEIGHT,&beta);
cout << "after\n";
cublasGetMatrix(HEIGHT,WIDTH, sizeof (*R) ,dev_R ,WIDTH,R,WIDTH);
getchar();
return 0;
}
and compile it like this:
nvcc -gencode=arch=compute_35,code=sm_35 -lcublas -lcudadevrt -O3 Source.cu -o Source.o -dc
g++ Source.o -lcublas -lcudart
Then I get the following:
In function `__sti____cudaRegisterAll_48_tmpxft_00001f1e_00000000_6_Source_cpp1_ii_ebe2258a()':
tmpxft_00001f1e_00000000-3_lapack_vector.cudafe1.cpp:(.text.startup+0x575): undefined reference to `__cudaRegisterLinkedBinary_48_tmpxft_00001f1e_00000000_6_Source_cpp1_ii_ebe2258a'
collect2: error: ld returned 1 exit status
You can compile and link the code you have now shown with a single command like this:
nvcc -arch=sm_35 -rdc=true -lcublas -lcublas_device -lcudadevrt -o test Source.cu
You may get some warnings like this:
nvlink warning : SM Arch ('sm_35') not found in '/usr/local/cuda/bin/..//lib64/libcublas_device.a:maxwell_sgemm.asm.o'
nvlink warning : SM Arch ('sm_35') not found in '/usr/local/cuda/bin/..//lib64/libcublas_device.a:maxwell_sm50_sgemm.o'
nvlink warning : SM Arch ('sm_35') not found in '/usr/local/cuda/bin/..//lib64/libcublas_device.a:maxwell_sm50_ssyrk.o'
Those can be safely ignored.
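If you do want to keep a two-step compile with a host-side link like the one you attempted, the missing piece is a device-link step between compiling and linking; the undefined __cudaRegisterLinkedBinary reference comes from skipping it. Something along these lines should work on a typical Linux install of that CUDA generation (the library path is an assumption; adjust it for your system):
nvcc -arch=sm_35 -dc Source.cu -o Source.o
nvcc -arch=sm_35 -dlink Source.o -o Source_dlink.o -lcublas_device -lcudadevrt
g++ Source.o Source_dlink.o -o test -L/usr/local/cuda/lib64 -lcublas -lcudadevrt -lcudart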

Arranging the grid size and block size

I have 200 matrices A[i] (whose dimension is 4096*48) and 48 vectors v[j] (whose dimension is 48*1). I want to calculate A[i]*v[j] (i=0:199, j=1:47).
I have been thinking about how to arrange my grid size and block size since yesterday, but I haven't figured out an answer yet. Could anyone give me some advice?
The maximum number of threads per block is 512 in my working environment.
The following is my code. It works correctly; I have checked. But it is slower than MATLAB :(
#include<iostream>
#include <mat.h>
#include <time.h>
#include <cuda_runtime.h>
#include "cuda.h"
using std::cout;
using std::endl;
using namespace cv;
using namespace std;
#include <limits>
#include <iostream>
#include <cstdlib>
using namespace std;
#define kernel_size 48
////////////////////////////////////////////
typedef struct {
int width;
int height;
int stride;
float* elements;
} Matrix;
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
// Load A and B to device memory
Matrix d_A;
d_A.width = d_A.stride = A.width; d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
cudaMalloc(&d_A.elements, size);
cudaMemcpy(d_A.elements, A.elements, size,
cudaMemcpyHostToDevice);
Matrix d_B;
d_B.width = d_B.stride = B.width; d_B.height = B.height;
size = B.width * B.height * sizeof(float);
cudaMalloc(&d_B.elements, size);
cudaMemcpy(d_B.elements, B.elements, size,
cudaMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.width = d_C.stride = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(float);
cudaMalloc(&d_C.elements, size);
// Invoke kernel
dim3 dimBlock(1,B.height);
dim3 dimGrid(A.height, C.width);
MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
// Read C from device memory
cudaMemcpy(C.elements, d_C.elements, size,
cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
}
// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
// Block row and column
int blockCol = blockIdx.y;
int blockRow = blockIdx.x;
float Cvalue = 0;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Loop over all the sub-matrices of A and B that are
// required to compute Csub
// Multiply each pair of sub-matrices together
// and accumulate the results
// Shared memory used to store Asub and Bsub respectively
__shared__ float As[1][kernel_size];
__shared__ float Bs[kernel_size][1];
// Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[0][row] = A.elements[blockRow * A.stride + row+B.height*blockCol];
Bs[row][0] = B.elements[row];
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
// Multiply Asub and Bsub together
for (int e = 0; e < B.height; ++e)
{
Cvalue += As[0][e] * Bs[e][0];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
// Write Csub to device memory
// Each thread writes one element
C.elements[blockRow * C.stride +blockCol]= Cvalue;
}
//////////////////
float * gen_matrix(int n /*row*/, int m /*col*/){
float *A;
//srand(1023);
A = (float *) malloc(n*m*sizeof(float));
for(int row = 0;row < n;row++)
for(int col = 0;col < m;col++) {
A[row*m+col] = rand()%10;
}
/*
// print matrix elements.
for (int i = 0; i < n; ++i) {
for (int j = 0; j < m; ++j)
cout << " [" << i << "," << j << "] " << A[i*m+j] ;
cout << endl;
}
*/
return A;
}
int main()
{
int k=kernel_size;
int s=2000;
int m =4096;
//int m=2;
//int s=1;
int n = k*s;
float *Ae = gen_matrix(m,n);
float *Be = gen_matrix(k,1);
float *Ce=(float *) malloc(m*s*sizeof(float));
Matrix A ={n,m,n,Ae};
Matrix B ={1,k,1,Be};
Matrix C ={s,m,s,Ce};
const clock_t begin_time = clock();
MatMul(A, B, C);
std::cout << float( clock () - begin_time ) / CLOCKS_PER_SEC;
for (int i = 0; i < 3; ++i) {
for (int j = 0; j <7; ++j)
cout << " [" << i << "," << j << "] " << Ce[i*m+j] ;
cout << endl;
}
//check
float *Ce2=(float *) malloc(s*m*sizeof(float));
for (int i = 0; i < m; i++)
{
for (int j = 0; j < s; j++)
{
Ce2[i*s+j]=0;
}
}
for (int i = 0; i < m; i++)
{
for (int j = 0; j < s; j++)
{
for (int ind = 0; ind < k; ind++)
{
Ce2[i*s+j]=Ce2[i*s+j]+Ae[j*k+ind+i*k*s]*Be[ind];
// printf("%f---****%f\n",Ae[j*k+ind+i*k*s],Be[ind]);
}
if (Ce2[i*s+j]!= Ce[i*s+j])
{
printf("%f----%f\n",Ce2[i*s+j],Ce[i*s+j]);
}
}
}
free(Ae);
free(Be);
free(Ce);
}
This is just a matrix-matrix multiplication problem. If you want things to run fast, you should not be writing your own matrix-matrix multiply code. Use CUBLAS Sgemm.
Conceptually, if you arrange your A matrices like this:
[A0]
[A1]
[A2]
...
[A199]
then you will have a new matrix AA that is (4096*200) rows x 48 columns.
Arrange your 48 V vectors (48x1) in a 48x48 matrix (VV):
[V0][V1][V2]...[V47]
(each V vector is a column of the new matrix VV)
You now have a single matrix multiplication problem (AA*VV) that is (4096*200)x48 multiplied by 48x48 which yields a (4096*200) x 48 result. This result has one column vector of length 4096*200 that contains 200 results of the individual matrix-vector multiplications you were trying to do. The 200 results per column * 48 columns combine to give you all of the results that your original problem would create. The first column would contain the results of [V0] multiplied by each of the 200 A matrices, the second column would contain the results of [V1] multiplied by each of the 200 A matrices, etc.
Once you have arranged your data like this, using CUBLAS Sgemm should be the quickest possible approach on the GPU. Note that CUBLAS expects the underlying storage to be column-major, so if you are rearranging your data, you will probably want to keep this in mind. There is a CUDA sample code for CUBLAS matrix multiplication.
In your code it appears you actually have 2000 A matrices, but your question refers to 200. I have used 200 as the example in my answer, but the concept would be the same with 2000 A matrices.
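As a rough illustration of that suggestion, a call along the following lines should do the whole batch in one SGEMM. The names d_AA, d_VV, d_CC and the helper function are hypothetical; it assumes the data has already been rearranged into the column-major layout described above and copied to the device, and error checking is omitted.
#include <cublas_v2.h>

// CC = AA * VV, where AA is (4096*200) x 48, VV is 48 x 48, CC is (4096*200) x 48,
// all stored column-major on the device (the CUBLAS convention).
void multiply_all(cublasHandle_t handle,
                  const float* d_AA, const float* d_VV, float* d_CC)
{
    const int m = 4096 * 200;  // rows of AA and CC
    const int n = 48;          // columns of VV and CC
    const int k = 48;          // columns of AA, rows of VV
    const float alpha = 1.0f, beta = 0.0f;
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                m, n, k,
                &alpha, d_AA, m,   // lda = m (column-major)
                        d_VV, k,   // ldb = k
                &beta,  d_CC, m);  // ldc = m
}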

Copying from cuda 3D memory to linear memory: copied data is not where I expected

Here is my issue:
I have a 3D array of float3 on my device:
int size[3] = {416,464,512};
cudaExtent extent = make_cudaExtent(size[0]*sizeof(float3),size[1],size[2]);
cudaPitchedPtr renderedVolume;
int ret = cudaMalloc3D(&renderedVolume, extent);
size_t pitch = renderedVolume.pitch; //pitch = 5,120
size_t slicePitch = pitch * size[1]; //slicePitch = 2,375,680
Then I work with it and make it full of outstanding data.
After that I wish to copy it on a 1D linear memory on my host:
float *host_memory = (float*)malloc(size[0]*size[1]*size[2]*sizeof(float3));
cudaMemcpy3DParms p = {0};
p.srcPtr = renderedVolume;
p.dstPtr = make_cudaPitchedPtr(host_memory,size[0]*sizeof(float3),size[0],size[1]);
p.extent = make_cudaExtent(size[0]*sizeof(float3),size[1],size[2]);
p.srcPos = make_cudaPos(0,0,0);
p.dstPos = make_cudaPos(0,0,0);
p.kind=cudaMemcpyDeviceToHost;
cudaMemcpy3D(&p);
I am comparing the result in host_memory with the data I initially wrote to renderedVolume (my_data) and with the data I read from my 3D memory, slice by slice:
float* test1 = (float*)malloc(size[0]*size[1]*sizeof(float3));
cudaMemcpy(test1, myData, size[0]*size[1]*sizeof(float3) , cudaMemcpyDeviceToHost);
float* test2 = (float*)malloc(size[0]*size[1]*sizeof(float3));
cudaMemcpy(test2,(char*)renderedVolume.ptr + slicePitch * i,size[0]*size[1]*sizeof(float3), cudaMemcpyDeviceToHost);
Problem:
The first slice (i=0) is ok, I have the same data in host_memory, test1 and test2.
In the second slice, I have the same data in test1 and test2. However, I should find this data at host_memory+579072 (= the number of floats per slice, also the height*pitch of the destination pitched pointer), and I find it at host_memory+577504. It is off by 1568 bytes, which corresponds to nothing that I am aware of, which is why I would very much appreciate it if any of you have an idea of what the problem might be in my code.
This is a late answer provided to remove this question from the unanswered list.
Below, I'm providing full code showing how to allocate 3D memory with cudaMalloc3D, move host-allocated 1D memory to 3D device memory with cudaMemcpy3D, perform some operations on the 3D device data with the test_kernel_3D __global__ function, and move the 3D result data back to 1D host memory, again with cudaMemcpy3D.
The __global__ function test_kernel_3D squares each element of the 3D device memory. In particular, each thread of a 2D grid takes care of performing a for loop along the "depth" dimension.
#include<stdio.h>
#include<cuda.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
#include<conio.h>
#define BLOCKSIZE_x 16
#define BLOCKSIZE_y 16
#define N 128
#define M 64
#define W 16
/*****************/
/* CUDA MEMCHECK */
/*****************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) { getch(); exit(code); }
}
}
/*******************/
/* iDivUp FUNCTION */
/*******************/
int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); }
/******************/
/* TEST KERNEL 3D */
/******************/
__global__ void test_kernel_3D(cudaPitchedPtr devPitchedPtr)
{
int tidx = blockIdx.x*blockDim.x+threadIdx.x;
int tidy = blockIdx.y*blockDim.y+threadIdx.y;
char* devPtr = (char*) devPitchedPtr.ptr;
size_t pitch = devPitchedPtr.pitch;
size_t slicePitch = pitch * N;
for (int w = 0; w < W; w++) {
char* slice = devPtr + w * slicePitch;
float* row = (float*)(slice + tidy * pitch);
row[tidx] = row[tidx] * row[tidx];
}
}
/********/
/* MAIN */
/********/
int main()
{
float a[N][M][W];
for (int i=0; i<N; i++)
for (int j=0; j<M; j++)
for (int w=0; w<W; w++) {
a[i][j][w] = 3.f;
//printf("row %i column %i depth %i value %f \n",i,j,w,a[i][j][w]);
}
// --- 3D pitched allocation and host->device memcopy
cudaExtent extent = make_cudaExtent(M * sizeof(float), N, W);
cudaPitchedPtr devPitchedPtr;
gpuErrchk(cudaMalloc3D(&devPitchedPtr, extent));
cudaMemcpy3DParms p = { 0 };
p.srcPtr.ptr = a;
p.srcPtr.pitch = M * sizeof(float);
p.srcPtr.xsize = M;
p.srcPtr.ysize = N;
p.dstPtr.ptr = devPitchedPtr.ptr;
p.dstPtr.pitch = devPitchedPtr.pitch;
p.dstPtr.xsize = M;
p.dstPtr.ysize = N;
p.extent.width = M * sizeof(float);
p.extent.height = N;
p.extent.depth = W;
p.kind = cudaMemcpyHostToDevice;
gpuErrchk(cudaMemcpy3D(&p));
dim3 GridSize(iDivUp(M,BLOCKSIZE_x),iDivUp(N,BLOCKSIZE_y));
dim3 BlockSize(BLOCKSIZE_y,BLOCKSIZE_x);
test_kernel_3D<<<GridSize,BlockSize>>>(devPitchedPtr);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
p.srcPtr.ptr = devPitchedPtr.ptr;
p.srcPtr.pitch = devPitchedPtr.pitch;
p.dstPtr.ptr = a;
p.dstPtr.pitch = M * sizeof(float);
p.kind = cudaMemcpyDeviceToHost;
gpuErrchk(cudaMemcpy3D(&p));
for (int i=0; i<N; i++)
for (int j=0; j<M; j++)
for (int w=0; w<W; w++)
printf("row %i column %i depth %i value %f\n",i,j,w,a[i][j][w]);
getch();
return 0;
}

How to speed up vector initialization c++

I had a previous question about a stack overflow error and switched to vectors for my arrays of objects. That question can be referenced here if needed: How to get rid of stack overflow error
My current question, however, is how do I speed up the initialization of the vectors? My current method takes ~15 seconds. Using arrays instead of vectors, it took about a second, with array sizes small enough that they didn't throw the stack overflow error.
Here is how I am initializing it:
in main.cpp I initialize my dungeon object:
dungeon = Dungeon(0, &textureHandler, MIN_X, MAX_Y);
in my dungeon(...) constructor, I initialize my 5x5 vector of rooms and call loadDungeon:
Dungeon::Dungeon(int dungeonID, TextureHandler* textureHandler, int topLeftX, int topLeftY)
{
currentRoomRow = 0;
currentRoomCol = 0;
for (int r = 0; r < MAX_RM_ROWS; ++r)
{
rooms.push_back(vector<Room>());
for (int c = 0; c < MAX_RM_COLS; ++c)
{
rooms[r].push_back(Room());
}
}
loadDungeon(dungeonID, textureHandler, topLeftX, topLeftY);
}
my Room constructor populates my 30x50 vector of cells (so I can set them up in the loadDungeon function):
Room::Room()
{
for (int r = 0; r < MAX_ROWS; ++r)
{
cells.push_back(vector<Cell>());
for (int c = 0; c < MAX_COLS; ++c)
{
cells[r].push_back(Cell());
}
}
}
My default cell constructor is simple and isn't doing much but I'll post it anyway:
Cell::Cell()
{
x = 0;
y = 0;
width = 16;
height = 16;
solid = false;
texCoords.push_back(0);
texCoords.push_back(0);
texCoords.push_back(1);
texCoords.push_back(0);
texCoords.push_back(1);
texCoords.push_back(1);
texCoords.push_back(0);
texCoords.push_back(1);
}
And lastly my loadDungeon() function will set up the cells. Eventually this will read from a file and load the cells up but for now I would like to optimize this a bit if possible.
void Dungeon::loadDungeon(int dungeonID, TextureHandler* textureHandler, int topLeftX, int topLeftY)
{
int startX = topLeftX + (textureHandler->getSpriteWidth()/2);
int startY = topLeftY - (textureHandler->getSpriteHeight()/2);
int xOffset = 0;
int yOffset = 0;
for (int r = 0; r < MAX_RM_ROWS; ++r)
{
for (int c = 0; c < MAX_RM_COLS; ++c)
{
for (int cellRow = 0; cellRow < rooms[r][c].getMaxRows(); ++cellRow)
{
xOffset = 0;
for (int cellCol = 0; cellCol < rooms[r][c].getMaxCols(); ++cellCol)
{
rooms[r][c].setupCell(cellRow, cellCol, startX + xOffset, startY - yOffset, textureHandler->getSpriteWidth(), textureHandler->getSpriteHeight(), false, textureHandler->getSpriteTexCoords("grass"));
xOffset += textureHandler->getSpriteWidth();
}
yOffset += textureHandler->getSpriteHeight();
}
}
}
currentDungeon = dungeonID;
currentRoomRow = 0;
currentRoomCol = 0;
}
So how can I speed this up so it doesn't take ~15 seconds to load every time? I feel like it shouldn't take 15 seconds to load a simple 2D game.
SOLUTION
Well, my solution was to use the std::vector::reserve call (rooms.reserve in my code) and it ended up working well. I changed my function Dungeon::loadDungeon to Dungeon::loadDefaultDungeon because it now loads from a save file.
Anyway here is the code (I got it down to about 4-5 seconds from ~15+ seconds in debug mode):
Dungeon::Dungeon()
{
rooms.reserve(MAX_RM_ROWS * MAX_RM_COLS);
currentDungeon = 0;
currentRoomRow = 0;
currentRoomCol = 0;
}
void Dungeon::loadDefaultDungeon(TextureHandler* textureHandler, int topLeftX, int topLeftY)
{
int startX = topLeftX + (textureHandler->getSpriteWidth()/2);
int startY = topLeftY - (textureHandler->getSpriteHeight()/2);
int xOffset = 0;
int yOffset = 0;
cerr << "Loading default dungeon..." << endl;
for (int roomRow = 0; roomRow < MAX_RM_ROWS; ++roomRow)
{
for (int roomCol = 0; roomCol < MAX_RM_COLS; ++roomCol)
{
rooms.push_back(Room());
int curRoom = roomRow * MAX_RM_COLS + roomCol;
for (int cellRow = 0; cellRow < rooms[curRoom].getMaxRows(); ++cellRow)
{
for (int cellCol = 0; cellCol < rooms[curRoom].getMaxCols(); ++cellCol)
{
rooms[curRoom].setupCell(cellRow, cellCol, startX + xOffset, startY - yOffset, textureHandler->getSpriteWidth(), textureHandler->getSpriteHeight(), false, textureHandler->getSpriteTexCoords("default"), "default");
xOffset += textureHandler->getSpriteWidth();
}
yOffset += textureHandler->getSpriteHeight();
xOffset = 0;
}
cerr << " room " << curRoom << " complete" << endl;
}
}
cerr << "default dungeon loaded" << endl;
}
Room::Room()
{
cells.reserve(MAX_ROWS * MAX_COLS);
for (int r = 0; r < MAX_ROWS; ++r)
{
for (int c = 0; c < MAX_COLS; ++c)
{
cells.push_back(Cell());
}
}
}
void Room::setupCell(int row, int col, float x, float y, float width, float height, bool solid, /*std::array<float, 8>*/ vector<float> texCoords, string texName)
{
cells[row * MAX_COLS + col].setup(x, y, width, height, solid, texCoords, texName);
}
void Cell::setup(float x, float y, float width, float height, bool solid, /*std::array<float,8>*/ vector<float> t, string texName)
{
this->x = x;
this->y = y;
this->width = width;
this->height = height;
this->solid = solid;
for (int i = 0; i < t.size(); ++i)
this->texCoords.push_back(t[i]);
this->texName = texName;
}
It seems wasteful to have so many dynamic allocations. You can get away with one single allocation by flattening out your vector and accessing it in strides:
std::vector<Room> rooms;
rooms.resize(MAX_RM_ROWS * MAX_RM_COLS);
for (unsigned int i = 0; i != MAX_RM_ROWS; ++i)
{
for (unsigned int j = 0; j != MAX_RM_COLS; ++j)
{
Room & r = rooms[i * MAX_RM_COLS + j];
// use `r` ^^^^^^^^^^^^^^^^^^^-----<< strides!
}
}
Note how resize is performed exactly once, incurring only one single allocation, as well as default-constructing each element. If you'd rather construct each element specifically, use rooms.reserve(MAX_RM_ROWS * MAX_RM_COLS); instead and populate the vector in the loop.
You may also wish to profile with rows and columns swapped and see which is faster.
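As a concrete sketch of the reserve-then-populate alternative mentioned above (Room's default constructor is used here only because that is what the original code does):
std::vector<Room> rooms;
rooms.reserve(MAX_RM_ROWS * MAX_RM_COLS);   // one allocation, but no elements constructed yet
for (unsigned int i = 0; i != MAX_RM_ROWS; ++i)
    for (unsigned int j = 0; j != MAX_RM_COLS; ++j)
        rooms.push_back(Room());            // construct each element as it is added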
Since it seems that your vectors have their size defined at compile time, if you can use C++11, you may consider using std::array instead of std::vector. std::array cannot be resized and lacks many of the operations in std::vector, but is much more lightweight and it seems a good fit for what you are doing.
As an example, you could declare cells as:
#include <array>
/* ... */
std::array<std::array<Cell, MAX_COLS>, MAX_ROWS> cells;
UPDATE: since a locally defined std::array allocates its internal array on the stack, the OP will experience a stack overflow due to the considerable size of the arrays. Still, it is possible to use an std::array (and its benefits compared to using std::vector), by allocating the array on the heap. That can be done by doing something like:
typedef std::array<std::array<Cell, MAX_COLS>, MAX_ROWS> Map;
Map* cells;
/* ... */
cells = new Map();
Even better, smart pointers can be used:
#include <memory>
/* ... */
std::unique_ptr<Map> cells;
cells = std::unique_ptr<Map>(new Map());
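With the heap-allocated array, element access then goes through the pointer, for example:
Cell& c = (*cells)[row][col]; // dereference the (smart) pointer, then index as before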