3D Elementwise Matrix Multiplication in CUDA?

I have a 2D Matrix Multiplication program using the following kernel:
__global__ void multKernel(int *a, int *b, int *c, int N)
{
    int column = threadIdx.x + blockDim.x * blockIdx.x;
    int row    = threadIdx.y + blockDim.y * blockIdx.y;
    int index  = row * N + column;
    if (column < N && row < N)
    {
        c[index] = a[index] * b[index];
    }
}
Now, I'd like to create a 3D matrix multiplication kernel, but I'm having trouble finding examples of how to create one (also, I'm terrible at reading mathematical formulae; it's something I need to improve on).
I know the GPU example will involve using
threadIdx.z
and so on, but I'm a bit lost with how to do it.
Could anyone point me in the right direction to either some formulae or sample code? Or even provide a basic example? I have a CPU example which should work, I think.
void matrixMult3D(int *a, int *b, int *c, int *z, int N)
{
    for (int column = 0; column < N; column++)
    {
        for (int row = 0; row < N; row++)
        {
            // use a name other than z for the loop counter so it doesn't
            // shadow the z array parameter
            for (int slice = 0; slice < N; slice++)
            {
                // linear index into an N*N*N array
                int index = (slice * N + row) * N + column;
                c[index] = a[index] * b[index] * z[index];
            }
        }
    }
}
Am I at least on the right track?

Because what you are actually doing is just an element-wise product (I hesitate to call it a Hadamard product because that isn't defined for hypermatrices, AFAIK), you don't need to do anything differently from the simplest 1D version of your kernel code. Something like this:
template<int ndim>
__global__ void multKernel(int *a, int *b, int *c, int *z, int N)
{
    int idx    = threadIdx.x + blockDim.x * blockIdx.x;
    int stride = blockDim.x * gridDim.x;
    int idxmax = 1;
#pragma unroll
    for (int i = 0; i < ndim; i++) {
        idxmax *= N;
    }
    // grid-stride loop over all N^ndim elements
    for (; idx < idxmax; idx += stride) {
        c[idx] = a[idx] * b[idx] * z[idx];
    }
}
[disclaimer: code written in browser, never compiled or run. use at own risk]
would work for any dimension of array with dimensions N (ndim=1), N*N (ndim=2), N*N*N (ndim=3), etc.
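If you do want to keep the 2D/3D-style indexing you started with (using threadIdx.z, as you mention), a minimal sketch might look like the following; the kernel name and the launch configuration in the trailing comment are my own assumptions, not from the original post:

__global__ void multKernel3D(int *a, int *b, int *c, int *z, int N)
{
    int column = threadIdx.x + blockDim.x * blockIdx.x;
    int row    = threadIdx.y + blockDim.y * blockIdx.y;
    int slice  = threadIdx.z + blockDim.z * blockIdx.z;
    if (column < N && row < N && slice < N)
    {
        // same slice-major linearisation as the CPU version above
        int index = (slice * N + row) * N + column;
        c[index] = a[index] * b[index] * z[index];
    }
}

// hypothetical launch:
// dim3 block(8, 8, 8);
// dim3 grid((N + 7) / 8, (N + 7) / 8, (N + 7) / 8);
// multKernel3D<<<grid, block>>>(d_a, d_b, d_c, d_z, N);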

Related

Get access to OpenCV GpuMat channels

I am working on a raytracer. I wanted to optimize my code by saving the data for each pixel in an OpenCV Mat, using the GPU.
For now, I save pixel values in the buffer fb, where each element is a vector of three values (RGB):
__global__ void render(vec3 *fb, int max_x, int max_y, Camera **cam, Triangle *data, size_t n)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    if ((i >= max_x) || (j >= max_y)) return;
    int pixel_index = j * max_x + i;
    float u = float(i) / float(max_x);
    float v = float(j) / float(max_y);
    Ray r = (*cam)->get_ray(u, v);
    fb[pixel_index] = color(r, data, n);
}
and then I save data in Mat on CPU:
for (int j = ny - 1; j >= 0; j--)
{
    for (int i = 0; i < nx; i++)
    {
        size_t pixel_index = j * nx + i;
        int ir = int(255.99 * fb[pixel_index].r());
        int ig = int(255.99 * fb[pixel_index].g());
        int ib = int(255.99 * fb[pixel_index].b());
        output.at<Vec3b>(j, i)[0] = (uchar)ib;
        output.at<Vec3b>(j, i)[1] = (uchar)ig;
        output.at<Vec3b>(j, i)[2] = (uchar)ir;
        //std::cout << ir << " " << ig << " " << ib << "\n";
    }
}
but it is a very slow process when I have a large pixel array. That's why I want to use an OpenCV GpuMat and save the data directly on the GPU.
The problem is that I can't really find an example of how to save data in each channel of a GpuMat. Is there an easy way to do it, similar to saving data on the CPU?
See the documentation. There it says:
no functions that return references to their data (because references on GPU are not valid for CPU)
The only way to access the data is through the data member, and that pointer can only be dereferenced in (CUDA) kernel code. There is no at function as far as I can see, so you will have to calculate the offset from data yourself.
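For a CV_8UC3 GpuMat, the offset arithmetic can be hidden behind cv::cuda::PtrStepSz, which a GpuMat converts to implicitly and which is indexable inside a kernel. A minimal sketch, assuming OpenCV was built with CUDA support (the kernel name is hypothetical):

#include <opencv2/core/cuda.hpp>

__global__ void write_pixels(cv::cuda::PtrStepSz<uchar3> img)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= img.cols || y >= img.rows) return;
    // PtrStepSz applies the row pitch (step) for you; no manual offset needed
    img(y, x) = make_uchar3(0, 128, 255);   // B, G, R
}

// host side:
// cv::cuda::GpuMat d_img(ny, nx, CV_8UC3);
// write_pixels<<<grid, block>>>(d_img);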
Thank you for your answers. They made me think about how to do it another way. I am not sure it is the best solution, but it works and in my opinion it is quite an easy way to fill the matrix on the GPU.
Reserve memory on the GPU for the matrix:
Mat output(ny, nx, CV_8UC3);
const size_t numBytes = output.step * output.rows;
unsigned char *d_output;
cudaMalloc<unsigned char>(&d_output, numBytes);
Fill the matrix on the GPU:
__global__ void render(vec3 *fb, int max_x, int max_y, Camera **cam, Triangle *data, size_t n, unsigned char *input, int step)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    if ((i >= max_x) || (j >= max_y)) return;
    int pixel_index = j * max_x + i;
    // byte offset of pixel (i, j) in a CV_8UC3 image with row pitch 'step'
    int index = j * step + 3 * i;
    float u = float(i) / float(max_x);
    float v = float(j) / float(max_y);
    Ray r = (*cam)->get_ray(u, v);
    fb[pixel_index] = color(r, data, n);
    int ir = int(255.99 * fb[pixel_index].r());
    int ig = int(255.99 * fb[pixel_index].g());
    int ib = int(255.99 * fb[pixel_index].b());
    input[index]     = ib;
    input[index + 1] = ig;
    input[index + 2] = ir;
}
I will be grateful for any advice and comments to this code.
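For completeness, two ways to get at the filled buffer afterwards (a sketch, reusing the variable names above and assuming OpenCV was built with CUDA support): copy it back into the host Mat, or wrap it as a GpuMat without copying.

// copy the device buffer back into the existing host Mat
cudaMemcpy(output.data, d_output, numBytes, cudaMemcpyDeviceToHost);

// or keep the data on the GPU and wrap the raw pointer as a GpuMat (no copy)
cv::cuda::GpuMat d_image(ny, nx, CV_8UC3, d_output, output.step);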

Why CUDA shared memory is slower than global memory in tiled matrix multiplication?

I have tiled matrix multiplication code with and without shared memory. Below is matrix multiplication using global memory:
__global__
void MatrixMulKernel(float* M, float* N, float* P, int Width)
{
    int Row = blockIdx.y*blockDim.y + threadIdx.y;
    int Col = blockIdx.x*blockDim.x + threadIdx.x;
    if ((Row < Width) && (Col < Width)) {
        float Pvalue = 0;
        for (int k = 0; k < Width; ++k)
        {
            Pvalue += M[Row*Width + k] * N[k*Width + Col];
        }
        P[Row*Width + Col] = Pvalue;
    }
}
Below is matrix multiplication using shared memory:
__global__
void MatrixMulKernel(float* d_M, float* d_N, float* d_P, int Width)
{
    __shared__ float Mds[blockWidth][blockWidth];
    __shared__ float Nds[blockWidth][blockWidth];
    int tx = threadIdx.x; int ty = threadIdx.y;
    int bx = blockIdx.x;  int by = blockIdx.y;
    int row = by * blockWidth + ty;
    int col = bx * blockWidth + tx;
    float pvalue = 0;
    for (int m = 0; m < Width / blockWidth; ++m)
    {
        Mds[ty][tx] = d_M[row * Width + m*blockWidth + tx];
        Nds[ty][tx] = d_N[(m*blockWidth + ty)*Width + col];
        __syncthreads();
        for (int k = 0; k < blockWidth; ++k)
        {
            pvalue += Mds[ty][k] * Nds[k][tx];
        }
        __syncthreads();
    }
    d_P[row*Width + col] = pvalue;
}
As far as I know, using shared memory should be faster, but when comparing these two codes I found that the code without shared memory runs about 2 seconds faster for 1600*1600 matrices. Is there an explanation for this speed difference, or is something wrong with my code?
My teacher uses the "Programming Massively Parallel Processors" book as the main text resource; these two codes come from it.
EDIT:
Configuration for the kernel:
int NumBlocks = ceil(Width / blockWidth); // blockWidth = 16
dim3 dimGrid(NumBlocks, NumBlocks, 1);    // Width = 1600
dim3 dimBlock(blockWidth, blockWidth, 1);
clock_t startGpuCalculation = clock();
MatrixMulKernel<<<dimGrid, dimBlock>>>(d_M, d_N, d_P, Width);
cudaThreadSynchronize();
clock_t endGpuCalculation = clock();
I was running the project in Debug mode (VS 2017 & CUDA 9). When I ran the code in Release mode, shared memory was much faster than global memory. My bad.
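As an aside, clock() around an asynchronous kernel launch is easy to get wrong, and cudaThreadSynchronize() is deprecated in favour of cudaDeviceSynchronize(). CUDA events are the usual way to time a kernel; a minimal sketch, reusing the names from the snippet above:

cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);

cudaEventRecord(start);
MatrixMulKernel<<<dimGrid, dimBlock>>>(d_M, d_N, d_P, Width);
cudaEventRecord(stop);

cudaEventSynchronize(stop);               // wait for the kernel to finish
float ms = 0.0f;
cudaEventElapsedTime(&ms, start, stop);   // elapsed time in milliseconds

cudaEventDestroy(start);
cudaEventDestroy(stop);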

Bitonic sorting in cuda misorders some values

I'm writing a sorting algorithm in CUDA for a bigger project and decided to implement a bitonic sort. The number of elements I'll be sorting will always be a power of two; in fact it will be 512. I need an array holding the final positions, because this method will be used to order an array that represents the quality matrix of another solution.
fitness is the array I'll sort, numElements is the number of elements, and orden is initially an empty array with numElements positions which is filled at the very beginning so that orden[i] = i. Actually orden is not relevant for this issue, but I kept it.
My problem is that some values aren't sorted properly, and so far I've been unable to figure out what the problem is.
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <ctime>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <device_functions.h>
#include "float.h"
__global__ void sorting(int * orden, float * fitness, int numElements);
// Populating array with random values for testing purposes
__global__ void populate( curandState * state, float * fitness{
curandState localState = state[threadIdx.x];
int a = curand(&localState) % 500;
fitness[threadIdx.x] = a;
}
//Curand setup for the populate method
__global__ void setup_cuRand(curandState * state, unsigned long seed)
{
int id = threadIdx.x;
curand_init(seed, id, 0, &state[id]);
}
int main()
{
float * arrayx;
int numelements = 512;
int * orden;
float arrayCPU[512] = { 0 };
curandState * state;
cudaDeviceReset();
cudaSetDevice(0);
cudaMalloc(&state, numelements * sizeof(curandState));
cudaMalloc((void **)&arrayx, numelements*sizeof(float));
cudaMalloc((void **)&orden, numelements*sizeof(int));
setup_cuRand << <1, numelements >> >(state, unsigned(time(NULL)));
populate << <1, numelements >> > (state, arrayx);
cudaMemcpy(&arrayCPU, arrayx, numelements * sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < numelements; i++)
printf("fitness[%i] = %f\n", i, arrayCPU[i]);
sorting << <1, numelements >> >(orden, arrayx, numelements);
printf("\n\n");
cudaMemcpy(&arrayCPU, arrayx, numelements * sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < numelements; i++)
printf("fitness[%i] = %f\n", i, arrayCPU[i]);
cudaDeviceReset();
return 0;
}
__device__ bool isValid(float n){
return !(isnan(n) || isinf(n) || n != n || n <= FLT_MIN || n >= FLT_MAX);
}
__global__ void sorting(int * orden, float * fitness, int numElements){
int i = 0;
int j = 0;
float f = 0.0;
int aux = 0;
//initial orden registered (1, 2, 3...)
orden[threadIdx.x] = threadIdx.x;
//Logarithm on base 2 of numElements
for (i = 2; i <= numElements; i = i * 2){
// descending from i reducing to half each iteration
for (j = i; j >= 2; j = j / 2){
if (threadIdx.x % j < j / 2){
__syncthreads();
// ascending or descending consideration using (threadIdx.x % (i*2) < i)
if ((threadIdx.x % (i * 2) < i) && (fitness[threadIdx.x] > fitness[threadIdx.x + j / 2] || !isValid(fitness[threadIdx.x])) ||
((threadIdx.x % (i * 2) >= i) && (fitness[threadIdx.x] <= fitness[threadIdx.x + j / 2] || !isValid(fitness[threadIdx.x + j / 2])))){
aux = orden[threadIdx.x];
orden[threadIdx.x] = orden[threadIdx.x + j / 2];
orden[threadIdx.x + j / 2] = aux;
//Se reubican los fitness
f = fitness[threadIdx.x];
fitness[threadIdx.x] = fitness[threadIdx.x + j / 2];
fitness[threadIdx.x + j / 2] = f;
}
}
}
}
}
For example, here is an output I got on a random execution:
[screenshot: a random execution]
This is a representation of my bitonic sorting:
[diagram: bitonic sorting schema; the arrows point to where the worse of the two compared values goes]
Here are the issues I found:
In your posted code, this does not compile:
__global__ void populate( curandState * state, float * fitness{
                                                              ^
missing close parenthesis
I added a close parenthesis there.
It's not necessary to take the address of the array in these cudaMemcpy statements:
cudaMemcpy(&arrayCPU, arrayx, numelements * sizeof(float), cudaMemcpyDeviceToHost);
....
cudaMemcpy(&arrayCPU, arrayx, numelements * sizeof(float), cudaMemcpyDeviceToHost);
the array name already decays to the address of the array, so I removed the ampersands; it happens to work here because arrayCPU is a statically declared array, but with a dynamically allocated array such usage would be broken.
Your usage of __syncthreads() here is broken:
for (j = i; j >= 2; j = j / 2){
    if (threadIdx.x % j < j / 2){
        __syncthreads();
usage of __syncthreads() inside a conditional statement is generally incorrect unless the conditional statement evaluates uniformly across the threadblock. This is covered in the documentation. We can achieve the desired effect with a slight change:
for (j = i; j >= 2; j = j / 2){
    __syncthreads();
    if (threadIdx.x % j < j / 2){
With the above changes, your code appears to run correctly for me, for most cases. Your usage of FLT_MIN in your validity check is also questionable, if you intend 0 (or any negative values) to be sorted correctly. Speaking generally, FLT_MIN is a number that is very small, close to zero. If you were thinking that this is a large negative number, it is not. As a result, zero is a possible output of your random number generator, and it will not be sorted correctly. I'll leave this one to you to fix, it should be straightforward, but it will depend on what you ultimately want to achieve. (If you only want to sort positive non-zero floating point values, the test may be OK, but in this case your random number generator can return 0.)
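For instance, if the intent is simply to treat NaN and infinity as invalid while letting zero and negative fitness values sort normally, a check along these lines might be enough (a sketch, not the poster's code):

__device__ bool isValid(float n)
{
    // isfinite() is false for NaN and +/-Inf; zero and negative values pass
    return isfinite(n);
}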

Equivalent of curand for OpenCL

I am looking at switching from NVIDIA to AMD for my compute card because I want double precision support. Before doing this I decided to learn OpenCL on my NVIDIA card to see if I like it. I want to convert the following code from CUDA to OpenCL. I am using the curand library to generate uniformly and normally distributed random numbers; each thread needs to be able to create a different sequence of random numbers and generate a few million per thread. Here is the code. How would I go about this in OpenCL? Everything I have read online seems to imply that I should generate a buffer of random numbers and then use that on the GPU, but that is not practical for me.
template<int NArgs, typename OptimizationFunctor>
__global__
void statistical_solver_kernel(float* args_lbounds,
                               float* args_ubounds,
                               int trials,
                               int initial_temp,
                               unsigned long long seed,
                               float* results,
                               OptimizationFunctor f)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx >= trials)
        return;

    curandState rand;
    curand_init(seed, idx, 0, &rand);

    float x[NArgs];
    for(int i = 0; i < NArgs; i++)
    {
        x[i] = curand_uniform(&rand) * (args_ubounds[i] - args_lbounds[i]) + args_lbounds[i];
    }
    float y = f(x);

    for(int t = initial_temp - 1; t > 0; t--)
    {
        float t_percent = (float)t / initial_temp;
        float x_prime[NArgs];
        for(int i = 0; i < NArgs; i++)
        {
            x_prime[i] = curand_normal(&rand) * (args_ubounds[i] - args_lbounds[i]) * t_percent + x[i];
            x_prime[i] = fmaxf(args_lbounds[i], x_prime[i]);
            x_prime[i] = fminf(args_ubounds[i], x_prime[i]);
        }
        float y_prime = f(x_prime);
        if(y_prime < y || (y_prime - y) / y_prime < t_percent)
        {
            y = y_prime;
            for(int i = 0; i < NArgs; i++)
            {
                x[i] = x_prime[i];
            }
        }
    }

    float* rptr = results + idx * (NArgs + 1);
    rptr[0] = y;
    for(int i = 1; i <= NArgs; i++)
        rptr[i] = x[i - 1];
}
The VexCL library provides an implementation of counter-based generators. You can use those inside larger expressions; see this slide for an example.
EDIT: Take this with a grain of salt, as I am the author of VexCL :).
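If you would rather roll your own, the counter-based idea itself is small: hash (seed, work-item id, sample counter) into 64 bits and map the result to [0, 1), so no per-thread generator state has to be stored between samples. A minimal sketch using the well-known splitmix64 finalizer, written here as CUDA-style C (for OpenCL C you would swap unsigned long long for ulong and drop __device__); this is an illustration of the idea, not VexCL's implementation:

__device__ unsigned long long mix64(unsigned long long x)
{
    // splitmix64 finalizer: a cheap, statistically solid 64-bit mixer
    x += 0x9E3779B97F4A7C15ULL;
    x = (x ^ (x >> 30)) * 0xBF58476D1CE4E5B9ULL;
    x = (x ^ (x >> 27)) * 0x94D049BB133111EBULL;
    return x ^ (x >> 31);
}

__device__ float uniform01(unsigned long long seed,
                           unsigned long long id,
                           unsigned long long counter)
{
    // distinct (id, counter) pairs behave like independent streams
    unsigned long long h = mix64(seed ^ mix64(id ^ mix64(counter)));
    return (h >> 40) * (1.0f / 16777216.0f);   // top 24 bits -> [0, 1)
}

Normally distributed samples can then be built from two such uniforms with a Box-Muller transform.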

Matrix multiplication using 1d arrays

I'm trying to multiply two matrices stored in 1D arrays.
I'm using this function, but my program crashes, I assume due to an out-of-bounds access.
However, I have no (easy) way to debug it, so I have to decide whether my code is correct, and to me it seems it is...
void SampleUtils::multiplyMatrices(float* matA, int rA, int cA, float* matB,
                                   int rB, int cB, float* matC, int rC, int cC) {
    for (int i = 0; i <= rA; i++) {
        for (int j = 0; j <= cB; j++) {
            float sum = 0.0;
            for (int k = 0; k <= rB; k++)
                sum = sum + matA[i * cA + k] * matB[k * cB + j];
            matC[i * cC + j] = sum;
        }
    }
}
So, can anyone find out what I did wrong?
Thanks...
Chances are you mean < instead of <= in your for loops.
Try using i < rA, j < cB, and k < rB in your for loops.
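Putting both suggestions together, a corrected sketch (assuming cA == rB, and that matC was allocated as rA x cB, i.e. rC == rA and cC == cB):

void SampleUtils::multiplyMatrices(float* matA, int rA, int cA, float* matB,
                                   int rB, int cB, float* matC, int rC, int cC) {
    for (int i = 0; i < rA; i++) {            // rows of A
        for (int j = 0; j < cB; j++) {        // columns of B
            float sum = 0.0f;
            for (int k = 0; k < cA; k++)      // shared dimension (cA == rB)
                sum += matA[i * cA + k] * matB[k * cB + j];
            matC[i * cC + j] = sum;
        }
    }
}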