CUDA texture object problems - C++

I seem to be having some difficulty using texture objects in CUDA. I took the code from here, simplified it, and fleshed it out a bit. When I go to build it I get the error "type name is not allowed". It occurs on line 18 of my code; does anyone have any idea why that is?
#include <cuda_runtime.h>
#include <texture_fetch_functions.h>
#include <cuda_texture_types.h>
#include <texture_indirect_functions.h>
#include <cuda.h>
#include "device_launch_parameters.h"
#include <vector>
#include <iostream>
#include <cstdlib>
#include <cstring>
#define L 16384
__global__ void read(cudaTextureObject_t t, float *b){
    float offset = blockIdx.x + 0.5f;
    b[blockIdx.x] = tex2D<float>(t, offset, 0.5f);
}

int main(){
    //device memory and host memory allocation
    cudaChannelFormatDesc channelFormat = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
    cudaArray *dev_buff_a;
    float *dev_buff_b, *hst_buff, *print_buff;
    hst_buff = (float *)malloc(L * sizeof(float));
    print_buff = (float *)malloc(L * sizeof(float));
    cudaMallocArray(&dev_buff_a, &channelFormat, L, 1);
    cudaMalloc(&dev_buff_b, L * sizeof(float));
    for(int i = 0; i < L; i++){
        hst_buff[i] = 1.0f;
    }
    //
    cudaMemcpyToArray(dev_buff_a, 0, 0, hst_buff, L * sizeof(float), cudaMemcpyHostToDevice);
    //creating the texture object
    //start with the resource descriptor
    cudaResourceDesc resource;
    memset(&resource, 0, sizeof(resource));
    resource.resType = cudaResourceTypeArray;
    resource.res.array.array = dev_buff_a;
    /*resource.res.linear.desc.f = cudaChannelFormatKindFloat; //channel format
    resource.res.linear.desc.x = 32; //bits per channel
    resource.res.linear.sizeInBytes = L * sizeof(float);*/
    //next, is the texture descriptor
    cudaTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.readMode = cudaReadModeElementType;
    //to create the actual texture object
    cudaTextureObject_t tObj = 0;
    cudaCreateTextureObject(&tObj, &resource, &texDesc, NULL);
    //perform reading function
    dim3 block(1, 1, 1);     //launch dimensions must be at least 1, not 0
    dim3 grid(16384, 1, 1);
    read<<<grid, block>>>(tObj, dev_buff_b);
    //copy stuff over from dev_buff_b to print
    cudaMemcpy(print_buff, dev_buff_b, L * sizeof(float), cudaMemcpyDeviceToHost);
    //print out the arrays and compare
    std::cout << "the original array was:\n";
    for(int i = 0; i < L; i++){
        std::cout << "element " << i << " is: " << hst_buff[i] << "\n";
    }
    std::cout << "the new array is:\n";
    for(int i = 0; i < L; i++){
        std::cout << "element " << i << " is: " << print_buff[i] << "\n";
    }
    //destroy the texture object
    cudaDestroyTextureObject(tObj);
    //free device memory
    cudaFreeArray(dev_buff_a);
    cudaFree(dev_buff_b);
    return 0;
}

You need to make sure of two things:
You're using CUDA 5.0 or newer.
Your compiler settings compile only for devices of compute capability 3.0 or better (the '-arch compute_30' flag for nvcc).
Texture objects are only available on these newer devices.
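For example, a build command along these lines (the file name texobj.cu is just a placeholder) targets the required architecture:

nvcc -arch=compute_30 -o texobj texobj.cu

When compiling for an older (or the default) architecture, the templated tex2D<float> overload isn't declared for device code, which is most likely what produces the "type name is not allowed" error at that call.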

Related

OpenCL code gets stuck on write to output buffer

I have this piece of code in OpenCL:
std::string src = "__kernel void dot_product(__global float* weights,"
                  "__global float* values,"
                  "__global float* result,"
                  "__const unsigned int sz){"
                  "float dot = 0.f;"
                  "unsigned int i;"
                  "int current_idx = get_global_id(0);"
                  "unsigned int offset = current_idx * sz;"
                  "for( i = 0; i < sz; ++i )"
                  "{"
                  "dot += weights[ offset + i ] * values[ offset + i ];"
                  "}"
                  "result[current_idx] = dot;"
                  "}";
It gets stuck on result[current_idx] = dot;. If I comment out this line, everything works well. I don't understand why it gets stuck.
The relevant C++ code is here:
using namespace cl;
std::array< float, CONST_INPUTS_NUMBER * CONST_NEURONS_NUMBER > in_weights;
std::array< float, CONST_INPUTS_NUMBER * CONST_NEURONS_NUMBER > in_values;
// Create a command queue and use the first device
const std::size_t size = in_weights.size();
std::vector< Device > devices = m_context.getInfo< CL_CONTEXT_DEVICES >();
Buffer weights(m_context, CL_MEM_READ_ONLY, size * sizeof(float));
Buffer values(m_context, CL_MEM_READ_ONLY, size * sizeof(float));
Buffer product(m_context, CL_MEM_WRITE_ONLY, CONST_NEURONS_NUMBER * sizeof(float));
std::cout << __FILE__ << __LINE__ << std::endl;
// Set arguments to kernel
m_kernel.setArg(0, weights);
m_kernel.setArg(1, values);
m_kernel.setArg(2, product);
m_kernel.setArg(3, CONST_INPUTS_NUMBER);
CommandQueue queue(m_context, devices[0]);
try {
    std::vector< float > dotProducts(CONST_NEURONS_NUMBER);
    for(std::size_t i = 0; i < CONST_NEURONS_NUMBER; ++i) {
        // Create memory buffers
        for(std::size_t j = 0; j < CONST_INPUTS_NUMBER; ++j) {
            const std::size_t index = i * CONST_INPUTS_NUMBER + j;
            in_weights[index] = m_internal[i][j].weight;
            in_values[index] = m_internal[i][j].value;
        }
    }
    queue.enqueueWriteBuffer(weights, CL_TRUE, 0,
                             in_weights.size() * sizeof(float),
                             in_weights.data());
    queue.enqueueWriteBuffer(values, CL_TRUE, 0,
                             in_values.size() * sizeof(float),
                             in_values.data());
    for(std::size_t offset = 0; offset < CONST_NEURONS_NUMBER; ++offset) {
        queue.enqueueNDRangeKernel(m_kernel,
                                   cl::NDRange(offset),
                                   cl::NDRange(CONST_INPUTS_NUMBER));
    }
    std::cout << __FILE__ << __LINE__ << std::endl;
    queue.enqueueReadBuffer(product, CL_TRUE, 0,
                            CONST_NEURONS_NUMBER * sizeof(float),
                            dotProducts.data());
    std::cout << __FILE__ << __LINE__ << std::endl;
    for(std::size_t i = 0; i < CONST_NEURONS_NUMBER; ++i) {
        std::cout << __FILE__ << __LINE__ << std::endl;
        m_internal[i].calculateOutput(dotProducts.begin(), dotProducts.end());
    }
} catch(const cl::Error& e) {
    cl_int err;
    cl::STRING_CLASS buildlog = m_program.getBuildInfo< CL_PROGRAM_BUILD_LOG >(devices[0], &err);
    std::cout << "Building error! Log: " << buildlog << std::endl;
}
It gets stuck on result[current_idx] = dot;. If I comment out this line, everything works well. I don't understand why it gets stuck.
When you comment out the line that writes the calculated results to the output buffer, the optimizer most likely removes all of the calculations, leaving your kernel empty.
I think the problem is here:
for(std::size_t offset = 0; offset < CONST_NEURONS_NUMBER; ++offset) {
    queue.enqueueNDRangeKernel(m_kernel, cl::NDRange(offset), cl::NDRange(CONST_INPUTS_NUMBER));
}
Specifically, you enqueue many kernels in a loop that all work on the same output buffer, so each kernel fights for access to the same buffer and the results get overwritten anyway.
You need to enqueue the kernel only once, without an offset and with CONST_NEURONS_NUMBER global work items:
queue.enqueueNDRangeKernel(m_kernel, cl::NullRange, cl::NDRange(CONST_NEURONS_NUMBER));
CONST_INPUTS_NUMBER is already passed as a kernel argument.
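In context, a sketch of the corrected host-side sequence (same kernel, buffers, and dotProducts vector as above; the blocking read then returns the finished results):

queue.enqueueNDRangeKernel(m_kernel, cl::NullRange, cl::NDRange(CONST_NEURONS_NUMBER));
queue.enqueueReadBuffer(product, CL_TRUE, 0,
                        CONST_NEURONS_NUMBER * sizeof(float),
                        dotProducts.data());

Each work item then computes the dot product for one neuron, indexed by get_global_id(0).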

Matrix calculation error appears when dimensions become large [duplicate]

This question already has answers here:
C programming, why does this large array declaration produce a segmentation fault? (6 answers)
Closed 6 years ago.
I am running a code where I simply create two matrices: one matrix of dimensions arows x nsame, and the other of dimensions nsame x bcols. The result is an array of dimensions arows x bcols. This is fairly simple to implement using BLAS, and the following code appears to work as intended when using the master-slave model below with OpenMPI:
#include <iostream>
#include <stdio.h>
#include <cmath>
#include <mpi.h>
#include <gsl/gsl_blas.h>
using namespace std;
int main(int argc, char** argv){
    int noprocs, nid;
    MPI_Status status;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &nid);
    MPI_Comm_size(MPI_COMM_WORLD, &noprocs);
    int master = 0;
    const int nsame = 500; //must equal both acols and brows for the multiplication
    const int arows = 500;
    const int bcols = 527; //works for 500 x 500 x 527 and 6000 x 100 x 36
    int rowsent;
    double buff[nsame];
    double b[nsame*bcols];
    double c[arows][bcols];
    double CC[1*bcols];    //one row of the result, of length bcols
    for (int i = 0; i < bcols; i++){
        CC[i] = 0.;
    }
    // Master part
    if (nid == master) {
        double a[arows][nsame]; //identity matrix of dimensions arows x nsame (it is I if arows = nsame)
        for (int i = 0; i < arows; i++){
            for (int j = 0; j < nsame; j++){
                if (i == j)
                    a[i][j] = 1.;
                else
                    a[i][j] = 0.;
            }
        }
        double b[nsame*bcols]; //matrix b stored row-major, nsame rows by bcols columns
        for (int i = 0; i < (nsame*bcols); i++){
            b[i] = (10.*i + 3.)/(3.*i - 2.);
        }
        MPI_Bcast(b, nsame*bcols, MPI_DOUBLE_PRECISION, master, MPI_COMM_WORLD);
        rowsent = 0;
        for (int i = 1; i < noprocs; i++) {
            // Note a is a 2D array, so a[rowsent] = &a[rowsent][0]
            MPI_Send(a[rowsent], nsame, MPI_DOUBLE_PRECISION, i, rowsent+1, MPI_COMM_WORLD);
            rowsent++;
        }
        for (int i = 0; i < arows; i++) {
            MPI_Recv(CC, bcols, MPI_DOUBLE_PRECISION, MPI_ANY_SOURCE, MPI_ANY_TAG,
                     MPI_COMM_WORLD, &status);
            int sender = status.MPI_SOURCE;
            int anstype = status.MPI_TAG; //row number + 1
            int IND_I = 0;
            while (IND_I < bcols){
                c[anstype - 1][IND_I] = CC[IND_I];
                IND_I++;
            }
            if (rowsent < arows) {
                MPI_Send(a[rowsent], nsame, MPI_DOUBLE_PRECISION, sender, rowsent+1, MPI_COMM_WORLD);
                rowsent++;
            }
            else { // tell sender there is no more work to do via a 0 TAG
                MPI_Send(MPI_BOTTOM, 0, MPI_DOUBLE_PRECISION, sender, 0, MPI_COMM_WORLD);
            }
        }
    }
    // Slave part
    else {
        MPI_Bcast(b, nsame*bcols, MPI_DOUBLE_PRECISION, master, MPI_COMM_WORLD);
        MPI_Recv(buff, nsame, MPI_DOUBLE_PRECISION, master, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
        while(status.MPI_TAG != 0) {
            int crow = status.MPI_TAG;
            gsl_matrix_view AAAA = gsl_matrix_view_array(buff, 1, nsame);
            gsl_matrix_view BBBB = gsl_matrix_view_array(b, nsame, bcols);
            gsl_matrix_view CCCC = gsl_matrix_view_array(CC, 1, bcols);
            /* Compute C = A B */
            gsl_blas_dgemm(CblasNoTrans, CblasNoTrans, 1.0, &AAAA.matrix, &BBBB.matrix,
                           0.0, &CCCC.matrix);
            MPI_Send(CC, bcols, MPI_DOUBLE_PRECISION, master, crow, MPI_COMM_WORLD);
            MPI_Recv(buff, nsame, MPI_DOUBLE_PRECISION, master, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
        }
    }
    // output c here on master node //uncomment the lines below to see the output
    // if (nid == master){
    //     if (rowsent == arows){
    //         // cout << rowsent;
    //         int IND_F = 0;
    //         while (IND_F < arows){
    //             int IND_K = 0;
    //             while (IND_K < bcols){
    //                 cout << "[" << IND_F << "]" << "[" << IND_K << "] = " << c[IND_F][IND_K] << " ";
    //                 IND_K++;
    //             }
    //             cout << "\n";
    //             IND_F++;
    //         }
    //     }
    // }
    MPI_Finalize();
    //free any allocated space here
    return 0;
}
Now, what appears odd is that when I increase the size of the matrices (e.g. from nsame = 500 to nsame = 501), the code no longer works. I receive the following error:
mpirun noticed that process rank 0 with PID 0 on node Users-MacBook-Air exited on signal 11 (Segmentation fault: 11).
I have tried this with other combinations of sizes for the matrices, and there always appears to be an upper limit on the size of the matrices themselves (which seems to vary depending on how I vary the individual dimensions). I have also tried modifying the values of the matrices themselves, although this does not appear to change anything. I realize there are alternative ways to initialize the matrices in my example (e.g. using vector), but I am simply wondering why my current scheme of multiplying matrices of arbitrary size only seems to work to a certain extent.
You're declaring too many big local variables, which is causing stack-space problems. a, in particular, is 500 x 500 doubles (250,000 eight-byte elements, or 2 million bytes). b is even larger.
You'll need to dynamically allocate space for some or all of those arrays.
There might be a compiler option to increase the initial stack space, but that isn't a good long-term solution.
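A minimal sketch of that change, keeping the question's dimensions and flattening c so it can be heap-allocated (indexing c[i][j] becomes c[i*bcols + j]):

double *buff = new double[nsame];
double *b    = new double[nsame*bcols];
double *c    = new double[arows*bcols];   // was: double c[arows][bcols];
double *CC   = new double[bcols];
// ... inside the master's receive loop:
//     c[(anstype - 1)*bcols + IND_I] = CC[IND_I];
// and before return 0:
delete [] buff; delete [] b; delete [] c; delete [] CC;

The master's a matrix can be handled the same way; std::vector<double> also works and releases the memory automatically.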

Thrust CUDA: find maximum per group (segment)

My data looks like
value = [1, 2, 3, 4, 5, 6]
key = [0, 1, 0, 2, 1, 2]
I need to know the maximum (value and index) for each group (key).
So the result should be
max = [3, 5, 6]
index = [2, 4, 5]
key = [0, 1, 2]
How can I get it with CUDA Thrust?
I can do sort -> reduce_by_key, but it's not really efficient. In my case the vector size is > 10M and the key space is ~1K (keys start from 0 without gaps).
Since the original question focused on thrust, I didn't have any suggestions other than what I mentioned in the comments. However, based on further dialog in the comments, I thought I would post an answer that covers both CUDA and thrust.
The thrust method uses a sort_by_key operation to group like keys together, followed by a reduce_by_key operation to find the max + index for each key-group.
The CUDA method uses a custom atomic approach I describe here to find a 32-bit max plus a 32-bit index (for each key-group).
The CUDA method is substantially (~10x) faster for this specific test case. I used a vector size of 10M and a key size of 10K for this test.
My test platform was CUDA 8RC, RHEL 7, and a Tesla K20X GPU. The K20X is a member of the Kepler generation, which has much faster global atomics than previous GPU generations.
Here's a fully worked example covering both cases, with a timing comparison:
$ cat t1234.cu
#include <iostream>
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/sequence.h>
#include <thrust/functional.h>
#include <cstdlib>
#include <time.h>
#include <sys/time.h>

#define USECPSEC 1000000ULL

unsigned long long dtime_usec(unsigned long long start){
    timeval tv;
    gettimeofday(&tv, 0);
    return ((tv.tv_sec*USECPSEC)+tv.tv_usec)-start;
}

const size_t ksize = 10000;
const size_t vsize = 10000000;
const int nTPB = 256;

struct my_max_func
{
    template <typename T1, typename T2>
    __host__ __device__
    T1 operator()(const T1 t1, const T2 t2){
        T1 res;
        if (thrust::get<0>(t1) > thrust::get<0>(t2)){
            thrust::get<0>(res) = thrust::get<0>(t1);
            thrust::get<1>(res) = thrust::get<1>(t1);}
        else {
            thrust::get<0>(res) = thrust::get<0>(t2);
            thrust::get<1>(res) = thrust::get<1>(t2);}
        return res;
    }
};

typedef union {
    float floats[2];              // floats[0] = maxvalue
    int ints[2];                  // ints[1] = maxindex
    unsigned long long int ulong; // for atomic update
} my_atomics;

__device__ unsigned long long int my_atomicMax(unsigned long long int* address, float val1, int val2)
{
    my_atomics loc, loctest;
    loc.floats[0] = val1;
    loc.ints[1] = val2;
    loctest.ulong = *address;
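    // Retry until the packed value at address already holds a max >= val1.
    // atomicCAS returns the old packed value, so loctest is refreshed on every
    // failed attempt and the loop re-checks against the latest stored max.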
    while (loctest.floats[0] < val1)
        loctest.ulong = atomicCAS(address, loctest.ulong, loc.ulong);
    return loctest.ulong;
}

__global__ void my_max_idx(const float *data, const int *keys, const int ds, my_atomics *res)
{
    int idx = (blockDim.x * blockIdx.x) + threadIdx.x;
    if (idx < ds)
        my_atomicMax(&(res[keys[idx]].ulong), data[idx], idx);
}

int main(){
    float *h_vals = new float[vsize];
    int *h_keys = new int[vsize];
    for (int i = 0; i < vsize; i++) {h_vals[i] = rand(); h_keys[i] = rand()%ksize;}
    // thrust method
    thrust::device_vector<float> d_vals(h_vals, h_vals+vsize);
    thrust::device_vector<int> d_keys(h_keys, h_keys+vsize);
    thrust::device_vector<int> d_keys_out(ksize);
    thrust::device_vector<float> d_vals_out(ksize);
    thrust::device_vector<int> d_idxs(vsize);
    thrust::device_vector<int> d_idxs_out(ksize);
    thrust::sequence(d_idxs.begin(), d_idxs.end());
    cudaDeviceSynchronize();
    unsigned long long et = dtime_usec(0);
    thrust::sort_by_key(d_keys.begin(), d_keys.end(),
                        thrust::make_zip_iterator(thrust::make_tuple(d_vals.begin(), d_idxs.begin())));
    thrust::reduce_by_key(d_keys.begin(), d_keys.end(),
                          thrust::make_zip_iterator(thrust::make_tuple(d_vals.begin(), d_idxs.begin())),
                          d_keys_out.begin(),
                          thrust::make_zip_iterator(thrust::make_tuple(d_vals_out.begin(), d_idxs_out.begin())),
                          thrust::equal_to<int>(), my_max_func());
    cudaDeviceSynchronize();
    et = dtime_usec(et);
    std::cout << "Thrust time: " << et/(float)USECPSEC << "s" << std::endl;
    // cuda method
    float *vals;
    int *keys;
    my_atomics *results;
    cudaMalloc(&keys, vsize*sizeof(int));
    cudaMalloc(&vals, vsize*sizeof(float));
    cudaMalloc(&results, ksize*sizeof(my_atomics));
    cudaMemset(results, 0, ksize*sizeof(my_atomics)); // works because vals are all positive
    cudaMemcpy(keys, h_keys, vsize*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(vals, h_vals, vsize*sizeof(float), cudaMemcpyHostToDevice);
    et = dtime_usec(0);
    my_max_idx<<<(vsize+nTPB-1)/nTPB, nTPB>>>(vals, keys, vsize, results);
    cudaDeviceSynchronize();
    et = dtime_usec(et);
    std::cout << "CUDA time: " << et/(float)USECPSEC << "s" << std::endl;
    // verification
    my_atomics *h_results = new my_atomics[ksize];
    cudaMemcpy(h_results, results, ksize*sizeof(my_atomics), cudaMemcpyDeviceToHost);
    for (int i = 0; i < ksize; i++){
        if (h_results[i].floats[0] != d_vals_out[i]) {
            std::cout << "value mismatch at index: " << i << " thrust: " << d_vals_out[i] << " CUDA: " << h_results[i].floats[0] << std::endl;
            return -1;}
        if (h_results[i].ints[1] != d_idxs_out[i]) {
            std::cout << "index mismatch at index: " << i << " thrust: " << d_idxs_out[i] << " CUDA: " << h_results[i].ints[1] << std::endl;
            return -1;}
    }
    std::cout << "Success!" << std::endl;
    return 0;
}
$ nvcc -arch=sm_35 -o t1234 t1234.cu
$ ./t1234
Thrust time: 0.026593s
CUDA time: 0.002451s
Success!
$

C++: Allocating memory in OpenGL drawing loop causes memory corruption

I have an OpenGL ES main loop which calls two functions, draw and update, once each per iteration.
The following is the code from the draw loop:
float* sample_data = userdata->sample_data;
int fft_length = userdata->fft_length;

cout << 'a' << endl;
cout << fft_length << endl;

GLfloat* points = new GLfloat[3 * fft_length];

cout << 'b' << endl;

// Draw things
for(int ix = 0; ix < fft_length; ++ix)
{
    float ratio = float(ix) / float(fft_length - 1);
    float x = -1.0 + 2.0 * ratio;
    float y = sample_data[ix];
    points[3 * ix + 0] = x;
    points[3 * ix + 1] = y;
    points[3 * ix + 2] = 0.0;
}

glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, points);
glEnableVertexAttribArray(0);
glDrawArrays(GL_LINE_STRIP, 0, fft_length);

std::cout << 'd' << std::endl;

delete [] points;

std::cout << 'e' << std::endl;
// End of function
The output prints everything once, but after that prints only a and 256, which is the length of the FFT.
Hence I assume the problem occurs at the call to new. The next text that appears on the screen is:
*** glibc detected *** ./main.out: malloc(): memory corruption: 0x01319648 ***
This puzzles me, as I can't immediately see anything wrong: I allocate memory and delete it again. My guess is that I've made a stupid mistake which I just can't spot.
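One hedged observation: when glibc reports malloc(): memory corruption at an allocation call, the heap metadata has usually been damaged by an earlier out-of-bounds write somewhere else, so the new itself is typically the victim rather than the culprit. A common restructuring that at least removes the per-frame allocation from the picture is sketched below; UserData is a hypothetical stand-in for the asker's actual struct, and this is not a confirmed fix for the corruption.

#include <vector>
#include <GLES2/gl2.h>

struct UserData {                 // hypothetical stand-in for the real struct
    float* sample_data;
    int fft_length;
    std::vector<GLfloat> points;  // reused every frame; no new/delete in draw
};

void draw(UserData* userdata)
{
    int fft_length = userdata->fft_length;
    userdata->points.resize(3 * fft_length);  // no-op after the first frame
    GLfloat* points = userdata->points.data();
    for(int ix = 0; ix < fft_length; ++ix)
    {
        float ratio = float(ix) / float(fft_length - 1);
        points[3 * ix + 0] = -1.0f + 2.0f * ratio;
        points[3 * ix + 1] = userdata->sample_data[ix];
        points[3 * ix + 2] = 0.0f;
    }
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, points);
    glEnableVertexAttribArray(0);
    glDrawArrays(GL_LINE_STRIP, 0, fft_length);
}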

CUFFT: How to calculate the FFT when the input is a pitched array

I'm trying to find the FFT of a dynamically allocated array. The input array is copied from host to device using cudaMemcpy2D. Then the FFT is taken (cufftExecR2C) and the results are copied back from device to host.
So my initial problem was how to use the pitch information in the FFT. Then I found an answer here - CUFFT: How to calculate fft of pitched pointer?
But unfortunately it doesn't work. The results I get are garbage values. Given below is my code.
#define NRANK 2
#define BATCH 10

#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cufft.h>
#include <stdio.h>
#include <iomanip>
#include <iostream>
#include <vector>

using namespace std;

const size_t NX = 4;
const size_t NY = 6;

int main()
{
    // Input array (static) - host side
    float h_in_data_static[NX][NY] = {
        {0.7943 , 0.6020 , 0.7482 , 0.9133 , 0.9961 , 0.9261},
        {0.3112 , 0.2630 , 0.4505 , 0.1524 , 0.0782 , 0.1782},
        {0.5285 , 0.6541 , 0.0838 , 0.8258 , 0.4427 , 0.3842},
        {0.1656 , 0.6892 , 0.2290 , 0.5383 , 0.1067 , 0.1712}
    };
    // --------------------------------
    // Input array (dynamic) - host side
    float *h_in_data_dynamic = new float[NX*NY];
    // Set the values
    size_t h_ipitch;
    for (int r = 0; r < NX; ++r) // this can also be done on the GPU
    {
        for (int c = 0; c < NY; ++c)
        { h_in_data_dynamic[NY*r + c] = h_in_data_static[r][c]; }
    }
    // --------------------------------
    // Output array - host side
    float2 *h_out_data_temp = new float2[NX*(NY/2+1)];
    // Input and output arrays - device side
    cufftHandle plan;
    cufftReal *d_in_data;
    cufftComplex *d_out_data;
    int n[NRANK] = {NX, NY};
    // Copy input array from host to device
    size_t ipitch;
    cudaError cudaStat1 = cudaMallocPitch((void**)&d_in_data, &ipitch, NY*sizeof(cufftReal), NX);
    cout << cudaGetErrorString(cudaStat1) << endl;
    cudaError cudaStat2 = cudaMemcpy2D(d_in_data, ipitch, h_in_data_dynamic, NY*sizeof(float), NY*sizeof(float), NX, cudaMemcpyHostToDevice);
    cout << cudaGetErrorString(cudaStat2) << endl;
    // Allocate memory for output array - device side
    size_t opitch;
    cudaError cudaStat3 = cudaMallocPitch((void**)&d_out_data, &opitch, (NY/2+1)*sizeof(cufftComplex), NX);
    cout << cudaGetErrorString(cudaStat3) << endl;
    // Perform the fft
    int rank = 2;                 // 2D fft
    int istride = 1, ostride = 1; // Stride lengths
    int idist = 1, odist = 1;     // Distance between batches
    int inembed[] = {ipitch, NX}; // Input size with pitch
    int onembed[] = {opitch, NX}; // Output size with pitch
    int batch = 1;
    cufftPlanMany(&plan, rank, n, inembed, istride, idist, onembed, ostride, odist, CUFFT_R2C, batch);
    //cufftPlan2d(&plan, NX, NY, CUFFT_R2C);
    cufftSetCompatibilityMode(plan, CUFFT_COMPATIBILITY_NATIVE);
    cufftExecR2C(plan, d_in_data, d_out_data);
    cudaThreadSynchronize();
    // Copy d_out_data back from device to host
    cudaError cudaStat4 = cudaMemcpy2D(h_out_data_temp, (NY/2+1)*sizeof(float2), d_out_data, opitch, (NY/2+1)*sizeof(cufftComplex), NX, cudaMemcpyDeviceToHost);
    cout << cudaGetErrorString(cudaStat4) << endl;
    // Print the results
    for (int i = 0; i < NX; i++)
    {
        for (int j = 0; j < NY/2 + 1; j++)
            printf(" %f + %fi", h_out_data_temp[i*(NY/2+1) + j].x, h_out_data_temp[i*(NY/2+1) + j].y);
        printf("\n");
    }
    cudaFree(d_in_data);
    return 0;
}
I think the problem is in cufftPlanMany. How can I solve this issue?
You may want to study the advanced data layout section of the documentation carefully.
I think the previously linked question is somewhat confusing because it passes the width and height parameters in reverse order from what I would expect for a cufft 2D plan. However, the answer then mimics that order, so it is at least consistent.
Secondly, you missed in the previous question that the "pitch" parameters being passed in inembed and onembed are not the same as the pitch parameters you would receive from a cudaMallocPitch operation. They have to be scaled by the number of bytes per data element in the input and output data sets. I'm actually not entirely sure this is the intended use of the inembed and onembed parameters, but it seems to work.
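As a concrete example of that scaling (using the element sizes from the code below): if cudaMallocPitch returned an ipitch of, say, 512 bytes for rows of 4-byte cufftReal values, the embedded input width would be ipitch/sizeof(cufftReal) = 128 elements, and likewise opitch/sizeof(cufftComplex) for the 8-byte output elements.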
When I adjust your code to account for the above two changes, I seem to get valid results; at least they appear to be in a reasonable range. You've posted several questions now about 2D FFTs where you've said the results are not correct. I can't do these 2D FFTs in my head, so I suggest that in the future you indicate what data you are expecting.
This has the changes I made:
#define NRANK 2
#define BATCH 10

#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cufft.h>
#include <stdio.h>
#include <iomanip>
#include <iostream>
#include <vector>

using namespace std;

const size_t NX = 4;
const size_t NY = 6;

int main()
{
    // Input array (static) - host side
    float h_in_data_static[NX][NY] = {
        {0.7943 , 0.6020 , 0.7482 , 0.9133 , 0.9961 , 0.9261},
        {0.3112 , 0.2630 , 0.4505 , 0.1524 , 0.0782 , 0.1782},
        {0.5285 , 0.6541 , 0.0838 , 0.8258 , 0.4427 , 0.3842},
        {0.1656 , 0.6892 , 0.2290 , 0.5383 , 0.1067 , 0.1712}
    };
    // --------------------------------
    // Input array (dynamic) - host side
    float *h_in_data_dynamic = new float[NX*NY];
    // Set the values
    size_t h_ipitch;
    for (int r = 0; r < NX; ++r) // this can also be done on the GPU
    {
        for (int c = 0; c < NY; ++c)
        { h_in_data_dynamic[NY*r + c] = h_in_data_static[r][c]; }
    }
    // --------------------------------
    int owidth = (NY/2)+1;
    // Output array - host side
    float2 *h_out_data_temp = new float2[NX*owidth];
    // Input and output arrays - device side
    cufftHandle plan;
    cufftReal *d_in_data;
    cufftComplex *d_out_data;
    int n[NRANK] = {NX, NY};
    // Copy input array from host to device
    size_t ipitch;
    cudaError cudaStat1 = cudaMallocPitch((void**)&d_in_data, &ipitch, NY*sizeof(cufftReal), NX);
    cout << cudaGetErrorString(cudaStat1) << endl;
    cudaError cudaStat2 = cudaMemcpy2D(d_in_data, ipitch, h_in_data_dynamic, NY*sizeof(float), NY*sizeof(float), NX, cudaMemcpyHostToDevice);
    cout << cudaGetErrorString(cudaStat2) << endl;
    // Allocate memory for output array - device side
    size_t opitch;
    cudaError cudaStat3 = cudaMallocPitch((void**)&d_out_data, &opitch, owidth*sizeof(cufftComplex), NX);
    cout << cudaGetErrorString(cudaStat3) << endl;
    // Perform the fft
    int rank = 2;                 // 2D fft
    int istride = 1, ostride = 1; // Stride lengths
    int idist = 1, odist = 1;     // Distance between batches
    int inembed[] = {NX, ipitch/sizeof(cufftReal)};    // Input size with pitch, scaled to elements
    int onembed[] = {NX, opitch/sizeof(cufftComplex)}; // Output size with pitch, scaled to elements
    int batch = 1;
    if ((cufftPlanMany(&plan, rank, n, inembed, istride, idist, onembed, ostride, odist, CUFFT_R2C, batch)) != CUFFT_SUCCESS) cout << "cufft error 1" << endl;
    //cufftPlan2d(&plan, NX, NY, CUFFT_R2C);
    if ((cufftSetCompatibilityMode(plan, CUFFT_COMPATIBILITY_NATIVE)) != CUFFT_SUCCESS) cout << "cufft error 2" << endl;
    if ((cufftExecR2C(plan, d_in_data, d_out_data)) != CUFFT_SUCCESS) cout << "cufft error 3" << endl;
    cudaDeviceSynchronize();
    // Copy d_out_data back from device to host
    cudaError cudaStat4 = cudaMemcpy2D(h_out_data_temp, owidth*sizeof(float2), d_out_data, opitch, owidth*sizeof(cufftComplex), NX, cudaMemcpyDeviceToHost);
    cout << cudaGetErrorString(cudaStat4) << endl;
    // Print the results
    for (int i = 0; i < NX; i++)
    {
        for (int j = 0; j < owidth; j++)
            printf(" %f + %fi", h_out_data_temp[i*owidth + j].x, h_out_data_temp[i*owidth + j].y);
        printf("\n");
    }
    cudaFree(d_in_data);
    return 0;
}