I am having trouble trying to make a CUDA program manage an array of lambdas by their index. Here is an example that reproduces the problem:
#include <cuda.h>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <cassert>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
if (code != cudaSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n",
cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
template<typename Lambda>
__global__ void kernel(Lambda f){
int t = blockIdx.x * blockDim.x + threadIdx.x;
printf("device: thread %i: ", t);
printf("f() = %i\n", f() );
}
int main(int argc, char **argv){
// arguments
if(argc != 2){
fprintf(stderr, "run as ./prog i\nwhere 'i' is function index");
exit(EXIT_FAILURE);
}
int i = atoi(argv[1]);
// lambdas
auto lam0 = [] __host__ __device__ (){ return 333; };
auto lam1 = [] __host__ __device__ (){ return 777; };
// make vector of functions
std::vector<int(*)()> v;
v.push_back(lam0);
v.push_back(lam1);
// host: calling a function by index
printf("host: f() = %i\n", (*v[i])() );
// device: calling a function by index
kernel<<< 1, 1 >>>( v[i] ); // does not work
//kernel<<< 1, 1 >>>( lam0 ); // does work
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
return EXIT_SUCCESS;
}
Compiling with
nvcc -arch sm_60 -std=c++11 --expt-extended-lambda main.cu -o prog
The error I get when running is
➜ cuda-lambda ./prog 0
host: f() = 333
device: GPUassert: invalid program counter main.cu 53
It seems that CUDA cannot handle the int(*)() function pointer form (while host C++ works properly). On the other hand, each lambda is treated as a distinct data type, even when they are identical in code and have the same signature. How, then, can we achieve function by index in CUDA?
There are a few considerations here.
Although you suggest wanting to "manage an array of lambdas", you are actually relying on the implicit conversion of a lambda to a function pointer (possible when the lambda does not capture anything).
When you mark something as __host__ __device__, you are declaring to the compiler that two copies of said item need to be compiled (with two obviously different entry points): one for the CPU, and one for the GPU.
When we take a __host__ __device__ lambda and ask it to degrade to a function pointer, we are then left with the question "which function pointer (entry point) to choose?" The compiler no longer has the option to carry the lambda object around; it must choose one entry point or the other (host or device, CPU or GPU) for your vector. Whichever one it chooses, the vector could (will) break if used in the wrong environment.
One takeaway from this is that your two test cases are not the same. In one case (broken) you are passing a function pointer to the kernel (so the kernel is templated to accept a function pointer argument) and in the other case (working) you are passing a lambda to the kernel (so the kernel is templated to accept a lambda argument).
The problem here, in my view, is not simply arising out of use of a container, but arising out of the type of container you are using. I can demonstrate this in a simple way (see below) by converting your vector to a vector of actual lambda type. In that case, we can make the code "work" (sort of), but since every lambda has a unique type, this is an uninteresting demonstration. We can create a multi-element vector, but the only element we can store in it is one of your two lambdas (not both at the same time).
If we use a container that can handle dissimilar types (e.g. std::tuple), perhaps we can make some progress here, but I know of no direct method to index through the elements of such a container. Even if we could, the template kernel accepting lambda as argument/template type would have to be instantiated for each lambda.
In my view, function pointers avoid this particular type "messiness".
Therefore, as an answer to this question:
Then, how can we achieve function by index in CUDA?
I would suggest, for the time being, that function by index in host code be kept separate (e.g. in its own container) from function by index in device code, and that for function by index in device code you use any of the techniques (which don't use or depend on lambdas) covered in other questions, such as this one.
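For reference, here is a minimal sketch of one such technique (my own illustration, not taken from any linked answer): build a table of __device__ function pointers on the device, copy it to the host with cudaMemcpyFromSymbol, and index into it when launching the kernel.
#include <cstdio>
#include <cstdlib>
__device__ int f0() { return 333; }
__device__ int f1() { return 777; }
typedef int (*fptr_t)();
// table of device function pointers, initialized in device code
__device__ fptr_t d_table[2] = { f0, f1 };
__global__ void kernel(fptr_t f){
    printf("device: f() = %i\n", f());
}
int main(int argc, char **argv){
    int i = (argc > 1) ? atoi(argv[1]) : 0;
    fptr_t h_table[2];
    // bring the device pointer values to the host so we can index them here
    cudaMemcpyFromSymbol(h_table, d_table, sizeof(h_table));
    kernel<<< 1, 1 >>>( h_table[i] );
    cudaDeviceSynchronize();
    return EXIT_SUCCESS;
}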
Here is a worked example (I think) demonstrating the note above, that we can create a vector of lambda "type", and use the resultant element(s) from that vector as lambdas in both host and device code:
$ cat t64.cu
#include <cuda.h>
#include <vector>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <cassert>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
if (code != cudaSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n",
cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
template<typename Lambda>
__global__ void kernel(Lambda f){
int t = blockIdx.x * blockDim.x + threadIdx.x;
printf("device: thread %i: ", t);
printf("f() = %i\n", f() );
}
template <typename T>
std::vector<T> fill(T L0, T L1){
std::vector<T> v;
v.push_back(L0);
v.push_back(L1);
return v;
}
int main(int argc, char **argv){
// arguments
if(argc != 2){
fprintf(stderr, "run as ./prog i\nwhere 'i' is function index");
exit(EXIT_FAILURE);
}
int i = atoi(argv[1]);
// lambdas
auto lam0 = [] __host__ __device__ (){ return 333; };
auto lam1 = [] __host__ __device__ (){ return 777; };
auto v = fill(lam0, lam0);
// make vector of functions
// std::vector< int(*)()> v;
// v.push_back(lam0);
// v.push_back(lam1);
// host: calling a function by index
printf("host: f() = %i\n", (*v[i])() );
// device: calling a function by index
kernel<<< 1, 1 >>>( v[i] ); // works now (vector of lambda type)
//kernel<<< 1, 1 >>>( lam0 ); // does work
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
return EXIT_SUCCESS;
}
$ nvcc -arch sm_61 -std=c++11 --expt-extended-lambda t64.cu -o t64
$ cuda-memcheck ./t64 0
========= CUDA-MEMCHECK
host: f() = 333
device: thread 0: f() = 333
========= ERROR SUMMARY: 0 errors
$ cuda-memcheck ./t64 1
========= CUDA-MEMCHECK
host: f() = 333
device: thread 0: f() = 333
========= ERROR SUMMARY: 0 errors
$
As already mentioned, this is not sensible code. It is presented to demonstrate a particular point.
Related
I thought I knew how to write clean CUDA code, until I tried to make a simple template class and use it in a simple kernel.
I've been troubleshooting for days. Every thread I've visited has made me feel a little more stupid.
For error checking I used this
Here is my class.h:
#pragma once
template <typename T>
class MyArray
{
public:
const int size;
T *data;
__host__ MyArray(int size); //gpuErrchk(cudaMalloc(&data, size * sizeof(T)));
__device__ __host__ T GetValue(int); //return data[i]
__device__ __host__ void SetValue(T, int); //data[i] = val;
__device__ __host__ T& operator()(int); //return data[i];
~MyArray(); //gpuErrchk(cudaFree(data));
};
template class MyArray<double>;
The relevant content of class.cu is in the comments. If you think the whole thing is relevant I'd be happy to add it.
Now for the main code:
__global__ void test(MyArray<double> array, double *data, int size)
{
int j = threadIdx.x;
//array.SetValue(1, j); //doesn't work
//array(j) = 1; //doesn't work
//array.data[j] = 1; //doesn't work
data[j] = 1; //This does work !
printf("Reach this code\n");
}
int main(int argc, char **argv)
{
MyArray<double> x(20);
test<<<1, 20>>>(x, x.data, 20);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
}
When I say "doesn't work", I mean that the program stops there (before reaching the printf) without outputting any error. Plus I get the following error both from cudaDeviceSynchronize and from cudaFree:
an illegal memory access was encountered
What I can't understand is that there should be no issue with memory management, since sending the array directly to the kernel works fine. So why doesn't it work when I send a class and try to access the class's data? And why do I receive no warning or error message when my code clearly ran into some error?
Here is the output of nvcc --version
nvcc: NVIDIA (R) Cuda compiler driver
Copyright (c) 2005-2017 NVIDIA Corporation
Built on Fri_Nov__3_21:07:56_CDT_2017
Cuda compilation tools, release 9.1, V9.1.85
(Editorial note: there is quite a bit of disinformation in the comments on this question, so I have assembled an answer as a community wiki entry.)
There is no particular reason why a template class cannot be passed as an argument to a kernel. There are some limitations which need to be clearly understood before doing so:
CUDA kernel arguments are, for all intents and purposes, always passed by value. Pass-by-reference is supported under an extremely limited set of circumstances (the argument in question must be stored in managed memory). That does not apply here.
As a result of (1), POD arguments just work, because they are trivially copyable and rely on no special behaviour.
Classes are different, in that when you pass a class by value, you are implicitly invoking copy-construction or move-construction semantics. That means that classes passed by value as kernel arguments must be trivially copy constructible. There is no way to run non-trivial copy constructors on the device as part of a kernel launch.
CUDA further requires that classes don't contain virtual members.
Although the <<< >>> kernel launch syntax looks like a simple function call, it isn't. There are several layers of abstraction boilerplate and an API call between what you write in host code and what is actually emitted by the toolchain on the host side. This means that there are several copy-construction operations between your code and the GPU. If you do something like put a cudaFree call in your destructor, you should assume that it will get called when one of those copies falls out of scope as part of the call sequence which launches a kernel (see the sketch below). You do not want that.
You did not show how the class member functions were actually implemented, so it is impossible to say why the permutations hinted at in your code comments did or did not work, beyond noting that passing the raw pointer to the kernel works because it is a trivially copyable POD value, while the class almost certainly was not.
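To make the destructor point concrete, here is a rough sketch (my own illustration, not part of the original answer) of the failure mode: a class whose destructor frees device memory will free it when any of the by-value copies made around the kernel launch is destroyed.
#include <cstdio>
#include <cuda_runtime.h>
// A class shaped like the one in the question: the destructor frees device memory.
struct Bad {
    double *data;
    Bad(int n) { cudaMalloc(&data, n * sizeof(double)); }
    ~Bad()     { cudaFree(data); } // also runs for every by-value copy made at launch time
    __host__ __device__ double& operator()(int i) { return data[i]; }
};
__global__ void test(Bad b) { b(threadIdx.x) = 1.0; }
int main(){
    Bad x(32);
    test<<<1, 32>>>(x);   // copies of x may be destroyed here, freeing x.data under the kernel
    printf("%s\n", cudaGetErrorString(cudaDeviceSynchronize()));
    return 0;
}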
Here is a simple, complete example showing how to make this work:
$cat classy.cu
#include <vector>
#include <iostream>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
template <typename T>
class MyArray
{
public:
int len;
T *data;
__device__ __host__ void SetValue(T val, int i) { data[i] = val; };
__device__ __host__ int size() { return sizeof(T) * len; };
__host__ void DevAlloc(int N) {
len = N;
gpuErrchk(cudaMalloc(&data, size()));
};
__host__ void DevFree() {
gpuErrchk(cudaFree(data));
len = -1;
};
};
__global__ void test(MyArray<double> array, double val)
{
int j = threadIdx.x;
if (j < array.len)
array.SetValue(val, j);
}
int main(int argc, char **argv)
{
const int N = 20;
const double val = 5432.1;
gpuErrchk(cudaSetDevice(0));
gpuErrchk(cudaFree(0));
MyArray<double> x;
x.DevAlloc(N);
test<<<1, 32>>>(x, val);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
std::vector<double> y(N);
gpuErrchk(cudaMemcpy(&y[0], x.data, x.size(), cudaMemcpyDeviceToHost));
x.DevFree();
for(int i=0; i<N; ++i) std::cout << i << " = " << y[i] << std::endl;
return 0;
}
which compiles and runs like so:
$ nvcc -std=c++11 -arch=sm_53 -o classy classy.cu
$ cuda-memcheck ./classy
========= CUDA-MEMCHECK
0 = 5432.1
1 = 5432.1
2 = 5432.1
3 = 5432.1
4 = 5432.1
5 = 5432.1
6 = 5432.1
7 = 5432.1
8 = 5432.1
9 = 5432.1
10 = 5432.1
11 = 5432.1
12 = 5432.1
13 = 5432.1
14 = 5432.1
15 = 5432.1
16 = 5432.1
17 = 5432.1
18 = 5432.1
19 = 5432.1
========= ERROR SUMMARY: 0 errors
(CUDA 10.2/gcc 7.5 on a Jetson Nano)
Note that I have included host side functions for allocation and deallocation which do not interact with the constructor and destructor. Otherwise the class is extremely similar to your design and has the same properties.
I'm a bit of a newcomer to CUDA and thrust. I seem to be unable to get the thrust::for_each algorithm to work when supplied with a counting_iterator.
Here is my simple functor:
struct print_Functor {
print_Functor(){}
__host__ __device__
void operator()(int i)
{
printf("index %d\n", i);
}
};
Now if I call this with a host-vector prefilled with a sequence, it works fine:
thrust::host_vector<int> h_vec(10);
thrust::sequence(h_vec.begin(),h_vec.end());
thrust::for_each(h_vec.begin(),h_vec.end(), print_Functor());
However, if I try to do this with thrust::counting_iterator it fails:
thrust::counting_iterator<int> first(0);
thrust::counting_iterator<int> last = first+10;
for(thrust::counting_iterator<int> it=first;it!=last;it++)
printf("Value %d\n", *it);
printf("Launching for_each\n");
thrust::for_each(first,last,print_Functor());
What I get is that the for loop executes correctly, but the for_each fails with the error message:
after cudaFuncGetAttributes: unspecified launch failure
I tried to do this by making the iterator type a template argument:
thrust::for_each<thrust::counting_iterator<int>>(first,last, print_Functor());
but the same error results.
For completeness, I'm calling this from a MATLAB mex file (64 bit).
I've been able to get other thrust algorithms to work with the counting iterator (e.g. thrust::reduce gives the right result).
As a newcomer I'm probably doing something really stupid and missing something obvious - can anyone help?
Thanks for the comments so far; I have taken them on board. The worked example (outside MATLAB) ran correctly and produced output, but when made into a mex file it still did not work - the first time producing no output at all, and the second time producing the same error message as before (only fixed by a recompile, after which it goes back to producing no output).
However, there is a similar problem with it not executing the functor from thrust::for_each even when compiled and run from a DOS prompt. Here is a complete example:
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
struct sum_Functor {
int *sum;
sum_Functor(int *s){sum = s;}
__host__ __device__
void operator()(int i)
{
*sum+=i;
printf("In functor: i %d sum %d\n",i,*sum);
}
};
int main(){
thrust::counting_iterator<int> first(0);
thrust::counting_iterator<int> last = first+10;
int sum = 0;
sum_Functor sf(&sum);
printf("After constructor: value is %d\n", *(sf.sum));
for(int i=0;i<5;i++){
sf(i);
}
printf("Initiating for_each call - current value %d\n", (*(sf.sum)));
thrust::for_each(first,last,sf);
cudaDeviceSynchronize();
printf("After for_each: value is %d\n",*(sf.sum));
}
This is compiled under a DOS prompt with:
nvcc -o pf pf.cu
The output produced is:
After constructor: value is 0
In functor: i 0 sum 0
In functor: i 1 sum 1
In functor: i 2 sum 3
In functor: i 3 sum 6
In functor: i 4 sum 10
Initiating for_each call - current value 10
After for_each: value is 10
In other words the functor's overloaded operator() is called correctly from the for loop but is never called by the thrust::for_each algorithm. The only way to get the for_each to execute the functor when using the counting iterator is to omit the member variable.
( I should add that after years of using pure Matlab, my C++ is very rusty, so I could be missing something obvious ...)
In your comments you say that you want your code to be executed on the host side.
The error code "unspecified launch failure" and the fact that your functor is defined as __host__ __device__ make me think Thrust wants to execute it on your device.
Can you add an execution policy to be sure where your code is executed?
Replace:
thrust::for_each(first,last,sf);
with
thrust::for_each(thrust::host, first,last,sf);
To be able to run on the GPU, your result must be allocated in device memory (through cudaMalloc) and then copied back to the host.
#include <thrust/host_vector.h>
#include <thrust/sequence.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/execution_policy.h>
struct sum_Functor {
int *sum;
sum_Functor(int *s){sum=s;}
__host__ __device__
void operator()(int i)
{
atomicAdd(sum, i);
}
};
int main(int argc, char**argv){
thrust::counting_iterator<int> first(0);
thrust::counting_iterator<int> last = first+atoi(argv[1]);
int *d_sum;
int h_sum = 0;
cudaMalloc(&d_sum,sizeof(int));
cudaMemcpy(d_sum,&h_sum,sizeof(int),cudaMemcpyHostToDevice);
thrust::for_each(thrust::device,first,last,sum_Functor(d_sum));
cudaDeviceSynchronize();
cudaMemcpy(&h_sum,d_sum,sizeof(int),cudaMemcpyDeviceToHost);
printf("sum = %d\n", *h_sum);
cudaFree(d_sum);
}
Code update: to get the correct result on your device you must use an atomic operation.
I would like to implement a device-side vector class which encapsulates a pointer to the elements of the container.
After I instantiate an object of this class I have no access to the pointer inside. It always says 'Access violation writing location some device memory address'.
My code is the following:
#include <iostream>
#include <cuda_runtime.h>
template <typename T>
class DeviceVector
{
private:
T* m_bValues;
std::size_t m_bSize;
public:
__host__
void* operator new(std::size_t size)
{
DeviceVector<T>* object = nullptr;
cudaMalloc((void**)&object, size);
return object;
}
__host__
void operator delete(void* object)
{
cudaFree(object);
}
__host__
DeviceVector(std::size_t size = 1)
{
cudaMemcpy(&m_bSize, &size, sizeof(std::size_t), cudaMemcpyHostToDevice);
// At this cudaMalloc I get Access violation writing location...
cudaMalloc((void**)&m_bValues, size * sizeof(T));
// It's an alternative solution here
T* ptr;
cudaMalloc((void**)&ptr, size * sizeof(T));
cudaMemcpy(&m_bValues, &ptr, sizeof(T*), cudaMemcpyHostToDevice);
// The memory is allocated
// But I can't access it through m_bValues pointer
// It is also Access violation writing location...
}
__host__
~DeviceVector()
{
// Access violation here if I use the second solution in the constructor
cudaFree(m_bValues);
}
};
int main()
{
DeviceVector<int>* vec = new DeviceVector<int>();
delete vec;
return 0;
}
Note:
I have access to the size attribute.
So my questions are:
How to allocate memory for this class to get access to the pointer inside?
Is this even possible to encapsulate a pointer into a class on the device?
This line is illegal:
cudaMalloc((void**)&m_bValues, size * sizeof(T));
because your new operator allocated the object on the device:
cudaMalloc((void**)&object, size);
return object;
and the constructor was called to operate on that allocation. Therefore &m_bValues is taking the address of a device variable in host code which is illegal in CUDA. If you do that, and then attempt to use it in host code (i.e. the cudaMalloc operation), you're going to get a seg fault. cudaMalloc creates a device allocation of a particular size, and then stores the device pointer to that allocation in a variable that is expected to be resident on the host. If you pass it a device address to store that pointer into instead, cudaMalloc will segfault trying to write the pointer value.
Your alternative solution is a somewhat better approach, and is the general idea when it's necessary to copy a pointer to a device allocation to a variable resident on the device.
But you've still basically made the allocation that m_bValues points to inaccessible from the host. (ptr, being a temporary variable, won't help, and creating another variable in the class to hold a value like ptr won't help either, because the entire class is allocated and resident on the device.) For the same reason that you're not allowed to use &m_bValues in the previous cudaMalloc operation, you won't be able to use it directly in any other host code (except as the target for a host-to-device cudaMemcpy when copying the pointer value itself).
I don't think there are any simple fixes for this. I suggest re-crafting the object to live on the host, and providing appropriate host- and device-side allocations for the corresponding pointers and parameters (like size).
It also seems like you're re-inventing the wheel. You might want to investigate thrust device vectors (which are easily usable with ordinary CUDA code).
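A minimal sketch of that suggestion (my own, not part of the original answer): thrust::device_vector already encapsulates a device allocation behind a host-resident handle, and you can still get at the raw pointer for plain CUDA code.
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <iostream>
int main(){
    thrust::device_vector<int> vec(10);          // storage lives on the device
    thrust::fill(vec.begin(), vec.end(), 42);    // runs on the device
    int first = vec[0];                          // element access copies back to the host
    int *raw = thrust::raw_pointer_cast(vec.data()); // raw device pointer for ordinary kernels
    std::cout << "vec[0] = " << first << ", device ptr = " << raw << std::endl;
    return 0;
}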
Anyway, this was the closest I could come up with:
#include <iostream>
#include <cuda_runtime.h>
#include <stdio.h>
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
template <typename T>
class DeviceVector
{
private:
T* m_bValues;
std::size_t m_bSize;
std::size_t eleSize;
public:
__host__
void* operator new(std::size_t size)
{
DeviceVector<T>* object = NULL;
object = (DeviceVector<T> *)malloc(size*sizeof(DeviceVector<T>));
return object;
}
__host__
void operator delete(void* object)
{
free(object);
}
__host__
DeviceVector(std::size_t size = 1)
{
m_bSize = size;
eleSize = sizeof(T);
cudaMalloc(&m_bValues, m_bSize*sizeof(T));
cudaCheckErrors("constructor cudaMalloc fail");
cudaMemset(m_bValues, 0, m_bSize*sizeof(T));
}
__host__
~DeviceVector()
{
cudaFree(m_bValues);
cudaCheckErrors("destructor cudaFree fail");
}
__host__
T* getDevPtr(){
return m_bValues;}
__host__
std::size_t getSize(){
return m_bSize;}
__host__
std::size_t geteleSize(){
return eleSize;}
};
int main()
{
DeviceVector<int>* vec = new DeviceVector<int>();
cudaMemset(vec->getDevPtr(), 0xFF, vec->getSize()*vec->geteleSize());
cudaCheckErrors("vector fill fail");
delete vec;
return 0;
}
You've shown very little about how you want to interact with an object of this class, so I'm just guessing here.
I've been spending a lot of time trying to figure out the cause of this problem. The following code attempts to generate a sequence of normally distributed random variables using curand on the device. It seems to generate a few successfully, but then crashes with an "an illegal memory access was encountered" error. Any help is much appreciated.
main.cu
#include <stdio.h>
#include <cuda.h>
#include <curand_kernel.h>
class A {
public:
__device__ A(const size_t& seed) {
printf("\nA()");
curandState state;
curand_init(seed, 0, 0, &state);
for(size_t i = 0; i < 1000; ++i)
printf("\n%f", curand_normal(&state));
}
__device__ ~A() { printf("\n~A()"); }
};
/// Kernel
__global__ void kernel(const size_t& seed) {
printf("\nHello from Kernel...");
A a(seed);
return;
}
int main(void) {
kernel<<<1,1>>>(1);
cudaError_t cudaerr = cudaDeviceSynchronize();
if (cudaerr != CUDA_SUCCESS)
printf("kernel launch failed with error \"%s\".\n",
cudaGetErrorString(cudaerr));
return 0;
}
Output
Hello from Kernel...
A()
0.292537
-0.718359
0.958011
0.633711kernel launch failed with error "an illegal memory access was encountered".
I have run this both on my machine (CUDA 7.0) and on a supercomputing cluster (CUDA 6.5), and the same result occurs.
Get rid of the pass-by-reference on the kernel parameter (&).
You are not allowed to write GPU kernels that have pass-by-reference parameters: the reference holds a host address, which the kernel cannot safely dereference, and a GPU kernel cannot modify a host variable. (This ignores Unified Memory, zero-copy, and related mechanisms, which are not at issue here.)
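For example, keeping the rest of the code from the question unchanged, the kernel becomes:
/// Kernel - seed passed by value instead of by reference
__global__ void kernel(const size_t seed) {
    printf("\nHello from Kernel...");
    A a(seed);
    return;
}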
I'm trying to pass some POD values to a kernel whose parameters are non-POD types with non-explicit constructors. The idea behind that is: allocate some memory on the host, pass that memory to the kernel, and have it encapsulated in the objects without the user having to do that step explicitly.
The constructors are marked as __device__ code, but they are not called when passing the parameters, and I can't figure out why.
My question is not really related about how should I do the thing, but trying to understand what's happening behind the scenes.
Here an example (I'm using CUDA 5 with a GPU of capability 2.1, hence the printf).
#include <stdio.h>
struct Test {
__device__ Test() {
printf("Default\n"),
_n = 0;
}
__device__ Test(int n) {
printf("Construct %d\n", n);
_n = n;
}
__device__ Test(const Test &t) {
printf("Copy constr %d\n", t._n);
_n = t._n;
}
__device__ Test &operator=(const Test &t) {
printf("Assignment %d\n", t._n);
_n = t._n;
return *this;
}
__device__ int calc() const {
printf("Calculating %d\n", threadIdx.x + 10 * _n);
return threadIdx.x + 10 * _n;
}
int _n;
};
__global__ void dosome(Test a, Test b) {
printf("Kernel data %d %d\n", a._n, b._n);
a.calc();
b.calc();
}
int main(int argc, char **argv) {
dosome<<<1, 2>>>(2, 3);
cudaError_t cudaerr = cudaDeviceSynchronize();
if (cudaerr != cudaSuccess)
printf("kernel launch failed with error:\n\t%s\n",cudaGetErrorString(cudaerr));
return 0;
}
EDIT: I forgot to say that none of the constructor messages is printed, but the calc and kernel messages are.
EDIT2: Is it guaranteed that CUDA will initialize a Test object before copying it to the device?
You have to think of a constructor just like a normal method. If you qualify it with __host__, then you'll be able to call it host-side. If you qualify it with __device__, you'll be able to call it device-side. If you qualify it with both, you'll be able to call it on both sides.
What happens when you do dosome<<<1, 2>>>(2, 3); is that the two objects are implicitly constructed (because your constructor is not explicit, so maybe that's confusing you too) on the host side and then memcpy'd to the device. There is no copy constructor involved in the process.
Let's illustrate this:
__global__ void dosome(Test a, Test b) {
a.calc();
b.calc();
}
int main(int argc, char **argv) {
dosome<<<1, 2>>>(2, 3); // Constructors must be at least __host__
return 0;
}
// Outputs:
Construct 2 (from the host side)
Construct 3 (from the host side)
Now if you change your kernel to take ints instead of Test:
__global__ void dosome(int arga, int argb) {
// Constructors must be at least __device__
Test a(arga);
Test b(argb);
a.calc();
b.calc();
}
int main(int argc, char **argv) {
dosome<<<1, 2>>>(2, 3);
return 0;
}
// Outputs:
Construct 2 (from the device side)
Construct 3 (from the device side)
OK, I found that it works (the constructors are called) if I add both __host__ and __device__ qualifiers to the constructors. The construction of the objects happens on the host side, and then they are copied to the device (stack?). This is why the constructors weren't being called: they were device-only code (but then what was being called on the host side?!?)
Using both __host__ and __device__ on the constructors allowed me to use the class without problems.
EDIT: Still, I'm not sure if the construction always happens before the copy to device.
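A minimal sketch of that fix (assuming the rest of the code from the question stays the same; the printf calls in the constructors are omitted here for brevity):
struct Test {
    __host__ __device__ Test() : _n(0) { }       // callable on both sides
    __host__ __device__ Test(int n) : _n(n) { }  // constructed on the host at the launch site,
                                                 // then the object is copied to the device
    __device__ int calc() const { return threadIdx.x + 10 * _n; }
    int _n;
};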