using std::thread and CUDA together - c++

I'm looking for a quick example of using std::thread and CUDA together. When using multiple host threads, does each host thread need to be assigned its own set of GPU threads that doesn't overlap with the others'?

You can use std::thread and CUDA together.
There is no particular arrangement required for the association between threads and GPUs. You can have one thread manage all GPUs, one thread per GPU, four threads per GPU, all threads talking to all GPUs, or whatever you like. (There is no relationship whatsoever between GPU threads and host threads, assuming that by "GPU threads" you mean the GPU threads in device code.)
Libraries like cuFFT and cuBLAS may have certain expectations about handle usage, typically that you must not share a handle between host threads, and handles are inherently device-specific.
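To illustrate that per-thread handle point, here is a minimal sketch (not from the original answer; the worker function and launch loop are hypothetical) in which each host thread selects a device and creates and destroys its own cuBLAS handle:
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <thread>
#include <vector>

// Sketch: one host thread per device, each owning its own cuBLAS handle.
void worker(int dev){
    cudaSetDevice(dev);        // the handle created below is tied to this device
    cublasHandle_t handle;
    cublasCreate(&handle);     // one handle per thread; never shared across threads
    // ... issue cuBLAS calls using `handle` here ...
    cublasDestroy(handle);
}

int main(){
    int n = 0;
    cudaGetDeviceCount(&n);
    std::vector<std::thread> t;
    for (int i = 0; i < n; i++) t.emplace_back(worker, i);
    for (auto &th : t) th.join();
    return 0;
}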
Here's a worked example demonstrating 4 threads (one per GPU) followed by one thread dispatching work to all 4 GPUs:
$ cat t1457.cu
#include <thread>
#include <vector>
#include <iostream>
#include <cstdio>

__global__ void k(int n){
    printf("hello from thread %d\n", n);
}

void thread_func(int n){
    if (n >= 0){
        // per-GPU case: this host thread works on device n only
        cudaSetDevice(n);
        k<<<1,1>>>(n);
        cudaDeviceSynchronize();
    }
    else {
        // single host thread dispatching work to all GPUs
        cudaError_t err = cudaGetDeviceCount(&n);
        for (int i = 0; i < n; i++){
            cudaSetDevice(i);
            k<<<1,1>>>(-1);
        }
        for (int i = 0; i < n; i++){
            cudaSetDevice(i);
            cudaDeviceSynchronize();
        }
    }
}

int main(){
    int n = 0;
    cudaError_t err = cudaGetDeviceCount(&n);
    if (err != cudaSuccess) {
        std::cout << "error " << (int)err << std::endl;
        return 0;
    }
    std::vector<std::thread> t;
    for (int i = 0; i < n; i++)
        t.push_back(std::thread(thread_func, i));
    std::cout << n << " threads started" << std::endl;
    for (int i = 0; i < n; i++)
        t[i].join();
    std::cout << "join finished" << std::endl;
    std::thread ta(thread_func, -1);
    ta.join();
    std::cout << "finished" << std::endl;
    return 0;
}
$ nvcc -o t1457 t1457.cu -std=c++11
$ ./t1457
4 threads started
hello from thread 1
hello from thread 3
hello from thread 2
hello from thread 0
join finished
hello from thread -1
hello from thread -1
hello from thread -1
hello from thread -1
finished
$
Here's an example showing 4 threads issuing work to a single GPU:
$ cat t1459.cu
#include <thread>
#include <vector>
#include <iostream>
#include <cstdio>

__global__ void k(int n){
    printf("hello from thread %d\n", n);
}

void thread_func(int n){
    cudaSetDevice(0);
    k<<<1,1>>>(n);
    cudaDeviceSynchronize();
}

int main(){
    const int n = 4;
    std::vector<std::thread> t;
    for (int i = 0; i < n; i++)
        t.push_back(std::thread(thread_func, i));
    std::cout << n << " threads started" << std::endl;
    for (int i = 0; i < n; i++)
        t[i].join();
    std::cout << "join finished" << std::endl;
    return 0;
}
$ nvcc t1459.cu -o t1459 -std=c++11
$ ./t1459
4 threads started
hello from thread 0
hello from thread 1
hello from thread 3
hello from thread 2
join finished
$

Related

Threads appear to run randomly; reliable only after slowing down the join after thread creation

I am observing strange behavior using pthreads. Note the following code -
#include <iostream>
#include <string>
#include <algorithm>
#include <vector>
#include <pthread.h>
#include <unistd.h>

typedef struct _FOO_{
    int ii = 0;
    std::string x = "DEFAULT";
} foo;

void *dump(void *x)
{
    foo *X;
    X = (foo *)x;
    std::cout << X->x << std::endl;
    X->ii += 1;
}

int main(int argc, char **argv)
{
    foo X[2];
    const char *U[2] = {"Hello", "World"};
    pthread_t t_id[2];
    int t_status[2];

    /* initialize data structures */
    for(int ii = 0; ii < 2; ii += 1){
        X[ii].x = U[ii];
    }

    foo *p = X;
    for(int ii = 0; ii < 2; ii += 1){
        t_status[ii] = pthread_create(&t_id[ii], NULL, dump, (void *)p);
        std::cout << "Thread ID = " << t_id[ii] << " Status = " << t_status[ii] << std::endl;
        p += 1;
    }

    //sleep(1); /* if this is left commented out, one of the threads does not execute */

    for(int ii = 0; ii < 2; ii += 1){
        std::cout << pthread_join(t_status[ii], NULL) << std::endl;
    }

    for(int ii = 0; ii < 2; ii += 1){
        std::cout << X[ii].ii << std::endl;
    }
}
When I leave the sleep(1) (between thread creation and join) commented out, I get erratic behavior: randomly, only one of the two threads runs.
rajatkmitra@butterfly:~/mpi/tmp$ ./foo
Thread ID = 139646898239232 Status = 0
Hello
Thread ID = 139646889846528 Status = 0
3
3
1
0
When I uncomment sleep(1), both threads execute reliably.
rajatkmitra@butterfly:~/mpi/tmp$ ./foo
Thread ID = 140072074356480 Status = 0
Hello
Thread ID = 140072065963776 Status = 0
World
3
3
1
1
pthread_join() should hold up exit from the program until both threads complete, but in this example I cannot get that to happen without the sleep() call. I really do not like the implementation with sleep(). Can someone tell me what I am missing?
See Pete Becker's comment:
pthread_join should be called with the thread id, not the status value that pthread_create returned. So: pthread_join(t_id[ii], NULL), not pthread_join(t_status[ii], NULL). Even better, since the question is tagged C++, use std::thread. –
Pete Becker
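A minimal sketch of that suggestion (the same struct and dump logic as the question, rewritten with std::thread so the thread objects themselves are joined and the id/status mix-up cannot happen):
#include <iostream>
#include <string>
#include <thread>

struct foo {
    int ii = 0;
    std::string x = "DEFAULT";
};

void dump(foo *X) {
    std::cout << X->x << std::endl;
    X->ii += 1;
}

int main() {
    foo X[2];
    const char *U[2] = {"Hello", "World"};
    for (int ii = 0; ii < 2; ii += 1)
        X[ii].x = U[ii];

    // std::thread objects are joined directly; there is no separate id or status to confuse
    std::thread t[2];
    for (int ii = 0; ii < 2; ii += 1)
        t[ii] = std::thread(dump, &X[ii]);
    for (int ii = 0; ii < 2; ii += 1)
        t[ii].join();

    for (int ii = 0; ii < 2; ii += 1)
        std::cout << X[ii].ii << std::endl;
}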

Can't tell if Mutex Lock is kicking in or not?

I'm working on a college assignment and have been tasked with showing a basic mutex lock example. I've never worked with threads in any form, so I'm a total beginner working with POSIX threads in C++.
What I'm trying to get the program to do is create 1000 threads that increment a global integer by 1000.
#include <iostream>
#include <stdlib.h>
#include <pthread.h>
#include <sys/types.h>
#include <unistd.h>
#include <thread>

pthread_t threadArr[1000];
pthread_mutex_t lock;

// Global int to increment
int numberToInc = 0;

void* incByTwo(void*)
{
    pthread_mutex_lock(&lock);
    for(int j = 0; j < 1000; j++){
        numberToInc += 1;
    }
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main()
{
    // Creates 1000 threads with incByTwo func
    for(int i = 0; i < 1000; i++){
        pthread_create(&threadArr[i], NULL, incByTwo, NULL);
    }
    std::cout << "\n" << numberToInc << "\n";
    return 0;
}
Running this produces a different result each time, obviously because the threads are executing concurrently, right?
Now, I've gotten it to work correctly by inserting
for(int i = 0; i < 1000; i++){
    pthread_join(threadArr[i], NULL);
}
after the thread creation loop. But even after removing the mutex locks, it still works. I've been trying to figure out how pthread_join works, but I'm a little lost. Any advice?
I've sorted out a way to show the mutex lock in action. When I output the global variable inside the function, without mutex locks it has the potential to show the results out of order.
Running it with mutex locks, the output looks like:
1000
2000
3000
... (etc)
10000
With mutex locks removed, the output can vary in the order.
E.g.
1000
2000
4000
6000
3000
5000
7000
8000
9000
10000
While the final result from the ten threads is correct, the sequence is out of order. In the context of this program it doesn't really matter, but I'd imagine that if it were passing inconsistently sequenced values along, it would mess things up?
pthread_t threadArr[10];
pthread_mutex_t lock;
int numberToInc = 0;

void* incByTwo(void*)
{
    pthread_mutex_lock(&lock);
    for(int j = 0; j < 1000; j++){
        numberToInc += 1;
    }
    std::cout << numberToInc << "\n";
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main()
{
    if (pthread_mutex_init(&lock, NULL) != 0)
    {
        printf("\n mutex init failed\n");
        return 1;
    }
    for(int i = 0; i < 10; i++){
        pthread_create(&threadArr[i], NULL, incByTwo, NULL);
    }
    // Join every thread, not just the first, so main cannot return before they all finish.
    for(int i = 0; i < 10; i++){
        pthread_join(threadArr[i], NULL);
    }
    return 0;
}

Why doesn't openmp place threads based on manual NUMA bind?

I'm building a numa-aware processor that binds to a given socket and accepts lambdas. Here is what I've done:
#include <numa.h>
#include <chrono>
#include <cstdlib>
#include <iostream>
#include <thread>
#include <vector>
using namespace std;

unsigned nodes = numa_num_configured_nodes();
unsigned cores = numa_num_configured_cpus();
unsigned cores_per_node = cores / nodes;

int main(int argc, char* argv[]) {
    putenv("OMP_PLACES=sockets(1)");
    cout << numa_available() << endl;  // returns 0
    numa_set_interleave_mask(numa_all_nodes_ptr);
    int size = 200000000;
    for (auto i = 0; i < nodes; ++i) {
        auto t = thread([&]() {
            // binding to given socket
            numa_bind(numa_parse_nodestring(to_string(i).c_str()));
            vector<int> v(size, 0);
            cout << "node #" << i << ": on CPU " << sched_getcpu() << endl;
            #pragma omp parallel for num_threads(cores_per_node) proc_bind(master)
            for (auto i = 0; i < 200000000; ++i) {
                for (auto j = 0; j < 10; ++j) {
                    v[i]++;
                    v[i] *= v[i];
                    v[i] *= v[i];
                }
            }
        });
        t.join();
    }
}
However, all threads are running on socket 0. It seems numa_bind doesn't bind the current thread to the given socket. The second NUMA worker (node 1) outputs node #1: on CPU 0, but it should be running on a CPU belonging to node 1. So what's going wrong?
This works for me exactly as I expected:
#include <cassert>
#include <iostream>
#include <numa.h>
#include <omp.h>
#include <sched.h>

int main() {
    assert(numa_available() != -1);

    auto nodes = numa_num_configured_nodes();
    auto cores = numa_num_configured_cpus();
    auto cores_per_node = cores / nodes;

    omp_set_nested(1);
    #pragma omp parallel num_threads(nodes)
    {
        auto outer_thread_id = omp_get_thread_num();
        numa_run_on_node(outer_thread_id);

        #pragma omp parallel num_threads(cores_per_node)
        {
            auto inner_thread_id = omp_get_thread_num();
            #pragma omp critical
            std::cout
                << "Thread " << outer_thread_id << ":" << inner_thread_id
                << " core: " << sched_getcpu() << std::endl;
            assert(outer_thread_id == numa_node_of_cpu(sched_getcpu()));
        }
    }
}
The program first creates 2 (outer) threads on my dual-socket server. Then it binds them to different sockets (NUMA nodes). Finally, it splits each outer thread into 20 (inner) threads, since each CPU has 10 physical cores with hyperthreading enabled.
All inner threads run on the same socket as their parent thread: on cores 0-9 and 20-29 for outer thread 0, and on cores 10-19 and 30-39 for outer thread 1. (sched_getcpu() returned the virtual core number, in the range 0-39 in my case.)
Note that there is no C++11 threading, just pure OpenMP.
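For reference, an assumed compile-and-run line in the style of the other examples here (the file name numa_omp.cpp is hypothetical; it assumes GCC with OpenMP support and the libnuma development package installed):
$ g++ -std=c++11 -fopenmp numa_omp.cpp -lnuma -o numa_omp
$ ./numa_omp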

How to create a certain number of threads based on a value a variable contains?

I have an integer variable that contains the number of threads to execute. Let's call it myThreadVar. I want to execute myThreadVar threads and cannot think of any way to do it without a ton of if statements. Is there any way I can create myThreadVar threads, no matter what myThreadVar is?
I was thinking:
for (int i = 0; i < myThreadVar; ++i) { std::thread t_i(myFunc); }, but that obviously won't work.
Thanks in advance!
Make an array or vector of threads, put the threads in, and then if you want to wait for them to finish have a second loop go over your collection and join them all:
std::vector<std::thread> myThreads;
myThreads.reserve(myThreadVar);
for (int i = 0; i < myThreadVar; ++i)
{
    myThreads.push_back(std::thread(myFunc));
}
// second loop: wait for all the threads to finish
for (auto& t : myThreads)
{
    t.join();
}
While other answers use vector::push_back(), I prefer vector::emplace_back(), which is possibly more efficient. Also use vector::reserve().
#include <thread>
#include <vector>

void func() {}

int main() {
    int num = 3;
    std::vector<std::thread> vec;
    vec.reserve(num);
    for (auto i = 0; i < num; ++i) {
        vec.emplace_back(func);
    }
    for (auto& t : vec) t.join();
}
So obviously the best solution is not to wait for each thread to finish before starting the next one; you need to run all of them in parallel.
In this case you can use the vector class to store all of the thread instances and then join all of them.
Take a look at my example.
#include <thread>
#include <vector>

void myFunc() {
    /* Some code */
}

int main()
{
    int myThreadVar = 50;
    std::vector<std::thread> threadsToJoin;
    threadsToJoin.resize(myThreadVar);
    for (int i = 0; i < myThreadVar; ++i) {
        threadsToJoin[i] = std::thread(myFunc);
    }
    for (int i = 0; i < threadsToJoin.size(); i++) {
        threadsToJoin[i].join();
    }
}
#include <iostream>
#include <thread>

void myFunc(int n) {
    std::cout << "myFunc " << n << std::endl;
}

int main(int argc, char *argv[]) {
    int myThreadVar = 5;
    for (int i = 0; i < myThreadVar; ++i) {
        std::cout << "Launching " << i << std::endl;
        std::thread t_i(myFunc, i);
        t_i.detach();
    }
}
g++ -std=c++11 -o 35106568 35106568.cpp
./35106568
Launching 0
myFunc 0
Launching 1
myFunc 1
Launching 2
myFunc 2
Launching 3
myFunc 3
Launching 4
myFunc 4
You need to store the threads so you can join them later.
std::thread t[myThreadVar];
for (int i = 0; i < myThreadVar; ++i) { t[i] = std::thread(myFunc); }  // Start all threads
for (int i = 0; i < myThreadVar; ++i) { t[i].join(); }                 // Wait for all threads to finish
I think this is valid syntax, but I'm more used to C, so I'm not sure I initialized the array correctly. (Note that a runtime-sized array like this relies on a compiler extension; standard C++ would use std::vector or a compile-time constant size.)

Why don't threads seem to run in parallel in this code?

This is the first time I am working with threads, so I am sorry if this is a bad question. Shouldn't the output consist of "randomized", interleaved mains and foos? What I get seems to be a column of foos and a column of mains.
#include <iostream>
#include <thread>

void foo() {
    for (int i = 0; i < 20; ++i) {
        std::cout << "foo" << std::endl;
    }
}

int main(int argc, char** argv) {
    std::thread first(foo);
    for (int i = 0; i < 20; ++i) {
        std::cout << "main" << std::endl;
    }
    first.join();
    return 0;
}
There is overhead in starting a thread, so in this simple example the output is completely unpredictable. Both for loops run for a very short time, so if the thread start is even a millisecond late, the two code segments execute sequentially instead of in parallel. But if the operating system happens to schedule the new thread first, the "foo" sequence shows up before the "main" sequence.
Insert some sleep calls into the thread and the main function to see whether they really run in parallel:
#include <iostream>
#include <thread>
#include <unistd.h>

void foo() {
    for (int i = 0; i < 20; ++i) {
        std::cout << "foo" << std::endl;
        sleep(1);
    }
}

int main(int argc, char** argv) {
    std::thread first(foo);
    for (int i = 0; i < 20; ++i) {
        std::cout << "main" << std::endl;
        sleep(1);
    }
    first.join();
    return 0;
}
Using threads does not automatically enforce parallel execution of code segments: if you have only one CPU in your system, execution is switched between all processes and threads, and code segments never truly run in parallel.
There is a good Wikipedia article about threads; see especially the section about "Multithreading".
After each cout, try yielding. This may give any waiting thread a chance to run (although the behavior is implementation dependent).
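A minimal sketch of that suggestion, with std::this_thread::yield() after each line of output (the yield is only a scheduling hint, so interleaving becomes more likely but is still not guaranteed):
#include <iostream>
#include <thread>

void foo() {
    for (int i = 0; i < 20; ++i) {
        std::cout << "foo" << std::endl;
        std::this_thread::yield();  // hint: let another ready thread run
    }
}

int main() {
    std::thread first(foo);
    for (int i = 0; i < 20; ++i) {
        std::cout << "main" << std::endl;
        std::this_thread::yield();  // same hint on the main thread
    }
    first.join();
    return 0;
}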