How to pass and start multiple threads within a function? - C++

I want to pass in an arbitrary number of functions together with their arguments to a function called startThread so that it can run them concurrently.
My code is below, but it obviously has syntax errors:
#include <iostream>
#include <thread>
#include <chrono>
#include <vector>
#include <exception>

int test1( int i, double d )
{
    // do something...
    using namespace std::chrono_literals;
    std::this_thread::sleep_for( 3000ms );
    return 0;
}

int test2( char c )
{
    // do something...
    using namespace std::chrono_literals;
    std::this_thread::sleep_for( 2000ms );
    return 0;
}

template< class Fn, class... Args >            // how should I write the template and startThread params to
int startThread( Fn&&... fns, Args&&... args ) // get arbitrary functions as threads and their own arguments?
{
    std::vector< std::thread > threads;
    threads.push_back( std::thread( test1, 2, 65.2 ) ); // how to automate the task of starting the
    threads.push_back( std::thread( test2, 'A' ) );     // threads instead of writing them one by one?
    std::cout << "synchronizing all threads...\n";
    for ( auto& th : threads ) th.join();
    return 0;
}

int main( )
{
    int successIndicator { };
    try
    {
        successIndicator = startThread( test1( 2, 65.2 ), test2( 'A' ) ); // what should be passed to startThread?
    }                                                                     // How to pass the arguments?
    catch ( const std::exception& e )
    {
        successIndicator = -1;
        std::cerr << e.what( ) << '\n';
    }
    return successIndicator;
}
Thanks in advance.

This is how I would do it, using a recursive template function, and std::async instead of std::thread:
#include <future>
#include <chrono>
#include <thread>
#include <iostream>

void test1(int /*i*/, double /*d*/)
{
    std::cout << "Test1 start\n";
    std::this_thread::sleep_for(std::chrono::milliseconds(300));
    std::cout << "Test1 done\n";
}

void test2(bool)
{
    std::cout << "Test2 start\n";
    std::this_thread::sleep_for(std::chrono::milliseconds(500));
    std::cout << "Test2 done\n";
}

//-----------------------------------------------------------------------------
// Recursive template function that starts all passed functions
// and then waits for them to finish one by one.
// The total time is still bounded by the slowest function,
// so there is no need to store the futures in a collection.
template<typename Fn, typename... Fns>
void run_parallel(Fn fn, Fns&&... fns)
{
    // I prefer std::async over std::thread:
    // it is a better abstraction, and futures
    // allow passing exceptions back to the calling thread.
    auto future = std::async(std::launch::async, fn);

    // if there are any more functions to start, do so
    if constexpr (sizeof...(fns) > 0)
    {
        run_parallel(std::forward<Fns>(fns)...);
    }

    future.get();
}

//-----------------------------------------------------------------------------
int main()
{
    std::cout << "main start\n";

    // start all functions by wrapping them in lambdas
    run_parallel(
        []() { test1(1, 1.10); },
        []() { test2(true); }
    );

    std::cout << "main done\n";
}
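As the comments above mention, one practical advantage of std::async over a raw std::thread is that an exception thrown inside the task is stored in the future and rethrown by get(). A minimal sketch of that behavior (may_throw is a hypothetical worker used only to show the exception path):

#include <future>
#include <iostream>
#include <stdexcept>

// hypothetical worker, used only to demonstrate exception propagation
void may_throw()
{
    throw std::runtime_error("worker failed");
}

int main()
{
    auto fut = std::async(std::launch::async, may_throw);
    try
    {
        fut.get(); // rethrows the worker's exception in the calling thread
    }
    catch (const std::exception& e)
    {
        std::cout << "caught in main: " << e.what() << '\n';
    }
}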

You can pack the functions into one tuple and the arguments of each function into their own tuples, then pass all of them to startThread() together. Inside, expand the function tuple with std::apply and, for each function, expand its argument tuple with a nested std::apply to call the function with those arguments. Something like this:
template<class FunTuple, class... ArgsTuple>
int startThread(FunTuple fun_tuple, ArgsTuple... args_tuple) {
    std::vector<std::thread> threads;
    std::apply([&](auto... fun) {
        (threads.emplace_back(
            [&] { std::apply([&](auto... args) { fun(args...); }, args_tuple); }
        ), ...);
    }, fun_tuple);
    std::cout << "synchronizing all threads...\n";
    for (auto& th : threads) th.join();
    return 0;
}
Then you can invoke startThread() like this:
startThread(std::tuple(test1, test2), std::tuple(2, 65.2), std::tuple('A'));
Demo.

Related

Determining function time using a wrapper

I'm looking for a generic way of measuring a function's execution time, like here, but for C++.
My main goal is to not have cluttered code like this piece everywhere:
auto t1 = std::chrono::high_resolution_clock::now();
function(arg1, arg2);
auto t2 = std::chrono::high_resolution_clock::now();
auto tDur = std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1);
But rather have a nice wrapper around the function.
What I got so far is:
timing.hpp:
#pragma once
#include <chrono>
#include <functional>

template <typename Tret, typename Tin1, typename Tin2>
unsigned int getDuration(std::function<Tret(Tin1, Tin2)> function, Tin1 arg1, Tin2 arg2, Tret& retValue)
{
    auto t1 = std::chrono::high_resolution_clock::now();
    retValue = function(arg1, arg2);
    auto t2 = std::chrono::high_resolution_clock::now();
    auto tDur = std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1);
    return tDur.count();
}
main.cpp:
#include "timing.hpp"
#include "matrix.hpp"
constexpr int G_MATRIXSIZE = 2000;
int main(int argc, char** argv)
{
CMatrix<double> myMatrix(G_MATRIXSIZE);
bool ret;
// this call is quite ugly
std::function<bool(int, std::vector<double>)> fillRow = std::bind(&CMatrix<double>::fillRow, &myMatrix, 0, fillVec);
auto duration = getDuration(fillRow, 5, fillVec, ret );
std::cout << "duration(ms): " << duration << std::endl;
}
In case somebody wants to test the code, here is matrix.hpp:
#pragma once
#include <iostream>
#include <string>
#include <sstream>
#include <vector>

template<typename T> class CMatrix {
public:
    // ctor
    CMatrix(int size) :
        m_size(size)
    {
        m_matrixData = new std::vector<std::vector<T>>;
        createUnityMatrix();
    }

    // dtor
    ~CMatrix()
    {
        std::cout << "Destructor of CMatrix called" << std::endl;
        delete m_matrixData;
    }

    // print to std::out
    void printMatrix()
    {
        std::ostringstream oss;
        for (int i = 0; i < m_size; i++)
        {
            for (int j = 0; j < m_size; j++)
            {
                oss << m_matrixData->at(i).at(j) << ";";
            }
            oss << "\n";
        }
        std::cout << oss.str() << std::endl;
    }

    bool fillRow(int index, std::vector<T> row)
    {
        // checks
        if (!indexValid(index))
        {
            return false;
        }
        if (row.size() != m_size)
        {
            return false;
        }
        // data replacement
        for (int j = 0; j < m_size; j++)
        {
            m_matrixData->at(index).at(j) = row.at(j);
        }
        return true;
    }

    bool fillColumn(int index, std::vector<T> column)
    {
        // checks
        if (!indexValid(index))
        {
            return false;
        }
        if (column.size() != m_size)
        {
            return false;
        }
        // data replacement
        for (int j = 0; j < m_size; j++)
        {
            m_matrixData->at(index).at(j) = column.at(j);
        }
        return true;
    }

private:
    // variables
    std::vector<std::vector<T>>* m_matrixData;
    int m_size;

    bool indexValid(int index)
    {
        if (index + 1 > m_size)
        {
            return false;
        }
        return true;
    }

    // functions
    void createUnityMatrix()
    {
        for (int i = 0; i < m_size; i++)
        {
            std::vector<T> _vector;
            for (int j = 0; j < m_size; j++)
            {
                if (i == j)
                {
                    _vector.push_back(1);
                }
                else
                {
                    _vector.push_back(0);
                }
            }
            m_matrixData->push_back(_vector);
        }
    }
};
The thing is, this code is still quite ugly due to the std::function usage. Is there a better and/or simpler option?
(Also, I'm sure I messed something up with the std::bind; I think I need to use std::placeholders, since I want to set the arguments later on.)
// edit, correct use of placeholder in main:
std::function<bool(int, std::vector<double>)> fillRow = std::bind(&CMatrix<double>::fillRow, &myMatrix, std::placeholders::_1, std::placeholders::_2);
auto duration = getDuration(fillRow, 18, fillVec, ret );
You can use RAII to implement a timer that records the execution time of a code block, together with a template function that wraps the function you want to execute with the timer.
#include <string>
#include <chrono>
#include <cstdio>
#include <utility>
#include <unistd.h>

struct Timer
{
    std::string fn, title;
    std::chrono::time_point<std::chrono::steady_clock> start;

    Timer(std::string fn, std::string title)
        : fn(std::move(fn)), title(std::move(title)), start(std::chrono::steady_clock::now())
    {
    }

    ~Timer()
    {
        const auto elapsed =
            std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::steady_clock::now() - start).count();
        printf("%s: function=%s; elapsed=%f ms\n", title.c_str(), fn.c_str(), elapsed / 1000.0);
    }
};

#ifndef ENABLE_BENCHMARK
static constexpr inline void dummy_fn() { }
#define START_BENCHMARK_TIMER(...) dummy_fn()
#else
#define START_BENCHMARK_TIMER(title) Timer timer(__FUNCTION__, title)
#endif

template<typename F, typename ...Args>
auto time_fn(F&& fn, Args&&... args) {
    START_BENCHMARK_TIMER("wrapped fn");
    return fn(std::forward<Args>(args)...);
}

int foo(int i) {
    usleep(70000);
    return i;
}

int main()
{
    printf("%d\n", time_fn(foo, 3));
}
stdout:
wrapped fn: function=time_fn; elapsed=71.785000 ms
3
General Idea:
time_fn is a simple template function that calls START_BENCHMARK_TIMER and then calls fn with the provided arguments.
START_BENCHMARK_TIMER creates a Timer object, which records the current time in start. Note that __FUNCTION__ expands to the name of the enclosing function (here time_fn).
When the provided fn returns or throws an exception, the Timer object is destroyed and its destructor calculates the difference between the current time and the recorded start time and prints it to stdout.
Note:
Even though declaring start and end directly in time_fn would also work, the RAII timer lets you cleanly handle the situation where fn throws an exception.
If you are on C++11, you will need to change the time_fn declaration to typename std::result_of<F&&(Args&&...)>::type time_fn(F&& fn, Args&&... args).
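For reference, a minimal sketch of that C++11 variant (same body as above; the Timer struct and START_BENCHMARK_TIMER macro from the snippet are assumed to be in scope):

#include <type_traits> // std::result_of
#include <utility>     // std::forward

template <typename F, typename... Args>
typename std::result_of<F&&(Args&&...)>::type time_fn(F&& fn, Args&&... args)
{
    START_BENCHMARK_TIMER("wrapped fn"); // assumes the macro/Timer defined above
    return fn(std::forward<Args>(args)...);
}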
Edit: Updated the response to include a wrapper function approach.

How to wait until all threads from the pool end their work?

I am trying to implement a simple thread pool using the Boost library.
Here is the code:
//boost::asio::io_service ioService;
//boost::thread_group pool;
//boost::asio::io_service::work* worker;

ThreadPool::ThreadPool(int poolSize /*= boost::thread::hardware_concurrency()*/)
{
    if (poolSize >= 1 && poolSize <= boost::thread::hardware_concurrency())
        threadAmount = poolSize;
    else
        threadAmount = 1;
    worker = NULL;
}

ThreadPool::~ThreadPool()
{
    if (worker != NULL && !ioService.stopped())
    {
        _shutdown();
        delete worker;
        worker = NULL;
    }
}

void ThreadPool::start()
{
    if (worker != NULL)
    {
        return;
    }
    worker = new boost::asio::io_service::work(ioService);
    for (int i = 0; i < threadAmount; ++i)
    {
        pool.create_thread(boost::bind(&boost::asio::io_service::run, &ioService));
    }
}

template<class F, class...Args>
void ThreadPool::execute(F f, Args&&... args)
{
    ioService.post(boost::bind(f, std::forward<Args>(args)...));
}

void ThreadPool::shutdown()
{
    pool.interrupt_all();
    _shutdown();
}

void ThreadPool::join_all()
{
    // wait for all threads before continuing;
    // in other words, a barrier for all threads once they have finished all jobs,
    // so they can be re-used in the future
}

void ThreadPool::_shutdown()
{
    ioService.reset();
    ioService.stop();
}
In my program I assign some tasks to the thread pool and then continue with the main thread. At some point I need to wait for all threads to finish all tasks before I can proceed with my calculations. Is there any way to do this?
Thanks a lot.
As others have pointed out, the main culprit is the work instance.
I'd simplify the interface considerably: there's really no reason to split shutdown into shutdown, _shutdown, join_all plus some extra logic in the destructor as well. That just makes it hard to know which responsibility lives where.
The interface should be a Pit Of Success: easy to use right, hard to use wrong.
At the same time that makes it much easier to implement correctly.
Here's a first stab:
Live On Coliru
#include <boost/asio.hpp>
#include <boost/thread.hpp>
#include <functional> // std::bind
#include <memory>     // std::unique_ptr, std::make_unique

namespace ba = boost::asio;

struct ThreadPool {
    ThreadPool(unsigned poolSize = boost::thread::hardware_concurrency());
    ~ThreadPool();

    void start();

    template <typename F, typename... Args>
    void execute(F f, Args&&... args) {
        ioService.post(std::bind(f, std::forward<Args>(args)...));
    }

  private:
    unsigned threadAmount;
    ba::io_service ioService;
    boost::thread_group pool;
    std::unique_ptr<ba::io_service::work> work;

    void shutdown();
};

ThreadPool::ThreadPool(
        unsigned poolSize /*= boost::thread::hardware_concurrency()*/) {
    threadAmount = std::max(1u, poolSize);
    threadAmount = std::min(boost::thread::hardware_concurrency(), threadAmount);
}

ThreadPool::~ThreadPool() {
    shutdown();
}

void ThreadPool::start() {
    if (!work) {
        work = std::make_unique<ba::io_service::work>(ioService);
        for (unsigned i = 0; i < threadAmount; ++i) {
            pool.create_thread(
                boost::bind(&ba::io_service::run, &ioService));
        }
    }
}

void ThreadPool::shutdown() {
    work.reset();
    pool.interrupt_all();
    ioService.stop();
    pool.join_all();
    ioService.reset();
}

#include <iostream>
using namespace std::chrono_literals;

int main() {
    auto now = std::chrono::high_resolution_clock::now;
    auto s = now();
    {
        ThreadPool p(10);
        p.start();

        p.execute([] { std::this_thread::sleep_for(1s); });
        p.execute([] { std::this_thread::sleep_for(600ms); });
        p.execute([] { std::this_thread::sleep_for(400ms); });
        p.execute([] { std::this_thread::sleep_for(200ms); });
        p.execute([] { std::this_thread::sleep_for(10ms); });
    }
    std::cout << "Total elapsed: " << (now() - s) / 1.0s << "s\n";
}
On most multi-core systems this will print something like it did on mine:
Total elapsed: 1.00064s
It looks like you had an error in calculating threadAmount, where you'd fall back to 1 whenever poolSize was greater than hardware_concurrency().
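For reference, a compact way to express the intended clamping (a sketch only; clampPoolSize is a hypothetical helper name, and std::clamp needs C++17 and <algorithm>):

#include <algorithm>
#include <boost/thread.hpp>

// hypothetical helper: keep the requested size in [1, hardware threads]
unsigned clampPoolSize(unsigned poolSize)
{
    // guard against platforms where hardware_concurrency() reports 0
    unsigned hw = std::max(1u, boost::thread::hardware_concurrency());
    return std::clamp(poolSize, 1u, hw);
}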
To be honest, why have the bind in the implementation? It really doesn't add a lot; you can leave it up to the caller, and they can choose whether they use bind, and if so, whether it's boost::bind, std::bind or some other way of composing callables:
template <typename F>
void execute(F f) { ioService.post(f); }
You're missing exception handling around the io_service::run() calls (see Should the exception thrown by boost::asio::io_service::run() be caught?).
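A sketch of one way to handle that, assuming it replaces the create_thread call inside start() above (the logging is only illustrative, and std::cerr needs <iostream>):

pool.create_thread([this] {
    for (;;) {
        try {
            ioService.run();
            break; // run() returned normally: no more work
        } catch (std::exception const& e) {
            // a posted task threw; report it and let the worker call run() again
            std::cerr << "worker caught: " << e.what() << "\n";
        }
    }
});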
If you're using a recent Boost version, you can use the newer io_context and thread_pool interfaces, which greatly simplify things:
Live On Coliru
#include <boost/asio.hpp>
#include <algorithm> // std::clamp
#include <thread>    // std::thread::hardware_concurrency

struct ThreadPool {
    ThreadPool(unsigned poolSize)
        : pool(std::clamp(poolSize, 1u, std::thread::hardware_concurrency()))
    { }

    template <typename F>
    void execute(F f) { post(pool, f); }

  private:
    boost::asio::thread_pool pool;
};
This still has 99% of the functionality¹, but in 10 LoC.
In fact, the class has become a trivial wrapper, so we could just write:
Live On Coliru
#include <boost/asio.hpp>
#include <iostream>

using namespace std::chrono_literals;
using C = std::chrono::high_resolution_clock;

static void sleep_for(C::duration d) { std::this_thread::sleep_for(d); }

int main() {
    auto s = C::now();
    {
        boost::asio::thread_pool pool;

        post(pool, [] { sleep_for(1s); });
        post(pool, [] { sleep_for(600ms); });

        // still can bind if you want
        post(pool, std::bind(sleep_for, 400ms));
        post(pool, std::bind(sleep_for, 200ms));
        post(pool, std::bind(sleep_for, 10ms));

        //pool.join(); // implicit in destructor
    }
    std::cout << "Total elapsed: " << (C::now() - s) / 1.0s << "s\n";
}
The main difference is the default pool size: it is 2 * hardware concurrency (but also calculated more safely, because not all platforms have a reliable hardware_concurrency(); it could be zero, for example).
¹ It doesn't currently exercise interruption points.

std::packaged_task with std::placeholders

MAJOR EDIT TO SIMPLIFY CODE (and solved)
I would like to be able to make a packaged task that has a free unbound argument, which I will then add at call time of the packaged task.
In this case, I want the first argument to the function (of type size_t) to be unbound.
Here is a working minimal example (this was the solution):
#include <vector>
#include <queue>
#include <memory>
#include <thread>
#include <chrono>
#include <mutex>
#include <condition_variable>
#include <future>
#include <functional>
#include <stdexcept>
#include <cstdlib>
#include <cstdio>

//REV: I'm trying to "trick" this so that, for double testfunct( size_t arg1, double arg2 ),
// calling enqueue( testfunct, 1.0 ) will internally execute
// return testfunct( internal_size_t, 1.0 )
template<typename F, typename... Args>
auto enqueue(F&& f, Args&&... args)
    -> std::future<typename std::result_of<F(size_t, Args...)>::type>
{
    using return_type = typename std::result_of<F(size_t, Args...)>::type;

    //REV: this is where the error was. I mistakenly thought that task_contents, which would be
    // pushed to the queue, had to have the same (return) type as the original function.
    // Changing it to auto (together with Jans's return_type(size_t) advice) made everything work.
    //std::function<void(size_t)> task_contents = std::bind( std::forward<F>(f), std::placeholders::_1, std::forward<Args>(args)... );
    auto task_contents = std::bind( std::forward<F>(f), std::placeholders::_1, std::forward<Args>(args)... );

    std::packaged_task<return_type(size_t)> rawtask( task_contents );

    std::future<return_type> res = rawtask.get_future();

    size_t arbitrary_i = 10;
    rawtask(arbitrary_i);

    return res;
}

double testfunct( size_t threadidx, double& arg1 )
{
    fprintf(stdout, "Double %lf Executing on thread %zu\n", arg1, threadidx );
    std::this_thread::sleep_for( std::chrono::milliseconds(1000) );
    return 10; //true;
}

int main()
{
    std::vector<std::future<double>> myfutures;
    for(size_t x=0; x<100; ++x)
    {
        double a = x*10;
        myfutures.push_back(
            enqueue( testfunct, std::ref(a) )
        );
    }

    for(size_t x=0; x<100; ++x)
    {
        double r = myfutures[x].get();
        fprintf(stdout, "Got %zu: %f\n", x, r );
    }
}
The main issues are in ThreadPool::enqueue:
std::function<void(size_t)> task1 = std::bind( std::forward<F>(f), std::placeholders::_1, std::forward<Args>(args)... );
Here the type of task1 is std::function<void(std::size_t)>, but the result of the std::bind, when evaluated with funct, is convertible to std::function<bool(std::size_t)>. Even though, as @T.C. has pointed out, you can assign the result of the bind to task1, in order to pass task1 to std::make_shared you need to honor the return_type you've got.
Change the above to:
std::function<return_type(size_t)> task1 = std::bind( std::forward<F>(f), std::placeholders::_1, std::forward<Args>(args)... );
Now the same for:
auto task = std::make_shared< std::packaged_task<return_type()> >( task1 );
but in this case it is the parameter type that is missing. Change it to:
auto task = std::make_shared< std::packaged_task<return_type(std::size_t)> >( task1 );
ThreadPool::tasks stores function objects of type std::function<void(std::size_t)>, but you're storing lambdas that receive no arguments. Change the tasks.emplace(...) to:
tasks.emplace([task](std::size_t a){ (*task)(a); });
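Putting the three changes together, the enqueue body would look roughly like this (a sketch only; it assumes the surrounding ThreadPool members from the original code, namely queue_mutex, tasks, condition and stop):

template <typename F, typename... Args>
auto enqueue(F&& f, Args&&... args)
    -> std::future<typename std::result_of<F(size_t, Args...)>::type>
{
    using return_type = typename std::result_of<F(size_t, Args...)>::type;

    // keep the real return type and leave the size_t slot unbound
    std::function<return_type(size_t)> task1 =
        std::bind(std::forward<F>(f), std::placeholders::_1, std::forward<Args>(args)...);

    auto task = std::make_shared<std::packaged_task<return_type(std::size_t)>>(task1);
    std::future<return_type> res = task->get_future();
    {
        std::unique_lock<std::mutex> lock(queue_mutex);
        if (stop)
            throw std::runtime_error("enqueue on stopped ThreadPool");
        // the queued wrapper must itself take the size_t argument
        tasks.emplace([task](std::size_t a) { (*task)(a); });
    }
    condition.notify_one();
    return res;
}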
The code is not very well formatted, but here is a solution.
First, you should wrap the results at lambda-creation time rather than pass functions that can return anything. But if you want to use a shared pointer to a task, this works.
In the prototype:
std::future<void> enqueue(std::function<void(size_t)> f);
using Task = std::function<void(size_t)>;
// the task queue
std::queue<Task> tasks;
std::optional<Task> pop_one();
Implementation becomes:
ThreadPool::ThreadPool(size_t threads)
    : stop(false)
{
    for(size_t i = 0; i < threads; ++i)
        workers.emplace_back(
            [this,i]
            {
                for(;;)
                {
                    auto task = pop_one();
                    if(task)
                    {
                        (*task)(i);
                    }
                    else break;
                }
            }
        );
}

std::optional<ThreadPool::Task> ThreadPool::pop_one()
{
    std::unique_lock<std::mutex> lock(this->queue_mutex);
    this->condition.wait(lock,
        [this]{ return this->stop || !this->tasks.empty(); });
    if(this->stop && this->tasks.empty())
    {
        return std::optional<Task>();
    }
    auto task = std::move(this->tasks.front()); //REV: this moves into my thread the front of the tasks queue.
    this->tasks.pop();
    return task;
}

template<typename T>
std::future<T> ThreadPool::enqueue(std::function<T(size_t)> fun)
{
    auto task = std::make_shared< std::packaged_task<T(size_t)> >([=](size_t size){ return fun(size); });
    auto res = task->get_future();
    {
        std::unique_lock<std::mutex> lock(queue_mutex);
        // don't allow enqueueing after stopping the pool
        if(stop)
        {
            throw std::runtime_error("enqueue on stopped ThreadPool");
        }
        tasks.emplace([=](size_t size){ (*task)(size); });
    }
    condition.notify_one();
    return res;
}
And now you can have your main:
int main()
{
    size_t nthreads = 3;
    ThreadPool tp(nthreads);

    std::vector<std::future<bool>> myfutures;
    for(size_t x=0; x<100; ++x)
    {
        myfutures.push_back(
            tp.enqueue<bool>([=](size_t threadidx) { return funct(threadidx, (double)x * 10.0); }));
    }

    for(size_t x=0; x<100; ++x)
    {
        bool r = myfutures[x].get();
        std::cout << "Got " << r << "\n";
    }
}
There is now an explicit return type when wrapping the lambda, as the return type is templated.

C++ class method that takes an arbitrary number of callbacks and stores results

I've been trying to think of a way to have my class method take an arbitrary number of callback functions, run all of them, and then store the output. I think this works, but is there a way to do it without making the user wrap all of the callback functions in a vector? It also just feels messy. Feel free to mention other things that are not ideal.
#include <iostream>
#include <functional>
#include <vector>

class MyObj{
    public:
        // where I store stuff
        std::vector<double> myResults;

        // function that is called intermittently
        void runFuncs(const std::vector<std::function<double()> >& fs){
            if ( myResults.size() == 0){
                for( auto& f : fs){
                    myResults.push_back(f());
                }
            }else{
                int i (0);
                for( auto& f : fs){
                    myResults[i] = f();
                    i++;
                }
            }
        }
};

int main(int argc, char **argv)
{
    auto lambda1 = [](){ return 1.0; };
    auto lambda2 = [](){ return 2.0; };

    MyObj myThing;

    std::vector<std::function<double()> > funcs;
    funcs.push_back(lambda1);
    funcs.push_back(lambda2);
    myThing.runFuncs(funcs);

    std::cout << myThing.myResults[0] << "\n";
    std::cout << myThing.myResults[1] << "\n";

    std::vector<std::function<double()> > funcs2;
    funcs2.push_back(lambda2);
    funcs2.push_back(lambda1);
    myThing.runFuncs(funcs2);

    std::cout << myThing.myResults[0] << "\n";
    std::cout << myThing.myResults[1] << "\n";

    return 0;
}
Something like this, perhaps:
template <typename... Fs>
void runFuncs(Fs... fs) {
    myResults = std::vector<double>({fs()...});
}
Then you can call it as
myThing.runFuncs(lambda1, lambda2);
Demo
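For completeness, a minimal self-contained sketch of the class with the variadic member, so it can be compiled as-is (the braced-init-list guarantees left-to-right evaluation, so the results keep the call order):

#include <iostream>
#include <vector>

class MyObj {
public:
    std::vector<double> myResults;

    // accepts any number of callables returning double;
    // the braced-init-list is evaluated left to right, preserving the call order
    template <typename... Fs>
    void runFuncs(Fs... fs) {
        myResults = std::vector<double>({fs()...});
    }
};

int main() {
    MyObj myThing;
    myThing.runFuncs([] { return 1.0; }, [] { return 2.0; });
    std::cout << myThing.myResults[0] << " " << myThing.myResults[1] << "\n"; // 1 2
}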

std::threads of non-void functions (C++11)

How can I get the return values of the functions sent to the std::threads?
I'm working on a function that applies a filter, written for 2D images, to each channel of a color/multispectral image. Many of the functions previously implemented in this library return an image, so I tried to create a function that takes the returned image as a parameter, but it didn't work. Here's a copy of the code:
template< class D, class A >
template < typename... Args>
void Image < D, A >::VoidFunction( Image< D, A > &out, Image < D, A > function ( const Image< D, A >& , Args ... ),
                                   const Image <D, A > &in, Args... args ) {
    out = ( function ) ( in, args... );
    return ;
}

template< class D, class A >
template < typename... Args>
Image < D, A > Image < D, A >::multiSpecImgFilter( Image < D, A > function ( const Image<D, A>& , Args ... ),
                                                   const Image <D, A > &img, Args... args ) {
    if ( img.Dims() != 3 ) {
        std::string msg( std::string( __FILE__ ) + ": " + std::to_string( __LINE__ ) + ": Image<D,A> " + "::" +
                         std::string( __FUNCTION__ ) + ": error: Image channels must have 2 dimensions" );
        throw( std::logic_error( msg ) );
    }
    std::vector< Image < D, A > > channel = img.Split( );
    // std::vector< std::thread > threads ;
    // for( size_t thd = 0; thd < channel.size(); ++thd )
    //     threads[ thd ].join( );
    try {
        for ( int ch = 0; ch < channel.size() ; ch++ )
            std::thread thd ( &VoidFunction, channel[ch], function, channel[ch], args... );
    }
    catch( ... ) {
        for ( int ch = 0; ch < img.size(2) ; ch++ )
            channel[ ch ] = ( function ) ( channel [ ch ], args... );
    }
    return ( Image< D, A >::Merge( channel, img.PixelSize().back(), img.Channel().back() ) );
}
You can use a lambda that stores the result through a captured reference.
#include <iostream>
#include <thread>

int threadFunction()
{
    return 8;
}

int main()
{
    int retCode;
    // retCode is captured by reference so the modification is visible in the main thread
    std::thread t([&retCode](){
        std::cout << "thread function\n";
        retCode = threadFunction();
    });
    std::cout << "main thread\n";
    t.join(); // join to avoid a crash; after this, retCode holds 8
    return 0;
}
Or pass the variable with std::ref. This is an example from:
https://www.acodersjourney.com/c11-multithreading-tutorial-via-faq-thread-management-basics/
#include <string>
#include <thread>
#include <iostream>
#include <functional>

using namespace std;

void ChangeCurrentMissileTarget(string& targetCity)
{
    targetCity = "Metropolis";
    cout << " Changing The Target City To " << targetCity << endl;
}

int main()
{
    string targetCity = "Star City";
    thread t1(ChangeCurrentMissileTarget, std::ref(targetCity));
    t1.join();
    cout << "Current Target City is " << targetCity << endl;
    return 0;
}
The canonical way of doing this, I suppose, is to use async.
#include <functional>
#include <future>
#include <iostream>

int main()
{
    auto result = std::async(std::launch::async, std::plus<>{}, 42, 42);
    std::cout << result.get() << "\n"; // outputs 84
}
(live demo)
I'd say this looks better than using an output parameter.
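Applied to the question's per-channel filtering, the same std::async idea could look roughly like this (a sketch only; filterChannel is a stand-in for the real filter and int stands in for the Image type):

#include <future>
#include <iostream>
#include <vector>

// stand-in for the per-channel filter that returns a new image
int filterChannel(int channel) { return channel * 2; }

int main()
{
    std::vector<int> channels{1, 2, 3};
    std::vector<std::future<int>> futures;
    futures.reserve(channels.size());

    // launch one asynchronous filter call per channel
    for (int ch : channels)
        futures.push_back(std::async(std::launch::async, filterChannel, ch));

    // collect each channel's result (get() also rethrows any exception)
    for (auto& f : futures)
        std::cout << f.get() << ' ';
    std::cout << '\n';
}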
You can also directly use a packaged_task:
#include <functional>
#include <future>
#include <iostream>
#include <thread>

int main()
{
    std::packaged_task<int(int, int)> task{std::plus<>{}};
    auto result = task.get_future();

    std::thread t{std::move(task), 42, 42};

    std::cout << result.get() << "\n"; // outputs 84

    t.join();
}
(live demo)