I am implementing a concurrent wrapper as introduced by Herb Sutter in his "C++ and Beyond 2012" talk.
template <typename T>
class ConcurrentWrapper {
private:
    std::deque<std::unique_ptr<std::function<void()>>> _tasks;
    std::mutex _mutex;
    std::condition_variable _cond;
    T _object;
    std::thread _worker;
    std::atomic<bool> _done {false};

public:
    template <typename... ArgsT>
    ConcurrentWrapper(ArgsT&&... args) :
        _object {std::forward<ArgsT>(args)...},
        _worker {
            [&]() {
                typename decltype(_tasks)::value_type task;
                while(!_done) {
                    {
                        std::unique_lock<std::mutex> lock(_mutex);
                        while(_tasks.empty()) {
                            _cond.wait(lock);
                        }
                        task = std::move(_tasks.front());
                        _tasks.pop_front();
                    }
                    (*task)();
                }
            }
        } {
    }

    ~ConcurrentWrapper() {
        {
            std::unique_lock<std::mutex> lock(_mutex);
            _tasks.push_back(std::make_unique<std::function<void()>>(
                [&](){ _done = true; }
            ));
        }
        _cond.notify_one();
        _worker.join();
    }

    template <typename F, typename R = std::result_of_t<F(T&)>>
    std::future<R> operator()(F&& f) {
        std::packaged_task<R(T&)> task(std::forward<F>(f));
        auto fu = task.get_future();
        {
            std::unique_lock<std::mutex> lock(_mutex);
            _tasks.push_back(std::make_unique<std::function<void()>>(
                [this, task=MoveOnCopy<decltype(task)>(std::move(task))]() {
                    task.object(this->_object);
                }
            ));
        }
        _cond.notify_one();
        return fu;
    }
};
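(MoveOnCopy is a helper that lets the move-only std::packaged_task pass std::function's copyability requirement; it is not shown above. A typical shape, consistent with the task.object usage, is:)

template <typename T>
struct MoveOnCopy {
    explicit MoveOnCopy(T&& t) : object(std::move(t)) {}
    MoveOnCopy(const MoveOnCopy& other) : object(std::move(other.object)) {}
    mutable T object; // "copying" actually moves, hence mutable
};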
Basically, the idea is to wrap an object and provide thread-safe access in FIFO order through operator(). However, in some runs (it does not always happen), the following program hung:
ConcurrentWrapper<std::vector<int>> results;
results(
    [&](std::vector<int>& data) {
        std::cout << "sorting...\n";
        std::sort(data.begin(), data.end());
        std::cout << "done ...\n";
        EXPECT_EQ(data, golden);
    }
).get();
However, the program works correctly without the explicit call to the get() method:
results(
    [&](std::vector<int>& data) {
        std::cout << "sorting...\n";
        std::sort(data.begin(), data.end());
        std::cout << "done ...\n";
        EXPECT_EQ(data, golden);
    }
); // works correctly without calling get()
What could the problem be? Did I implement something wrong? I noticed a post here saying that "a packaged_task needs to be invoked before you call f.get(), otherwise your program will freeze as the future will never become ready." Is this true? If so, how can I solve this problem?
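For reference, the quoted behavior can be checked in isolation: a future obtained from a std::packaged_task becomes ready only once the task itself is invoked somewhere. A minimal sketch:

std::packaged_task<int()> task([]{ return 1; });
std::future<int> fu = task.get_future();
// fu.get() here would block forever: nothing has invoked the task yet
task();           // invoke the packaged_task...
int v = fu.get(); // ...now get() returns 1 immediately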
I was compiling the code with -std=c++1z -pthread using G++ 6.1.
Can't figure out where std::this_thread is for jthread?
I have a function that theoretically makes a jthread sleep until a cancellation is requested:
template<typename Rep, typename Period>
void sleep_for(const std::chrono::duration<Rep, Period>& d, const std::stop_token& token)
{
    std::condition_variable cv;
    std::mutex mutex;
    std::unique_lock<std::mutex> lock{ mutex };
    std::stop_callback stop_wait{ token, [&cv]() {
        cv.notify_one();
    }};
    cv.wait_for(lock, d, [&token]() {
        return token.stop_requested();
    });
}
How do I call it on a jthread?
Theoretically the program below exits within 1 second:
int main()
{
    std::jthread t([]()
    {
        // where do I get `stop_token`?
        sleep_for(std::chrono::seconds(5), std::this_jthread::get_stop_token());
    });
    std::this_thread::sleep_for(std::chrono::seconds(1));
    t.request_stop();
    return 0;
}
The jthread constructor accepts a function that takes a std::stop_token as its first argument, which will be passed in by the jthread from its internal stop_source.
Here is an example:
std::jthread t([](std::stop_token stop_token)
{
    while(!stop_token.stop_requested()) {
        // Process data...
        std::this_thread::sleep_for(std::chrono::seconds(5));
    }
});
std::this_thread::sleep_for(std::chrono::seconds(1));
t.request_stop();
Live example on Godbolt.
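For the sleep_for function from the question, the same mechanism applies; a sketch (assuming that sleep_for template is in scope):

std::jthread t([](std::stop_token token)
{
    // wakes up early once request_stop() is called
    sleep_for(std::chrono::seconds(5), token);
});
std::this_thread::sleep_for(std::chrono::seconds(1));
t.request_stop(); // the program now exits within about 1 second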
I'm sorry if I'm getting the whole concept wrong, but I'm trying to make a tuple the container of the actual objects, so that the objects only go out of scope when the tuple is destroyed.
I currently have this:
class MiniThread {
public:
    ~MiniThread() {
        if (m_thread) {
            if (m_thread->joinable())
                m_thread->join();
            delete m_thread;
        }
    }

    void join()
    {
        if (m_thread == nullptr)
            return;
        m_thread->join();
        m_thread = nullptr;
    }

    template<typename F, typename... Args>
    void run(F func, Args... args)
    {
        if (m_thread != nullptr)
            join();
        auto tuple = std::forward_as_tuple(args...);
        m_thread = new std::thread([=]() {
            __try
            {
                std::apply(func, tuple);
            }
            __except (CrashDump::GenerateDump(GetExceptionInformation()))
            {
                // TODO: log.
                exit(1);
            }
        });
        m_started = true;
    }

    bool started() const { return m_started; }

private:
    std::thread *m_thread = nullptr;
    bool m_started = false;
};
std::string getString()
{
    return std::string("sono");
}

int main()
{
    auto test = [&](std::string seila, const std::string& po, std::promise<int>* p)
    {
        std::cout << seila.c_str() << std::endl;
        std::cout << po.c_str() << std::endl;
        p->set_value(10);
    };

    std::promise<int> p;
    std::future<int> f;
    MiniThread thread;
    std::string hello = "hello";
    std::string seilapo = "seilapo";
    f = p.get_future();
    thread.run(test, getString(), "how are you", &p);
    thread.join();
    int ftest = f.get();
    std::cout << ftest << std::endl;
}
By the time the thread is run, the args are no longer reliable: they have already been destroyed. So I was wondering whether there is a way to copy them by value into the thread call. I have made some attempts at moving the variadic arguments into a tuple, but the tuple always ends up holding references to rvalues and fails all the same.
This:
auto tuple = std::forward_as_tuple(args...);
creates a tuple of references into args... That's forward_as_tuple's job. You're then capturing that tuple of references by value:
m_thread = new std::thread([=]{ /* ... */ });
So once your arguments go out of scope, you're left holding only references to them... and those will dangle.
But you don't actually... need to have a tuple at all. Just copy the arguments themselves:
m_thread = std::thread([=]() {
    func(args...); // func and args, no tuple here
});
Also, don't write new thread - std::thread is already a handle type; just create one directly.
The above copies the arguments. If you want to move them, then in C++17, yes, you'll need a tuple and std::apply. But not forward_as_tuple... just make_tuple:
m_thread = std::thread([func, args=std::make_tuple(std::move(args)...)]() mutable {
    std::apply(func, std::move(args));
});
In C++20, you won't need the tuple anymore, and can write a pack expansion directly:
m_thread = std::thread([func, ...args=std::move(args)]() mutable {
    func(std::move(args)...);
});
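Putting it together, run() from your class could look roughly like this with the C++17 fix applied (same members as in your code; the SEH wrapper is omitted for brevity):

template<typename F, typename... Args>
void run(F func, Args... args)
{
    if (m_thread != nullptr)
        join();
    m_thread = new std::thread(
        [func, tup = std::make_tuple(std::move(args)...)]() mutable {
            // the lambda owns the arguments, so nothing dangles
            std::apply(func, std::move(tup));
        });
    m_started = true;
}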
Is there a standard way to add a std::packaged_task to an existing thread? There's a nontrivial amount of overhead that must happen before the task is run, so I want to do that once, then keep the thread running and waiting for tasks to execute. I want to be able to use futures so I can optionally get the result of the task and catch exceptions.
My pre-C++11 implementation requires my tasks to inherit from an abstract base class with a Run() method (a bit of a pain; can't use lambdas), keeping a std::deque of those that I add to in the main thread and dequeue from in the worker thread. I have to protect that collection from simultaneous access and signal the worker thread that there's something to do so it isn't spinning or sleeping. Enqueuing something returns a "result" object with a synchronization object to wait on for the task to complete, plus a result value. It all works well, but it's time for an upgrade if there's something better.
Here is a toy thread pool:
template<class T>
struct threaded_queue {
    using lock = std::unique_lock<std::mutex>;
    void push_back( T t ) {
        {
            lock l(m);
            data.push_back(std::move(t));
        }
        cv.notify_one();
    }
    boost::optional<T> pop_front() {
        lock l(m);
        cv.wait(l, [this]{ return abort || !data.empty(); } );
        if (abort) return {};
        auto r = std::move(data.front()); // take the oldest task: FIFO order
        data.pop_front();
        return std::move(r);
    }
    void terminate() {
        {
            lock l(m);
            abort = true;
            data.clear();
        }
        cv.notify_all();
    }
    ~threaded_queue()
    {
        terminate();
    }
private:
    std::mutex m;
    std::deque<T> data;
    std::condition_variable cv;
    bool abort = false;
};
struct thread_pool {
    thread_pool( std::size_t n = 1 ) { start_thread(n); }
    thread_pool( thread_pool&& ) = delete;
    thread_pool& operator=( thread_pool&& ) = delete;
    ~thread_pool() = default; // or `{ terminate(); }` if you want to abandon some tasks

    template<class F, class R=std::result_of_t<F&()>>
    std::future<R> queue_task( F task ) {
        std::packaged_task<R()> p(std::move(task));
        auto r = p.get_future();
        tasks.push_back( std::move(p) );
        return r;
    }
    template<class F, class R=std::result_of_t<F&()>>
    std::future<R> run_task( F task ) {
        if (threads_active() >= total_threads()) {
            start_thread();
        }
        return queue_task( std::move(task) );
    }
    void terminate() {
        tasks.terminate();
    }
    std::size_t threads_active() const {
        return active;
    }
    std::size_t total_threads() const {
        return threads.size();
    }
    void clear_threads() {
        terminate();
        threads.clear();
    }
    void start_thread( std::size_t n = 1 ) {
        while(n-->0) {
            threads.push_back(
                std::async( std::launch::async,
                    [this]{
                        while(auto task = tasks.pop_front()) {
                            ++active;
                            try {
                                (*task)();
                            } catch(...) {
                                --active;
                                throw;
                            }
                            --active;
                        }
                    }
                )
            );
        }
    }
private:
    std::vector<std::future<void>> threads;
    threaded_queue<std::packaged_task<void()>> tasks;
    std::atomic<std::size_t> active{0}; // explicitly zero-initialized
};
Copied from another answer of mine.
A thread_pool with one thread matches your description pretty closely.
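For example, used as a single dedicated worker (a sketch against the API above):

thread_pool pool(1);                          // one long-lived worker thread
auto fut = pool.queue_task([]{ return 42; }); // returns std::future<int>
std::cout << fut.get() << '\n';               // waits for the task, prints 42
// an exception thrown by the task would come out of fut.get() as usual;
// when pool goes out of scope, the queue terminates and the worker joins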
The above is only a toy; in a real thread pool I'd replace the std::packaged_task<void()> with a move_only_function<void()>, which is all I use the packaged_task for. (Amusingly, a packaged_task<void()> can hold a packaged_task<R()>, if inefficiently.)
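(move_only_function is not std::function; before C++23's std::move_only_function you have to write it yourself. A minimal, void()-only sketch:)

class move_only_function {
    struct base {
        virtual void call() = 0;
        virtual ~base() = default;
    };
    template<class F>
    struct impl : base {
        F f;
        impl(F g) : f(std::move(g)) {}
        void call() override { f(); }
    };
    std::unique_ptr<base> p;
public:
    template<class F>
    move_only_function(F f) : p(std::make_unique<impl<F>>(std::move(f))) {}
    void operator()() { p->call(); }
};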
You will have to reason about shutdown and make a plan. The above code locks up if you try to shut it down without first clearing the threads.
I have the following thread pool implementation:
template<typename... event_args>
class thread_pool{
public:
    using handler_type = std::function<void(event_args...)>;

    thread_pool(handler_type&& handler,
                std::size_t N = 4,
                bool finish_before_exit = true)
        : _handler(std::forward<handler_type&&>(handler)),
          _workers(N),
          _running(true),
          _finish_work_before_exit(finish_before_exit)
    {
        for(auto&& worker: _workers)
        {
            // worker function
            worker = std::thread([this]()
            {
                while (_running)
                {
                    // wait for work
                    std::unique_lock<std::mutex> _lk{_wait_mutex};
                    _cv.wait(_lk, [this]{
                        return !_events.empty() || !_running;
                    });
                    // _lk is locked again here
                    // check to see why we woke up
                    if (!_events.empty()) { // was it new work
                        std::unique_lock<std::mutex> _readlk(_queue_mutex);
                        auto data = _events.front();
                        _events.pop();
                        _readlk.unlock();
                        invoke(std::move(_handler), std::move(data));
                        _cv.notify_all();
                    } else if(!_running) { // was it a signal to exit
                        break;
                    }
                    // or was it spurious and we should just ignore it
                }
            });
            // end worker function
        }
    }
    ~thread_pool()
    {
        if(_finish_work_before_exit)
        { // block destruction until all work is done
            std::condition_variable _work_remains;
            std::mutex _wr;
            std::unique_lock<std::mutex> lk{_wr};
            _work_remains.wait(lk,[this](){
                return _events.empty();
            });
        }
        _running = false;
        // let all workers know to exit
        _cv.notify_all();
        // attempt to join all workers
        for(auto&& _worker: _workers)
        {
            if(_worker.joinable())
            {
                _worker.join();
            }
        }
    }
    handler_type& handler()
    {
        return _handler;
    }

    void propagate(event_args&&... args)
    {
        // lock before push
        std::unique_lock<std::mutex> _lk(_queue_mutex);
        {
            _events.emplace(std::make_tuple(args...));
        }
        _lk.unlock(); // explicit unlock
        _cv.notify_one(); // let worker know that data is available
    }

private:
    bool _finish_work_before_exit;
    handler_type _handler;
    std::queue<std::tuple<event_args...>> _events;
    std::vector<std::thread> _workers;
    std::atomic_bool _running;
    std::condition_variable _cv;
    std::mutex _wait_mutex;
    std::mutex _queue_mutex;

    // helpers used to unpack tuple into function call
    template<typename Func, typename Tuple, std::size_t... I>
    auto invoke_(Func&& func, Tuple&& t, std::index_sequence<I...>)
    {
        return func(std::get<I>(std::forward<Tuple&&>(t))...);
    }
    template<typename Func, typename Tuple,
             typename Indicies = std::make_index_sequence<std::tuple_size<Tuple>::value>>
    auto invoke(Func&& func, Tuple&& t)
    {
        return invoke_(std::forward<Func&&>(func), std::forward<Tuple&&>(t), Indicies());
    }
};
I recently added this section to the destructor:
if(_finish_work_before_exit)
{ // block destruction until all work is done
    std::condition_variable _work_remains;
    std::mutex _wr;
    std::unique_lock<std::mutex> lk{_wr};
    _work_remains.wait(lk,[this](){
        return _events.empty();
    });
}
The intent was to have the destructor block until the work queue was fully consumed.
But it seems to put the program into deadlock. All of the work does get completed, but the wait does not seem to end when the work is done.
Consider this example main:
std::mutex writemtx;
thread_pool<int> pool{
    [&](int i){
        std::unique_lock<std::mutex> lk{writemtx};
        std::cout<<i<<" : "<<std::this_thread::get_id()<<std::endl;
    },
    8 // threads
};
for (int i=0; i<8192; ++i) {
    pool.propagate(std::move(i));
}
How can I have the destructor wait for the completion of the work without causing deadlock?
The reason your code deadlocks is that _work_remains is a condition variable which is not "notified" by any part of your code. You would need to make it a class member and have it notified by any thread that picks up the last event from _events.
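A sketch of that fix, reusing the members from the question (_work_remains becomes a member and is waited on under _queue_mutex):

// in the class:
std::condition_variable _work_remains;

// in the worker, after popping an event:
std::unique_lock<std::mutex> _readlk(_queue_mutex);
auto data = _events.front();
_events.pop();
const bool drained = _events.empty();
_readlk.unlock();
invoke(std::move(_handler), std::move(data));
if (drained)
    _work_remains.notify_all(); // wake a destructor waiting for the queue to drain

// in the destructor:
if (_finish_work_before_exit)
{ // block destruction until all work is done
    std::unique_lock<std::mutex> lk{_queue_mutex};
    _work_remains.wait(lk, [this]{ return _events.empty(); });
}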
I'm using the Boost ASIO library as a thread pool, a technique which is widely described. However, I want to interrupt a task if it runs for longer than 1 second, and have the thread move on to the next posted task.
I can easily implement this using a separate deadline_timer, which is reset if the task finishes before the deadline or interrupts the thread should the task go on for too long. However, I assumed this would be built into ASIO, as it seems natural to have a task with a timeout for network operations. But I can't see anything in the API for doing that succinctly.
Can anyone tell me if this functionality already exists? Or should I implement it the way I described?
Here's a quick solution I knocked together.
It requires that your submitted function objects accept an argument of type exec_context.
The task running in the io_service can query the .canceled() accessor (which is atomic) to determine whether it should cancel early.
It can then either throw an exception or return whatever value it intended to return.
The caller submits via the submit function. This function wraps the worker function with the context object and marshals its return value and/or exception into a std::future.
The caller can then query or wait on this future (or ignore it) as appropriate.
The caller gets a handle object, which has the method cancel() on it. Using this handle, the caller can either cancel, query or wait on the submitted task.
Hope it helps. It was fun to write.
#include <boost/asio.hpp>
#include <iostream>
#include <atomic>
#include <thread>
#include <chrono>
#include <future>
#include <stdexcept>
#include <exception>
#include <utility>
#include <type_traits>

//
// an object to allow the caller to communicate a cancellation request to the
// submitted task
//
struct exec_controller
{
    /// @returns previous cancellation request state
    bool notify_cancel()
    {
        return _should_cancel.exchange(true);
    }

    bool should_cancel() const {
        return _should_cancel;
    }

private:
    std::atomic<bool> _should_cancel = { false };
};

template<class Ret>
struct exec_state : std::enable_shared_from_this<exec_state<Ret>>
{
    using return_type = Ret;

    bool notify_cancel() {
        return _controller.notify_cancel();
    }

    std::shared_ptr<exec_controller>
    get_controller_ptr() {
        return std::shared_ptr<exec_controller>(this->shared_from_this(),
                                                std::addressof(_controller));
    }

    std::promise<return_type>& promise() { return _promise; }

private:
    std::promise<return_type> _promise;
    exec_controller _controller;
};

struct applyer;

struct exec_context
{
    exec_context(std::shared_ptr<exec_controller> impl)
        : _impl(impl)
    {}

    bool canceled() const {
        return _impl->should_cancel();
    }

private:
    friend applyer;
    std::shared_ptr<exec_controller> _impl;
};

struct applyer
{
    template<class F, class Ret>
    void operator()(F& f, std::shared_ptr<exec_state<Ret>> const& p) const
    {
        try {
            p->promise().set_value(f(exec_context { p->get_controller_ptr() }));
        }
        catch(...) {
            p->promise().set_exception(std::current_exception());
        }
    }

    template<class F>
    void operator()(F& f, std::shared_ptr<exec_state<void>> const& p) const
    {
        try {
            f(exec_context { p->get_controller_ptr() });
            p->promise().set_value();
        }
        catch(...) {
            p->promise().set_exception(std::current_exception());
        }
    }
};

template<class Ret>
struct exec_result
{
    using return_type = Ret;

    exec_result(std::shared_ptr<exec_state<return_type>> p)
        : _impl(p)
    {}

    bool cancel() {
        return _impl->notify_cancel();
    }

    std::future<Ret>& get_future()
    {
        return _future;
    }

private:
    std::shared_ptr<exec_state<return_type>> _impl;
    std::future<return_type> _future { _impl->promise().get_future() };
};

template<class Executor, class F>
auto submit(Executor& exec, F&& f)
{
    using function_type = std::decay_t<F>;
    using result_type = std::result_of_t<function_type(exec_context)>;
    using state_type = exec_state<result_type>;

    auto shared_state = std::make_shared<state_type>();
    exec.post([shared_state, f = std::forward<F>(f)]
    {
        applyer()(f, shared_state);
    });
    return exec_result<result_type>(std::move(shared_state));
}

int main()
{
    using namespace std::literals;

    boost::asio::io_service ios;
    boost::asio::io_service::strand strand(ios);
    boost::asio::io_service::work work(ios);

    std::thread runner([&] { ios.run(); });
    std::thread runner2([&] { ios.run(); });

    auto func = [](auto context)
    {
        for(int i = 0 ; i < 1000 ; ++i)
        {
            if (context.canceled())
                throw std::runtime_error("canceled");
            std::this_thread::sleep_for(100ms);
        }
    };

    auto handle = submit(strand, func);
    auto handle2 = submit(ios, [](auto context) { return 2 + 2; });

    // cancel the handle, or wait on it as you wish
    std::this_thread::sleep_for(1s);
    handle.cancel();
    handle2.cancel(); // prove that late cancellation is a nop

    try {
        std::cout << "2 + 2 is " << handle2.get_future().get() << std::endl;
    }
    catch(std::exception& e)
    {
        std::cerr << "failed to add 2 + 2 : " << e.what() << std::endl;
    }

    try {
        handle.get_future().get();
        std::cout << "task completed" << std::endl;
    }
    catch(std::exception const& e) {
        std::cout << "task threw exception: " << e.what() << std::endl;
    }

    ios.stop();
    runner.join();
    runner2.join();
}
Update: v2 adds some privacy protection to the classes and demonstrates two simultaneous tasks.
Expected output:
2 + 2 is 4
task threw exception: canceled