How to wait for an asio handler? - c++

I have an object whose operations run on a boost::asio::io_service and which has some properties. Something like this:
class Foo
{
private:
// Not an int in my real code, but it doesn't really matter.
int m_bar;
boost::asio::io_service& m_io_service;
boost::asio::strand m_bar_strand;
};
m_bar is to be used only from a handler that is called through the strand m_bar_strand. This allows me not to lock from within those handlers.
To set the m_bar property from outside a thread that runs io_service::run() I wrote an asynchronous_setter, like so:
class Foo
{
public:
void async_get_bar(function<void (int)> handler)
{
m_bar_strand.post(bind(&Foo::do_get_bar, this, handler));
}
void async_set_bar(int value, function<void ()> handler)
{
m_bar_strand.post(bind(&Foo::do_set_bar, this, value, handler));
}
private:
void do_get_bar(function<void (int)> handler)
{
// This is only called from within the m_bar_strand, so we are safe.
// Run the handler to notify the caller.
handler(m_bar);
}
void do_set_bar(int value, function<void ()> handler)
{
// This is only called from within the m_bar_strand, so we are safe.
m_bar = value;
// Run the handler to notify the caller.
handler();
}
int m_bar;
boost::asio::io_service& m_io_service;
boost::asio::strand m_bar_strand;
};
This works perfectly but now I'd like to write a synchronous version of set_bar that sets the value and returns only when the set was effective. It must still guarantee that the effective set will occur within the m_bar_strand. Ideally, something reentrant.
I can imagine solutions with semaphores that would be modified from within the handler, but everything I come up with seems hackish and really not elegant. Is there something in Boost/Boost.Asio that allows such a thing?
How would you proceed to implement this method?

If you need to synchronously wait on a value to be set, then Boost.Thread's futures may provide an elegant solution:
The futures library provides a means of handling synchronous future values, whether those values are generated by another thread, or on a single thread in response to external stimuli, or on-demand.
In short, a boost::promise is created and allows for a value to be set on it. The value can later be retrieved via an associated boost::future. Here is a basic example:
boost::promise<int> promise;
boost::unique_future<int> future = promise.get_future();
// start asynchronous operation that will invoke promise.set_value(42)
...
assert(future.get() == 42); // blocks until future has been set.
Two other notable benefits to this approach:
Futures are part of C++11 (std::future and std::promise), so the same approach carries over to the standard library.
Exceptions can even be passed to future via promise::set_exception(), supporting an elegant way to provide exceptions or errors to the caller.
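As a small illustration of that last point (a sketch of mine, not part of the answer below; the error condition and the checked_bar name are hypothetical), the strand-side code can trap a failure and store it in the promise, and future::get() then re-throws it in the calling thread:
    // Assumes the Foo members from the question plus <boost/exception_ptr.hpp> and <stdexcept>.
    int checked_bar()
    {
        boost::promise<int> promise;
        m_bar_strand.post([this, &promise]() {
            try {
                if (m_bar < 0)                                     // hypothetical error condition
                    throw std::runtime_error("bar not initialized");
                promise.set_value(m_bar);                          // normal path
            } catch (...) {
                promise.set_exception(boost::current_exception()); // error path
            }
        });
        return promise.get_future().get(); // re-throws the stored exception, if any
    }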
Here is a complete example based on the original code:
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/thread.hpp>
class Foo
{
public:
Foo(boost::asio::io_service& io_service)
: m_io_service(io_service),
m_bar_strand(io_service)
{}
public:
void async_get_bar(boost::function<void(int)> handler)
{
m_bar_strand.post(bind(&Foo::do_get_bar, this, handler));
}
void async_set_bar(int value, boost::function<void()> handler)
{
m_bar_strand.post(bind(&Foo::do_set_bar, this, value, handler));
}
int bar()
{
typedef boost::promise<int> promise_type;
promise_type promise;
// Pass the handler to async operation that will set the promise.
void (promise_type::*setter)(const int&) = &promise_type::set_value;
async_get_bar(boost::bind(setter, &promise, _1));
// Synchronously wait for promise to be fulfilled.
return promise.get_future().get();
}
void bar(int value)
{
typedef boost::promise<void> promise_type;
promise_type promise;
// Pass the handler to async operation that will set the promise.
async_set_bar(value, boost::bind(&promise_type::set_value, &promise));
// Synchronously wait for the future to finish.
promise.get_future().wait();
}
private:
void do_get_bar(boost::function<void(int)> handler)
{
// This is only called from within the m_bar_strand, so we are safe.
// Run the handler to notify the caller.
handler(m_bar);
}
void do_set_bar(int value, boost::function<void()> handler)
{
// This is only called from within the m_bar_strand, so we are safe.
m_bar = value;
// Run the handler to notify the caller.
handler();
}
int m_bar;
boost::asio::io_service& m_io_service;
boost::asio::strand m_bar_strand;
};
int main()
{
boost::asio::io_service io_service;
boost::asio::io_service::work work(io_service);
boost::thread t(
boost::bind(&boost::asio::io_service::run, boost::ref(io_service)));
Foo foo(io_service);
foo.bar(21);
std::cout << "foo.bar is " << foo.bar() << std::endl;
foo.bar(2 * foo.bar());
std::cout << "foo.bar is " << foo.bar() << std::endl;
io_service.stop();
t.join();
}
which provides the following output:
foo.bar is 21
foo.bar is 42

You could use a pipe to notify the synchronous method when the value is set in async_set_bar(). Warning: the code below is brain-compiled and likely has errors, but it should get the point across:
#include <boost/asio.hpp>
#include <iostream>
#include <thread>
class Foo
{
public:
Foo( boost::asio::io_service& io_service ) :
_bar( 0 ),
_io_service( io_service ),
_strand( _io_service ),
_readPipe( _io_service ),
_writePipe( _io_service )
{
boost::asio::local::connect_pair( _readPipe, _writePipe );
}
void set_async( int v ) {
_strand.post( [=]
{
_bar = v;
std::cout << "sending " << _bar << std::endl;
_writePipe.send( boost::asio::buffer( &_bar, sizeof(_bar) ) );
}
);
}
void set_sync( int v ) {
this->set_async( v );
int value;
_readPipe.receive( boost::asio::buffer(&value, sizeof(value) ) );
std::cout << "set value to " << value << std::endl;
}
private:
int _bar;
boost::asio::io_service& _io_service;
boost::asio::io_service::strand _strand;
boost::asio::local::stream_protocol::socket _readPipe;
boost::asio::local::stream_protocol::socket _writePipe;
};
int
main()
{
boost::asio::io_service io_service;
boost::asio::io_service::work w(io_service);
std::thread t( [&]{ io_service.run(); } );
Foo f( io_service );
f.set_sync( 20 );
io_service.stop();
t.join();
}
If you are unable to use C++11 lambdas, replace them with boost::bind and a few more completion-handler methods.
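For instance, the lambda in set_async might become a bound member function along these lines (a sketch only; do_set is a hypothetical helper, and <boost/bind.hpp> is assumed):
    void Foo::set_async( int v ) {
        _strand.post( boost::bind( &Foo::do_set, this, v ) );
    }

    // Runs on the strand, mirroring the lambda body above.
    void Foo::do_set( int v ) {
        _bar = v;
        std::cout << "sending " << _bar << std::endl;
        _writePipe.send( boost::asio::buffer( &_bar, sizeof(_bar) ) );
    }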

This is what I came up with:
class synchronizer_base
{
protected:
synchronizer_base() :
m_has_result(false),
m_lock(m_mutex)
{
}
void wait()
{
while (!m_has_result)
{
m_condition.wait(m_lock);
}
}
void notify_result()
{
// Lock the mutex so the notification cannot slip in between the waiter's
// check of m_has_result and its call to m_condition.wait() (lost wakeup).
boost::lock_guard<boost::mutex> guard(m_mutex);
m_has_result = true;
m_condition.notify_all();
}
private:
boost::atomic<bool> m_has_result;
boost::mutex m_mutex;
boost::unique_lock<boost::mutex> m_lock;
boost::condition_variable m_condition;
};
template <typename ResultType = void>
class synchronizer : public synchronizer_base
{
public:
void operator()(const ResultType& result)
{
m_result = result;
notify_result();
}
ResultType wait_result()
{
wait();
return m_result;
}
private:
ResultType m_result;
};
template <>
class synchronizer<void> : public synchronizer_base
{
public:
void operator()()
{
notify_result();
}
void wait_result()
{
wait();
}
};
And I can use it, that way:
class Foo
{
public:
void async_get_bar(function<void (int)> handler)
{
m_bar_strand.post(bind(&Foo::do_get_bar, this, handler));
}
void async_set_bar(int value, function<void ()> handler)
{
m_bar_strand.post(bind(&Foo::do_set_bar, this, value, handler));
}
int get_bar()
{
synchronizer<int> sync;
async_get_bar(boost::ref(sync));
return sync.wait_result();
}
void set_bar(int value)
{
synchronizer<void> sync;
async_set_bar(value, boost::ref(sync));
sync.wait_result();
}
};
The boost::ref is necessary because the instances of synchronizer are non-copyable. This could be avoided by wrapping synchronizer in some other container-class, but I'm fine with that solution as it is.
Note: Do NOT call such "synchronized" functions from inside a handler or it might just deadlock!
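Driving it from a non-io_service thread then looks much like the futures example above; a minimal sketch (assuming Foo has a constructor taking the io_service, as in that example):
    boost::asio::io_service io_service;
    boost::asio::io_service::work work(io_service);
    boost::thread t(boost::bind(&boost::asio::io_service::run, boost::ref(io_service)));

    Foo foo(io_service);
    foo.set_bar(42);                         // blocks until the strand handler has run
    std::cout << foo.get_bar() << std::endl; // prints 42

    io_service.stop();
    t.join();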

Related

Make a Timer with boost::asio::deadline_timer

I want to make a Timer class with boost::asio::deadline_timer. I looked into this:
How do I make the boost/asio library repeat a timer?
class DeadlineTimer
{
boost::asio::io_service io;
std::function<void()> fun;
boost::asio::deadline_timer t;
void runTimer()
{
fun();
t.expires_at(t.expires_at() + boost::posix_time::seconds(2));
t.async_wait(boost::bind(&DeadlineTimer::runTimer, this));
}
public:
DeadlineTimer() :t(io, boost::posix_time::seconds(2)){}
void setFunction(std::function<void()> _f)
{
fun = _f;
}
void run()
{
io.run();
}
};
void test()
{
DeadlineTimer timer1;
auto f = []() {
cout << "hello world\n";
};
timer1.setFunction(f);
timer1.run();
}
It should allow the user to pass a self-defined timer function via timer1.setFunction(f); and then run it repeatedly (every 2 seconds in the current setup).
But it doesn't work: there is no output at all.
After some trial-and-error, I’ve managed to update David Wyles’ boost::asio::repeating_timer class to work with Boost >= 1.66 - this neatly encapsulates the functionality of a repeating timer. Online at https://github.com/mikehaben69/boost, including demo source and makefile.
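For reference, the usual repeating-timer pattern from the linked question is to arm the timer once before calling run() and then re-arm it from the handler; the snippet above never issues that first async_wait, so run() has no work and returns immediately. A minimal sketch (class and handler names are mine):
    #include <boost/asio.hpp>
    #include <boost/bind.hpp>
    #include <functional>
    #include <iostream>

    class RepeatingTimer
    {
        boost::asio::io_service io;
        std::function<void()> fun;
        boost::asio::deadline_timer t;

        void handler()
        {
            fun();
            t.expires_at(t.expires_at() + boost::posix_time::seconds(2));
            t.async_wait(boost::bind(&RepeatingTimer::handler, this)); // re-arm
        }
    public:
        RepeatingTimer() : t(io, boost::posix_time::seconds(2)) {}
        void setFunction(std::function<void()> f) { fun = f; }
        void run()
        {
            // Arm the timer before run(); otherwise run() returns at once.
            t.async_wait(boost::bind(&RepeatingTimer::handler, this));
            io.run();
        }
    };

    int main()
    {
        RepeatingTimer timer1;
        timer1.setFunction([] { std::cout << "hello world\n"; });
        timer1.run(); // prints "hello world" every 2 seconds
    }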

Thread-safe reference-counted queue C++

I'm struggling to implement a thread-safe reference-counted queue. The idea is that I have a number of tasks that each maintain a shared_ptr to a task manager that owns the queue. Here is a minimal implementation that should encounter that same issue:
#include <condition_variable>
#include <deque>
#include <functional>
#include <iostream>
#include <memory>
#include <mutex>
#include <thread>
namespace {
class TaskManager;
struct Task {
std::function<void()> f;
std::shared_ptr<TaskManager> manager;
};
class Queue {
public:
Queue()
: _queue()
, _mutex()
, _cv()
, _running(true)
, _thread([this]() { sweepQueue(); })
{
}
~Queue() { close(); }
void close() noexcept
{
try {
{
std::lock_guard<std::mutex> lock(_mutex);
if (!_running) {
return;
}
_running = false;
}
_cv.notify_one();
_thread.join();
} catch (...) {
std::cerr << "An error occurred while closing the queue\n";
}
}
void push(Task&& task)
{
std::unique_lock<std::mutex> lock(_mutex);
_queue.emplace_back(std::move(task));
lock.unlock();
_cv.notify_one();
}
private:
void sweepQueue() noexcept
{
while (true) {
try {
std::unique_lock<std::mutex> lock(_mutex);
_cv.wait(lock, [this] { return !_running || !_queue.empty(); });
if (!_running && _queue.empty()) {
return;
}
if (!_queue.empty()) {
const auto task = _queue.front();
_queue.pop_front();
task.f();
}
} catch (...) {
std::cerr << "An error occurred while sweeping the queue\n";
}
}
}
std::deque<Task> _queue;
std::mutex _mutex;
std::condition_variable _cv;
bool _running;
std::thread _thread;
};
class TaskManager : public std::enable_shared_from_this<TaskManager> {
public:
void addTask(std::function<void()> f)
{
_queue.push({ f, shared_from_this() });
}
private:
Queue _queue;
};
} // anonymous namespace
int main(void)
{
const auto manager = std::make_shared<TaskManager>();
manager->addTask([]() { std::cout << "Hello world\n"; });
}
The problem I find is that on rare occasions, the queue will try to invoke its own destructor within the sweepQueue method. Upon further inspection, it seems that the reference count on the TaskManager hits zero once the last task is dequeued. How can I safely maintain the reference count without invoking the destructor?
Update: The example does not clarify the need for the std::shared_ptr<TaskManager> within Task. Here is an example use case that should illustrate the need for this seemingly unnecessary ownership cycle.
std::unique_ptr<Task> task;
{
const auto manager = std::make_shared<TaskManager>();
task = std::make_unique<Task>(someFunc, manager);
}
// Guarantees manager is not destroyed while task is still in scope.
The ownership hierarchy here is TaskManager owns Queue and Queue owns Tasks. Tasks maintaining a shared pointer to TaskManager create an ownership cycle which does not seem to serve a useful purpose here.
This ownership cycle is the root of the problem here. The Queue is owned by the TaskManager, so the Queue can hold a plain pointer to the TaskManager and pass that pointer to the Task in sweepQueue. You do not need std::shared_ptr<TaskManager> in Task at all.
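Under that ownership, the task only needs a non-owning pointer; a sketch against the types from the question:
    struct Task {
        std::function<void()> f;
        TaskManager* manager = nullptr; // non-owning: Queue lives inside TaskManager,
                                        // so the manager outlives every queued task
    };

    class TaskManager {
    public:
        void addTask(std::function<void()> f)
        {
            _queue.push({ std::move(f), this }); // no shared_from_this, no ownership cycle
        }
    private:
        Queue _queue;
    };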
I'd refactor the queue from the thread first.
But to fix your problem:
struct am_I_alive {
am_I_alive(std::weak_ptr<void> ptr) : m_ptr(std::move(ptr)) {}
explicit operator bool() const { return static_cast<bool>(m_ptr.lock()); }
private:
std::weak_ptr<void> m_ptr;
};
struct lifetime_tracker {
am_I_alive track_lifetime() {
if (!m_ptr) m_ptr = std::make_shared<bool>(true);
return {m_ptr};
}
lifetime_tracker() = default;
lifetime_tracker(lifetime_tracker const&) {} // do nothing, don't copy
lifetime_tracker& operator=(lifetime_tracker const&){ return *this; }
private:
std::shared_ptr<void> m_ptr;
};
This is a little utility to detect whether we have been deleted. It is useful in any code that calls an arbitrary callback whose side effects could include delete this.
Privately inherit your Queue from it.
Then split popping the task from running it.
std::optional<Task> get_task() {
std::unique_lock<std::mutex> lock(_mutex);
_cv.wait(lock, [this] { return !_running || !_queue.empty(); });
if (!_running && _queue.empty()) {
return {}; // end
}
auto task = _queue.front();
_queue.pop_front();
return task;
}
void sweepQueue() noexcept
{
while (true) {
try {
auto task = get_task();
if (!task) return;
// we are alive here
auto alive = track_lifetime();
try {
(*task).f();
} catch(...) {
std::cerr << "An error occurred while running a task\n";
}
task={};
// we could be deleted here
if (!alive)
return; // this was deleted, get out of here
} catch (...) {
std::cerr << "An error occurred while sweeping the queue\n";
}
}
}
and now you are safe.
After that you need to deal with the thread problem.
The thread problem is that your code needs to destroy the thread from within the very thread it is running on, while at the same time guaranteeing that the thread has terminated before main ends.
These two requirements are not compatible.
To fix that, you need to create a thread owning pool that doesn't have your "keep alive" semantics, and get your thread from there.
These threads don't delete themselves; instead, they return themselves to that pool for reuse by another client.
At shutdown, those threads are joined, to ensure you don't have code still running elsewhere that hasn't halted before the end of main.
To write such a pool without your inverted dependency mess, split the queue part of your code off. This queue owns no thread.
template<class T>
struct threadsafe_queue {
void push(T);
std::optional<T> pop(); // returns empty if thread is aborted
void abort();
~threadsafe_queue();
private:
std::mutex m;
std::condition_variable v;
std::deque<T> data;
bool aborted = false;
};
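A possible implementation behind that interface (a sketch; only the declarations above come from the answer, and <mutex>, <deque>, <optional>, and <condition_variable> are assumed to be included):
    template<class T>
    void threadsafe_queue<T>::push(T t) {
        {
            std::lock_guard<std::mutex> lock(m);
            data.push_back(std::move(t));
        }
        v.notify_one();
    }

    template<class T>
    std::optional<T> threadsafe_queue<T>::pop() {
        std::unique_lock<std::mutex> lock(m);
        v.wait(lock, [this] { return aborted || !data.empty(); });
        if (aborted)
            return {};                 // empty optional signals "stop"
        T t = std::move(data.front());
        data.pop_front();
        return t;
    }

    template<class T>
    void threadsafe_queue<T>::abort() {
        {
            std::lock_guard<std::mutex> lock(m);
            aborted = true;
            data.clear();              // abandon pending work, as discussed below
        }
        v.notify_all();
    }

    template<class T>
    threadsafe_queue<T>::~threadsafe_queue() { abort(); }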
then a simple thread pool:
struct thread_pool {
template<class F>
std::future<std::result_of_t<F&()>> enqueue( F&& f );
template<class F>
std::future<std::result_of_t<F&()>> thread_off_now( F&& f ); // starts a thread if there aren't any free
void abort();
void start_thread( std::size_t n = 1 );
std::size_t count_threads() const;
~thread_pool();
private:
threadsafe_queue< std::function<void()> > tasks;
std::vector< std::thread > threads;
static void thread_loop( thread_pool* pool );
};
Make the thread pool a singleton. Get the thread for your queue from the thread_off_now method; that guarantees you a thread that can be recycled when you are done with it and whose lifetime is handled by someone else.
But really, you should instead be thinking with ownership in mind. The idea that tasks and task queues mutually own each other is a mess.
If someone disposes of a task queue, it is probably a good idea to abandon the tasks instead of persisting them magically and silently.
Which is what my simple thread pool does.
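For completeness, a sketch of how the worker loop and enqueue could be wired to that queue (again mine, not the answer's code; <future>, <memory>, and <type_traits> assumed):
    void thread_pool::thread_loop(thread_pool* pool)
    {
        // An empty optional means the pool was aborted; the thread simply returns
        // and is joined in ~thread_pool(), no self-delete involved.
        while (auto task = pool->tasks.pop())
            (*task)();
    }

    template<class F>
    std::future<std::result_of_t<F&()>> thread_pool::enqueue(F&& f)
    {
        using R = std::result_of_t<F&()>;
        // packaged_task is move-only, so hold it via shared_ptr to fit std::function.
        auto job = std::make_shared<std::packaged_task<R()>>(std::forward<F>(f));
        auto fut = job->get_future();
        tasks.push([job] { (*job)(); });
        return fut;
    }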

Create boost::thread without callable object

I would like to create boost::thread without passing callable object to constructor ( not-any-thread state ).
boost::shared_ptr<boost::thread> ThreadHandle_;
ThreadHandle_.reset( new boost::thread() );
But how can I later pass a callable object?
void foo() { }
boost::thread th; // not-any-thread
th = boost::thread(foo); // bound
You can do exactly the same with the shared_ptr complication.
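Concretely (a sketch; boost::thread is movable, so a default-constructed one can be assigned later):
    void foo() { }

    boost::shared_ptr<boost::thread> ThreadHandle_(new boost::thread()); // not-any-thread
    // ... later, once the callable is known:
    *ThreadHandle_ = boost::thread(foo);                                 // now bound to foo
    ThreadHandle_->join();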
You can create your own wrapper on top of boost::thread, something similar to:
class QThread
{
public:
QThread();
virtual ~QThread();
void operator()(const boost::function0<void>& action);
void Join();
private:
void Process();
std::list<boost::function0<void> > m_Queue;
bool m_Destroy;
boost::condition_variable_any m_Available;
boost::mutex m_QueueLock;
boost::barrier m_Barrier;
boost::thread m_Thread;
};
and implement Process so that it waits until you put something to do (a task, as a callable function) into the queue:
void QThread::Process()
{
m_Barrier.wait();
boost::mutex::scoped_lock lock(m_QueueLock);
while(!m_Destroy)
{
while(!m_Destroy && !m_Queue.empty())
{
boost::function0<void> action = m_Queue.front();
m_Queue.pop_front();
lock.unlock();
action();
lock.lock();
}
if (!m_Destroy)
{
m_Available.wait(lock);
}
}
}
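The enqueue side is not shown above; consistent with the members listed, it might look like this (a sketch):
    void QThread::operator()(const boost::function0<void>& action)
    {
        {
            boost::mutex::scoped_lock lock(m_QueueLock);
            m_Queue.push_back(action);
        }
        m_Available.notify_one(); // wake Process() if it is waiting
    }

    void QThread::Join()
    {
        {
            boost::mutex::scoped_lock lock(m_QueueLock);
            m_Destroy = true;
        }
        m_Available.notify_one();
        m_Thread.join();
    }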

Proper cleanup with a suspended coroutine

I'm wondering what the best (cleanest, hardest to mess up) method for cleanup is in this situation.
void MyClass::do_stuff(boost::asio::yield_context context) {
while (running_) {
uint32_t data = async_buffer->Read(context);
// do other stuff
}
}
Read is a call which asynchronously waits until there is data to be read, then returns that data. If I want to delete this instance of MyClass, how can I make sure I do so properly? Let's say that the asynchronous wait here is performed via a deadline_timer's async_wait. If I cancel the event, I still have to wait for the thread to finish executing the "other stuff" before I know things are in a good state (I can't join the thread, as it's a thread that belongs to the io service that may also be handling other jobs). I could do something like this:
MyClass::~MyClass() {
running_ = false;
read_event->CancelEvent(); // some way to cancel the deadline_timer the Read is waiting on
boost::mutex::scoped_lock lock(finished_mutex_);
if (!finished_) {
cond_.wait(lock);
}
// any other cleanup
}
void MyClass::do_stuff(boost::asio::yield_context context) {
while (running_) {
uint32_t data = async_buffer->Read(context);
// do other stuff
}
boost::mutex::scoped_lock lock(finished_mutex_);
finished_ = true;
cond_.notify_all();
}
But I'm hoping to make these stackful coroutines as easy to use as possible, and it's not straightforward for people to recognize that this condition exists and what would need to be done to make sure things are cleaned up properly. Is there a better way? Is what I'm trying to do here wrong at a more fundamental level?
Also, for the event (what I have is basically the same as Tanner's answer here) I need to cancel it in a way that I'd have to keep some extra state (a true cancel vs. the normal cancel used to fire the event) -- which wouldn't be appropriate if there were multiple pieces of logic waiting on that same event. Would love to hear if there's a better way to model the asynchronous event to be used with a coroutine suspend/resume.
Thanks.
EDIT: Thanks #Sehe, took a shot at a working example, I think this illustrates what I'm getting at:
class AsyncBuffer {
public:
AsyncBuffer(boost::asio::io_service& io_service) :
write_event_(io_service) {
write_event_.expires_at(boost::posix_time::pos_infin);
}
void Write(uint32_t data) {
buffer_.push_back(data);
write_event_.cancel();
}
uint32_t Read(boost::asio::yield_context context) {
if (buffer_.empty()) {
write_event_.async_wait(context);
}
uint32_t data = buffer_.front();
buffer_.pop_front();
return data;
}
protected:
boost::asio::deadline_timer write_event_;
std::list<uint32_t> buffer_;
};
class MyClass {
public:
MyClass(boost::asio::io_service& io_service) :
running_(false), io_service_(io_service), buffer_(io_service) {
}
void Run(boost::asio::yield_context context) {
while (running_) {
boost::system::error_code ec;
uint32_t data = buffer_.Read(context[ec]);
// do something with data
}
}
void Write(uint32_t data) {
buffer_.Write(data);
}
void Start() {
running_ = true;
boost::asio::spawn(io_service_, boost::bind(&MyClass::Run, this, _1));
}
protected:
boost::atomic_bool running_;
boost::asio::io_service& io_service_;
AsyncBuffer buffer_;
};
So here, let's say that the buffer is empty and MyClass::Run is currently suspended while making a call to Read, so there's a deadline_timer.async_wait that's waiting for the event to fire to resume that context. It's time to destroy this instance of MyClass, so how do we make sure that it gets done cleanly.
A more typical approach would be to use boost::enable_shared_from_this with MyClass, and run the methods as bound to the shared pointer.
Boost Bind supports binding to boost::shared_ptr<MyClass> transparently.
This way, you can automatically have the destructor run only when the last user disappears.
If you create a SSCCE, I'm happy to change it around, to show what I mean.
UPDATE
To the SSCCE, some remarks:
I imagined a pool of threads running the IO service
The way in which MyClass calls into AsyncBuffer member functions directly is not threadsafe. There is actually no thread-safe way to cancel the event from outside the producer thread[1], since the producer already accesses the buffer for Writing. This could be mitigated using a strand (in the current setup I don't see how MyClass could otherwise be threadsafe). Alternatively, look at the active object pattern (for which Tanner has an excellent answer[2] on SO).
I chose the strand approach here, for simplicity, so we do:
void MyClass::Write(uint32_t data) {
strand_.post(boost::bind(&AsyncBuffer::Write, &buffer_, data));
}
You ask
Also, for the event (what I have is basically the same as Tanner's answer here) I need to cancel it in a way that I'd have to keep some extra state (a true cancel vs. the normal cancel used to fire the event)
The most natural place for this state is the usual one for the deadline_timer: its deadline. Stopping the buffer is done by resetting the timer:
void AsyncBuffer::Stop() { // not threadsafe!
write_event_.expires_from_now(boost::posix_time::seconds(-1));
}
This at once cancels the timer, but is detectable because the deadline is in the past.
Here's a simple demo with a group of IO service threads, one "producer coroutine" that produces random numbers and a "sniper thread" that snipes the MyClass::Run coroutine after 2 seconds. The main thread is the sniper thread.
See it Live On Coliru
#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/async_result.hpp>
#include <boost/bind.hpp>
#include <boost/thread.hpp>
#include <boost/atomic.hpp>
#include <list>
#include <iostream>
// for refcounting:
#include <boost/enable_shared_from_this.hpp>
#include <boost/make_shared.hpp>
namespace asio = boost::asio;
class AsyncBuffer {
friend class MyClass;
protected:
AsyncBuffer(boost::asio::io_service &io_service) : write_event_(io_service) {
write_event_.expires_at(boost::posix_time::pos_infin);
}
void Write(uint32_t data) {
buffer_.push_back(data);
write_event_.cancel();
}
uint32_t Read(boost::asio::yield_context context) {
if (buffer_.empty()) {
boost::system::error_code ec;
write_event_.async_wait(context[ec]);
if (ec != boost::asio::error::operation_aborted || write_event_.expires_from_now().is_negative())
{
if (context.ec_)
*context.ec_ = boost::asio::error::operation_aborted;
return 0;
}
}
uint32_t data = buffer_.front();
buffer_.pop_front();
return data;
}
void Stop() {
write_event_.expires_from_now(boost::posix_time::seconds(-1));
}
private:
boost::asio::deadline_timer write_event_;
std::list<uint32_t> buffer_;
};
class MyClass : public boost::enable_shared_from_this<MyClass> {
boost::atomic_bool stopped_;
public:
MyClass(boost::asio::io_service &io_service) : stopped_(false), buffer_(io_service), strand_(io_service) {}
void Run(boost::asio::yield_context context) {
while (!stopped_) {
boost::system::error_code ec;
uint32_t data = buffer_.Read(context[ec]);
if (ec == boost::asio::error::operation_aborted)
break;
// do something with data
std::cout << data << " " << std::flush;
}
std::cout << "EOF\n";
}
bool Write(uint32_t data) {
if (!stopped_) {
strand_.post(boost::bind(&AsyncBuffer::Write, &buffer_, data));
}
return !stopped_;
}
void Start() {
if (!stopped_) {
stopped_ = false;
boost::asio::spawn(strand_, boost::bind(&MyClass::Run, shared_from_this(), _1));
}
}
void Stop() {
stopped_ = true;
strand_.post(boost::bind(&AsyncBuffer::Stop, &buffer_));
}
~MyClass() {
std::cout << "MyClass destructed because no coroutines hold a reference to it anymore\n";
}
protected:
AsyncBuffer buffer_;
boost::asio::strand strand_;
};
int main()
{
boost::thread_group tg;
asio::io_service svc;
{
// Start the consumer:
auto instance = boost::make_shared<MyClass>(svc);
instance->Start();
// Sniper in 2 seconds :)
boost::thread([instance]{
boost::this_thread::sleep_for(boost::chrono::seconds(2));
instance->Stop();
}).detach();
// Start the producer:
auto producer_coro = [instance, &svc](asio::yield_context c) { // a bound function/function object in C++03
asio::deadline_timer tim(svc);
while (instance->Write(rand())) {
tim.expires_from_now(boost::posix_time::milliseconds(200));
tim.async_wait(c);
}
};
asio::spawn(svc, producer_coro);
// Start the service threads:
for(size_t i=0; i < boost::thread::hardware_concurrency(); ++i)
tg.create_thread(boost::bind(&asio::io_service::run, &svc));
}
// now `instance` is out of scope, it will selfdestruct after the snipe
// completed
boost::this_thread::sleep_for(boost::chrono::seconds(3)); // wait longer than the snipe
std::cout << "This is the main thread _after_ MyClass self-destructed correctly\n";
// cleanup service threads
tg.join_all();
}
[1] logical thread, this could be a coroutine that gets resumed on different threads
[2] boost::asio and Active Object

Can I create a software watchdog timer thread in C++ using Boost Signals2 and Threads?

I am running function Foo from somebody else's library in a single-threaded application currently. Most of the time, I make a call to Foo and it's really quick; some times, I make a call to Foo and it takes forever. I am not a patient man: if Foo is going to take forever, I want to stop execution of Foo and not call it with those arguments.
What is the best way to call Foo in a controlled manner (my current environment is POSIX/C++) such that I can stop execution after a certain number of seconds. I feel like the right thing to do here is to create a second thread to call Foo, while in my main thread I create a timer function that will eventually signal the second thread if it runs out of time.
Is there another, more apt model (and solution)? If not, would Boost's Signals2 library and Threads do the trick?
You can call Foo on a second thread with a timeout. For example:
#include <boost/date_time.hpp>
#include <boost/thread/thread.hpp>
boost::posix_time::time_duration timeout = boost::posix_time::milliseconds(500);
boost::thread thrd(&Foo);
if (thrd.timed_join(timeout))
{
//finished
}
else
{
//Not finished;
}
You can use the following class:
class timer
{
typedef boost::signals2::signal<void ()> timeout_slot;
public:
typedef timeout_slot::slot_type timeout_slot_t;
public:
timer() : _interval(0), _is_active(false) {};
timer(int interval) : _interval(interval), _is_active(false) {};
virtual ~timer() { stop(); };
inline boost::signals2::connection connect(const timeout_slot_t& subscriber) { return _signalTimeout.connect(subscriber); };
void start()
{
boost::lock_guard<boost::mutex> lock(_guard);
if (is_active())
return; // Already executed.
if (_interval <= 0)
return;
_timer_thread.interrupt();
_timer_thread.join();
timer_worker job;
_timer_thread = boost::thread(job, this);
_is_active = true;
};
void stop()
{
boost::lock_guard<boost::mutex> lock(_guard);
if (!is_active())
return; // Already executed.
_timer_thread.interrupt();
_timer_thread.join();
_is_active = false;
};
inline bool is_active() const { return _is_active; };
inline int get_interval() const { return _interval; };
void set_interval(const int msec)
{
if (msec <= 0 || _interval == msec)
return;
// Note: do not hold _guard here; stop() and start() lock it themselves,
// and boost::mutex is not recursive.
// Keep timer activity status.
bool was_active = is_active();
if (was_active)
stop();
// Initialize timer with new interval.
_interval = msec;
if (was_active)
start();
};
protected:
friend struct timer_worker;
// The timer worker thread.
struct timer_worker
{
void operator()(timer* t)
{
boost::posix_time::milliseconds duration(t->get_interval());
try
{
while (1)
{
boost::this_thread::sleep<boost::posix_time::milliseconds>(duration);
{
boost::this_thread::disable_interruption di;
{
t->_signalTimeout();
}
}
}
}
catch (boost::thread_interrupted const& )
{
// Handle the thread interruption exception.
// This exception is raised when boost::thread::interrupt() interrupts the sleep.
}
};
};
protected:
int _interval;
bool _is_active;
boost::mutex _guard;
boost::thread _timer_thread;
// Signal slots
timeout_slot _signalTimeout;
};
An example of usage:
int _test_timer_count = 0;
void _test_timer_handler()
{
++_test_timer_count;
std::cout << "_test_timer_handler\n";
}
BOOST_AUTO_TEST_CASE( test_timer )
{
timer timer;
BOOST_CHECK(!timer.is_active());
BOOST_CHECK(timer.get_interval() == 0);
timer.set_interval(1000);
timer.connect(_test_timer_handler);
timer.start();
BOOST_CHECK(timer.is_active());
std::cout << "timer test started\n";
boost::this_thread::sleep<boost::posix_time::milliseconds>(boost::posix_time::milliseconds(5500));
timer.stop();
BOOST_CHECK(!timer.is_active());
BOOST_CHECK(_test_timer_count == 5);
}
You can also set an alarm right before calling that function, and catch SIGALRM.
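A bare-bones version of that alarm approach (POSIX only; a sketch, and keep in mind that a signal handler may only do async-signal-safe work, and that the alarm interrupts blocking system calls inside Foo rather than stopping CPU-bound work):
    #include <csignal>
    #include <unistd.h>

    void Foo(); // the possibly-slow call from the third-party library (declaration assumed)

    extern "C" void on_alarm(int)
    {
        // Typically you would set a volatile sig_atomic_t flag here, or siglongjmp out.
    }

    int main()
    {
        std::signal(SIGALRM, on_alarm);
        alarm(2);   // deliver SIGALRM in 2 seconds
        Foo();      // a blocking call inside Foo may return early with EINTR when the signal hits
                    // (platform-dependent; sigaction without SA_RESTART makes this explicit)
        alarm(0);   // cancel the alarm if Foo finished in time
    }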
Vlad, excellent post! Your code compiled and works beautifully. I implemented a software watchdog timer with it. I made a few modifications:
To prevent pointer decay, store the signal in boost::shared_ptr and pass this to the thread worker instead of a weak pointer to the timer class. This eliminates the need for the thread worker to be a friend struct and guarantees the signal is in memory.
Add parameter _is_periodic to allow the caller to select whether or not the worker thread is periodic or if it terminates after expiration.
Store _is_active, _interval and _is_periodic in boost::atomic to allow thread-safe access.
Narrow the scope of mutex locking.
Add reset() method to "kick" the timer, preventing it from issuing the expiration signal.
With these changes applied:
#include <atomic>
#include <boost/shared_ptr.hpp>
#include <boost/signals2.hpp>
#include <boost/thread.hpp>
class IntervalThread
{
using interval_signal = boost::signals2::signal<void(void)>;
public:
using interval_slot_t = interval_signal::slot_type;
IntervalThread(const int interval_ms = 60)
: _interval_ms(interval_ms),
_is_active(false),
_is_periodic(false),
_signal_expired(new interval_signal()) {};
inline ~IntervalThread(void) { stop(); };
boost::signals2::connection connect(const interval_slot_t &subscriber)
{
// thread-safe: signals2 obtains a mutex on connect()
return _signal_expired->connect(subscriber);
};
void start(void)
{
if (is_active())
return; // Already executed.
if (get_interval_ms() <= 0)
return;
boost::lock_guard<boost::mutex> lock(_timer_thread_guard);
_timer_thread.interrupt();
_timer_thread.join();
_timer_thread = boost::thread(timer_worker(),
static_cast<int>(get_interval_ms()),
static_cast<bool>(is_periodic()),
_signal_expired);
_is_active = true;
};
void reset(void)
{
if (is_active())
stop();
start();
}
void stop(void)
{
if (!is_active())
return; // Already executed.
boost::lock_guard<boost::mutex> lock(_timer_thread_guard);
_timer_thread.interrupt();
_timer_thread.join();
_is_active = false;
};
inline bool is_active(void) const { return _is_active; };
inline int get_interval_ms(void) const { return _interval_ms; };
void set_interval_ms(const int interval_ms)
{
if (interval_ms <= 0 || get_interval_ms() == interval_ms)
return;
// Cache timer activity state.
const bool was_active = is_active();
// Initialize timer with new interval.
if (was_active)
stop();
_interval_ms = interval_ms;
if (was_active)
start();
};
inline bool is_periodic(void) const { return _is_periodic; }
inline void set_periodic(const bool is_periodic = true) { _is_periodic = is_periodic; }
private:
// The timer worker for the interval thread.
struct timer_worker {
void operator()(const int interval_ms, const bool is_periodic, boost::shared_ptr<interval_signal> signal_expired)
{
boost::posix_time::milliseconds duration(interval_ms);
try {
do {
boost::this_thread::sleep<boost::posix_time::milliseconds>(duration);
{
boost::this_thread::disable_interruption di;
signal_expired->operator()();
}
} while (is_periodic);
} catch (const boost::thread_interrupted &) {
// IntervalThread start(), stop() and reset() call boost::thread::interrupt(),
// which raises this exception; that is expected here, so no action is necessary.
}
};
};
std::atomic<int> _interval_ms; // Interval, in ms
std::atomic<bool> _is_active; // Is the timed interval active?
std::atomic<bool> _is_periodic; // Is the timer periodic?
boost::mutex _timer_thread_guard;
boost::thread _timer_thread;
// The signal to call on interval expiration.
boost::shared_ptr<interval_signal> _signal_expired;
};