I want to make a Timer class with boost::asio::deadline_timer. I looked into this:
How do I make the boost/asio library repeat a timer?
class DeadlineTimer
{
boost::asio::io_service io;
std::function<void()> fun;
boost::asio::deadline_timer t;
void runTimer()
{
fun();
t.expires_at(t.expires_at() + boost::posix_time::seconds(2));
t.async_wait(boost::bind(&DeadlineTimer::runTimer, this));
}
public:
DeadlineTimer() :t(io, boost::posix_time::seconds(2)){}
void setFunction(std::function<void()> _f)
{
fun = _f;
}
void run()
{
io.run();
}
};
void test()
{
DeadlineTimer timer1;
auto f = []() {
cout << "hello world\n";
};
timer1.setFunction(f);
timer1.run();
}
It lets the user pass a self-defined callback via timer1.setFunction(f); and then runs it repeatedly (every 2 seconds in this case).
But it doesn't work: there is no output at all.
After some trial-and-error, I’ve managed to update David Wyles’ boost::asio::repeating_timer class to work with Boost >= 1.66 - this neatly encapsulates the functionality of a repeating timer. Online at https://github.com/mikehaben69/boost, including demo source and makefile.
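For reference, the snippet in the question prints nothing because io.run() returns immediately when no asynchronous operation has been queued; the first async_wait has to be scheduled before run() is called. Below is a minimal sketch of that fix, kept close to the question's code rather than the repeating_timer class linked above, and assuming a Boost version where boost::asio::io_service and deadline_timer are still available:
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <functional>
#include <iostream>
class DeadlineTimer
{
    boost::asio::io_service io;
    std::function<void()> fun;
    boost::asio::deadline_timer t;
    void runTimer()
    {
        fun();
        // re-arm relative to the previous deadline to avoid drift
        t.expires_at(t.expires_at() + boost::posix_time::seconds(2));
        t.async_wait(boost::bind(&DeadlineTimer::runTimer, this));
    }
public:
    DeadlineTimer() : t(io, boost::posix_time::seconds(2)) {}
    void setFunction(std::function<void()> f) { fun = f; }
    void run()
    {
        // queue the first wait *before* run(); otherwise run() has no work
        // and returns at once, which is why the original printed nothing
        t.async_wait(boost::bind(&DeadlineTimer::runTimer, this));
        io.run();
    }
};
With this change, run() blocks and the callback fires every two seconds.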
Related
I have a function that sets up a timer under the hood which I'd like to test with GMock without using any delays in a unit test.
I could use EXPECT_CALL to determine TimerWrapper is invoked freq times but how could I verify each run was spaced out at interval without using explicit delays?
// Wrapper around CreateTimer utility
void TimerWrapper(int freq, int interval, std::function<void()> callback)
{
// run callback freq times spaced out at interval
CreateTimer(freq, interval, callback);
}
TEST_F(TestTimer)
{
// verify TimerWrapper runs X times every interval
// so if interval = 1s, freq = 5, there should be 5 times the callback would be invoked in 5 seconds
}
Here is an example of splitting test and production code behavior for timed callbacks.
Live demo : https://onlinegdb.com/gm75fZefIZ
The reason I used a condition variable is that, unlike std::this_thread::sleep_for, it allows you to break out of the loop immediately when a stop is requested, resulting in better shutdown behavior.
#include <chrono>
#include <condition_variable>
#include <functional>
#include <future>
#include <iostream>
#include <mutex>
#include <thread>
using namespace std::chrono_literals;
//---------------------------------------------------------------------------------------------------------------------
// the scheduling interface
class scheduler_itf
{
public:
virtual void call_every(const std::chrono::steady_clock::duration interval, std::function<void()> fn) = 0;
virtual ~scheduler_itf() = default;
protected:
scheduler_itf() = default;
};
//---------------------------------------------------------------------------------------------------------------------
// note a very simple implementation of a scheduler
// uses one thread per schedule and only one callback can be scheduled
// a production version would be able to run multiple scheduled callbacks
//
class scheduler_t final :
public scheduler_itf
{
public:
scheduler_t() :
m_stop{ false }
{
}
~scheduler_t()
{
std::unique_lock<std::mutex> lock{ m_mtx };
m_stop = true;
m_cv.notify_all();
// the background thread will stop
// destructor of future will synchronize with thread actually having stopped
}
void call_every(const std::chrono::steady_clock::duration interval, std::function<void()> fn) override
{
m_future = std::async(std::launch::async, [=]
{
std::unique_lock<std::mutex> lock{ m_mtx };
// wait for the interval to elapse, or until m_stop becomes true (which happens during destruction)
while (!m_cv.wait_for(lock, interval, [&] { return m_stop; }))
{
fn();
}
});
}
private:
std::future<void> m_future;
std::mutex m_mtx;
std::condition_variable m_cv;
bool m_stop;
};
//---------------------------------------------------------------------------------------------------------------------
// scheduler version for unit tests.
// it is passive and will only run one step when asked for.
class test_scheduler_t :
public scheduler_itf
{
public:
void call_every(const std::chrono::steady_clock::duration interval, std::function<void()> fn) override
{
m_callback = fn;
}
void execute_next()
{
m_callback();
}
private:
std::function<void()> m_callback;
};
//---------------------------------------------------------------------------------------------------------------------
// object under test
// use dependency injection for the scheduler
// so you can either have the production version of the scheduler or the version for unit testing
class my_object_t
{
public:
// pass a reference to the scheduler, since it will have a longer life cycle than my_object_t instances
my_object_t(scheduler_itf& scheduler) :
m_scheduler{ scheduler }
{
m_scheduler.call_every(500ms, [this] { callback(); });
}
private:
void callback()
{
std::cout << "." << std::flush;
}
scheduler_itf& m_scheduler;
};
//---------------------------------------------------------------------------------------------------------------------
int main()
{
// start a scope to manage the lifetime of the scheduler and object
// this scope contains the production code
{
std::cout << "production code ouput : ";
scheduler_t scheduler;
// inject scheduler into object.
my_object_t object{ scheduler };
// let mainthread sleep
std::this_thread::sleep_for(4s);
// scheduler goes out of scope
// which will call its destructor, which will gracefully stop the scheduling thread
std::cout << " done\n";
}
// this scope contains the code for unit testing.
{
std::cout << "test code ouput : ";
test_sheduler_t test_scheduler;
my_object_t object{ test_scheduler };
// to simulate a time loop in the scheduler without delay just call execute_next
test_scheduler.execute_next();
test_scheduler.execute_next();
test_scheduler.execute_next();
std::cout << " done\n";
}
return 0;
}
Note this is for educational purposes only; a real scheduler/executor would be more complex. But this should give you an idea of how interfaces and dependency injection can really help you with unit testing.
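For instance, the passive test scheduler above can be driven from a GoogleTest case with zero wall-clock delay. The sketch below is illustrative only (the test name and the counting callback are not part of the original code) and assumes the scheduler classes from the example are visible:
#include <gtest/gtest.h>
#include <chrono>
TEST(SchedulerTest, CallbackRunsOncePerSimulatedTick)
{
    test_scheduler_t scheduler;
    int calls = 0;
    // register a counting callback; in a real test this would be my_object_t's callback
    scheduler.call_every(std::chrono::seconds(1), [&] { ++calls; });
    // simulate three timer ticks without sleeping
    scheduler.execute_next();
    scheduler.execute_next();
    scheduler.execute_next();
    EXPECT_EQ(calls, 3);
}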
I've read various answers on SO and still don't understand how to make an object method callable in this case:
Considering:
Class A
{
void generator(void)
{
int i = 1;
while(1)
{
if(i == 1)
{
one(/***/);//Should be a flag
i = 2;
}
else
{
two(/**/);//Should be a flag
i = 1;
}
}
}
template <typename CallbackFunction>
void one(CallbackFunction&& func)
{
}
template <typename CallbackFunction>
void two(CallbackFunction&& func)
{
}
A()
{
std::thread t(&A::generator, this);
t.detach();
}
};
and a simple main file:
void pOne(/**/)
{
std::cout<<"1"<<std::endl;
}
void pTwo(/**/)
{
std::cout<<"2"<<std::endl;
}
A myA;
A.One(pOne);
A.Two(pTwo);
int main(int argc, char** argv)
{
while(1){}
}
Here's where I'm at:
generator() should update a flag, and both one() & two() should poll on that flag & loop forever.
One() (and two()) should take a function pointer as a parameter and, if necessary, other parameters; pOne() should have the same parameters except the function pointer.
So my questions are:
1) Is my understanding correct?
2) Is there a clean way to make generator() start one() or two()? (flags, semaphores, mutexes, or anything that is a standard way to do it)
3) Assuming the code was working, would it behave as I expect, i.e. printing 1 and 2?
If it matters, I'm on Ubuntu.
Disclaimer 1: Like everyone else, I'm interpreting the question as:
-> You need an event handler
-> You want callback methods on those events
And the only reason I think that is because I helped you on an I2C handler sequence before.
Also, there is better logic than this; it's provided following your stubs' "rules".
You mentioned that you are on Ubuntu, so you will be lacking the Windows event system.
Disclaimer 2:
1- To avoid going too deep, I'm going to use a simple way to handle events.
2- Code is untested & provided for logic only
class Handler
{
private:
std::mutex event_one; // locked in the constructor, so One__ blocks until an event fires
void stubEventGenerator(void)
{
for(;;)
{
if(!event_one.try_lock())
{
event_one.unlock();
}
sleep(15); //you had a sleep so I added one
}
}
template <typename CallbackFunction>
void One__(CallbackFunction && func)
{
while(1)
{
event_one.lock();
func();
}
}
public:
Handler()
{
event_one.lock(); // start with "no event pending"
std::thread H(&Handler::stubEventGenerator, this);
H.detach(); // otherwise the std::thread destructor would terminate the program
}
~Handler()
{
//clean threads, etc
//this should really have a quit handler
}
template <typename CallbackFunction>
void One(CallbackFunction && func) //I think you have it right, still I'm not 100% sure
{
std::thread One_thread(&Handler::One__, this, func); //same here
}
};
Some points:
One() has to be a wrapper for the thread calling One__() if you want it to be non-blocking.
A mutex can be a simple way to handle events as long as the same event doesn't occur during its previous occurrence (you are free to use a better/more suitable tool for your use case, or use boost:: only if necessary).
The prototypes of One() & One__() are probably wrong; that's some research for you.
Finally: How it works:
std::mutex::lock() blocks as long as it can't acquire the mutex, so One__ will wait as long as your event generator doesn't unlock it.
Once unlocked, One__ will execute your std::function and wait for the event (mutex) to be raised (unlocked) again.
Far from a perfect answer, but lack of time, and not being able to fit this in a comment, made me post it; I will edit later.
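As a follow-up to the "better/more suitable tool" remark above: a condition-variable-based event avoids unlocking a mutex from a thread that doesn't own it (which is undefined behavior) and doesn't rely on lock/unlock timing. A rough sketch, with illustrative names only:
#include <condition_variable>
#include <mutex>
class Event
{
public:
    // called by the generator when the event occurs
    void raise()
    {
        {
            std::lock_guard<std::mutex> lock(m_mutex);
            m_raised = true;
        }
        m_cv.notify_one();
    }
    // called by the handler thread; blocks until raise(), then resets the event
    void wait()
    {
        std::unique_lock<std::mutex> lock(m_mutex);
        m_cv.wait(lock, [this] { return m_raised; });
        m_raised = false;
    }
private:
    std::mutex m_mutex;
    std::condition_variable m_cv;
    bool m_raised = false;
};
The handler loop then becomes while (true) { event.wait(); func(); } and the generator simply calls event.raise().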
With the limited information you provided, this code can be made compilable in the following manner:
#include <iostream>
#include <thread>
typedef void(*fptr)();
void pOne(/**/)
{
std::cout<<"1"<<std::endl;
}
void pTwo(/**/)
{
std::cout<<"2"<<std::endl;
}
class A
{
public:
void generator(void)
{
int i = 1;
while(1)
{
if(i == 1)
{
fptr func = pOne;
one(func);//Should be a flag
i = 2;
}
else
{
fptr func = pTwo;
two(func);//Should be a flag
i = 1;
}
}
}
template <typename CallbackFunction>
void one(CallbackFunction&& func)
{
func();
}
template <typename CallbackFunction>
void two(CallbackFunction&& func)
{
func();
}
A()
{
std::thread t(&A::generator, this);
t.detach();
}
};
int main()
{
A myA;
while(1)
{
}
return 0;
}
If you want one and two to accept any type/number of arguments, pass the second argument as a variadic template, as sketched below. Also, I could not understand why you want one and two to be called from main, since your generator function exists for exactly that purpose and is invoked by the thread that is detached in the class constructor.
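A sketch of that variadic-template suggestion (the extra arguments and the example calls are illustrative only):
#include <utility>
// one() forwarding an arbitrary argument list to the callback; two() would be analogous
template <typename CallbackFunction, typename... Args>
void one(CallbackFunction&& func, Args&&... args)
{
    std::forward<CallbackFunction>(func)(std::forward<Args>(args)...);
}
// example calls:
//   one(pOne);                                            // no extra arguments
//   one([](int x) { std::cout << x << std::endl; }, 42);  // forwarding an int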
I'm wondering what the best (cleanest, hardest to mess up) method for cleanup is in this situation.
void MyClass::do_stuff(boost::asio::yield_context context) {
while (running_) {
uint32_t data = async_buffer->Read(context);
// do other stuff
}
}
Read is a call which asynchronously waits until there is data to be read, then returns that data. If I want to delete this instance of MyClass, how can I make sure I do so properly? Let's say that the asynchronous wait here is performed via a deadline_timer's async_wait. If I cancel the event, I still have to wait for the thread to finish executing the "other stuff" before I know things are in a good state (I can't join the thread, as it's a thread that belongs to the io service that may also be handling other jobs). I could do something like this:
MyClass::~MyClass() {
running_ = false;
read_event->CancelEvent(); // some way to cancel the deadline_timer the Read is waiting on
boost::mutex::scoped_lock lock(finished_mutex_);
if (!finished_) {
cond_.wait(lock);
}
// any other cleanup
}
void MyClass::do_stuff(boost::asio::yield_context context) {
while (running_) {
uint32_t data = async_buffer->Read(context);
// do other stuff
}
boost::mutex::scoped_lock lock(finished_mutex_);
finished_ = true;
cond_.notify_one();
}
But I'm hoping to make these stackful coroutines as easy to use as possible, and it's not straightforward for people to recognize that this condition exists and what would need to be done to make sure things are cleaned up properly. Is there a better way? Is what I'm trying to do here wrong at a more fundamental level?
Also, for the event (what I have is basically the same as Tanner's answer here) I need to cancel it in a way that I'd have to keep some extra state (a true cancel vs. the normal cancel used to fire the event) -- which wouldn't be appropriate if there were multiple pieces of logic waiting on that same event. Would love to hear if there's a better way to model the asynchronous event to be used with a coroutine suspend/resume.
Thanks.
EDIT: Thanks @sehe, I took a shot at a working example; I think this illustrates what I'm getting at:
class AsyncBuffer {
public:
AsyncBuffer(boost::asio::io_service& io_service) :
write_event_(io_service) {
write_event_.expires_at(boost::posix_time::pos_infin);
}
void Write(uint32_t data) {
buffer_.push_back(data);
write_event_.cancel();
}
uint32_t Read(boost::asio::yield_context context) {
if (buffer_.empty()) {
write_event_.async_wait(context);
}
uint32_t data = buffer_.front();
buffer_.pop_front();
return data;
}
protected:
boost::asio::deadline_timer write_event_;
std::list<uint32_t> buffer_;
};
class MyClass {
public:
MyClass(boost::asio::io_service& io_service) :
running_(false), io_service_(io_service), buffer_(io_service) {
}
void Run(boost::asio::yield_context context) {
while (running_) {
boost::system::error_code ec;
uint32_t data = buffer_.Read(context[ec]);
// do something with data
}
}
void Write(uint32_t data) {
buffer_.Write(data);
}
void Start() {
running_ = true;
boost::asio::spawn(io_service_, boost::bind(&MyClass::Run, this, _1));
}
protected:
boost::atomic_bool running_;
boost::asio::io_service& io_service_;
AsyncBuffer buffer_;
};
So here, let's say that the buffer is empty and MyClass::Run is currently suspended while making a call to Read, so there's a deadline_timer.async_wait that's waiting for the event to fire to resume that context. It's time to destroy this instance of MyClass, so how do we make sure that it gets done cleanly?
A more typical approach would be to use boost::enable_shared_from_this with MyClass, and run the methods as bound to the shared pointer.
Boost Bind supports binding to boost::shared_ptr<MyClass> transparently.
This way, you can automatically have the destructor run only when the last user disappears.
If you create a SSCCE, I'm happy to change it around, to show what I mean.
UPDATE
To the SSCCE: some remarks:
I imagined a pool of threads running the IO service
The way in which MyClass calls into AsyncBuffer member functions directly is not threadsafe. There is actually no thread-safe way to cancel the event from outside the producer thread[1], since the producer already accesses the buffer for Write-ing. This could be mitigated using a strand (in the current setup I don't see how MyClass would likely be threadsafe). Alternatively, look at the active object pattern (for which Tanner has an excellent answer[2] on SO).
I chose the strand approach here, for simplicity, so we do:
void MyClass::Write(uint32_t data) {
strand_.post(boost::bind(&AsyncBuffer::Write, &buffer_, data));
}
You ask
Also, for the event (what I have is basically the same as Tanner's answer here) I need to cancel it in a way that I'd have to keep some extra state (a true cancel vs. the normal cancel used to fire the event)
The most natural place for this state is the usual one for the deadline_timer: its deadline. Stopping the buffer is done by resetting the timer:
void AsyncBuffer::Stop() { // not threadsafe!
write_event_.expires_from_now(boost::posix_time::seconds(-1));
}
This at once cancels the timer, but is detectable because the deadline is in the past.
Here's a simple demo with a group of IO service threads, one "producer coroutine" that produces random numbers and a "sniper thread" that snipes the MyClass::Run coroutine after 2 seconds. The main thread is the sniper thread.
See it Live On Coliru
#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/async_result.hpp>
#include <boost/bind.hpp>
#include <boost/thread.hpp>
#include <boost/atomic.hpp>
#include <list>
#include <iostream>
// for refcounting:
#include <boost/enable_shared_from_this.hpp>
#include <boost/make_shared.hpp>
namespace asio = boost::asio;
class AsyncBuffer {
friend class MyClass;
protected:
AsyncBuffer(boost::asio::io_service &io_service) : write_event_(io_service) {
write_event_.expires_at(boost::posix_time::pos_infin);
}
void Write(uint32_t data) {
buffer_.push_back(data);
write_event_.cancel();
}
uint32_t Read(boost::asio::yield_context context) {
if (buffer_.empty()) {
boost::system::error_code ec;
write_event_.async_wait(context[ec]);
if (ec != boost::asio::error::operation_aborted || write_event_.expires_from_now().is_negative())
{
if (context.ec_)
*context.ec_ = boost::asio::error::operation_aborted;
return 0;
}
}
uint32_t data = buffer_.front();
buffer_.pop_front();
return data;
}
void Stop() {
write_event_.expires_from_now(boost::posix_time::seconds(-1));
}
private:
boost::asio::deadline_timer write_event_;
std::list<uint32_t> buffer_;
};
class MyClass : public boost::enable_shared_from_this<MyClass> {
boost::atomic_bool stopped_;
public:
MyClass(boost::asio::io_service &io_service) : stopped_(false), buffer_(io_service), strand_(io_service) {}
void Run(boost::asio::yield_context context) {
while (!stopped_) {
boost::system::error_code ec;
uint32_t data = buffer_.Read(context[ec]);
if (ec == boost::asio::error::operation_aborted)
break;
// do something with data
std::cout << data << " " << std::flush;
}
std::cout << "EOF\n";
}
bool Write(uint32_t data) {
if (!stopped_) {
strand_.post(boost::bind(&AsyncBuffer::Write, &buffer_, data));
}
return !stopped_;
}
void Start() {
if (!stopped_) {
stopped_ = false;
boost::asio::spawn(strand_, boost::bind(&MyClass::Run, shared_from_this(), _1));
}
}
void Stop() {
stopped_ = true;
strand_.post(boost::bind(&AsyncBuffer::Stop, &buffer_));
}
~MyClass() {
std::cout << "MyClass destructed because no coroutines hold a reference to it anymore\n";
}
protected:
AsyncBuffer buffer_;
boost::asio::strand strand_;
};
int main()
{
boost::thread_group tg;
asio::io_service svc;
{
// Start the consumer:
auto instance = boost::make_shared<MyClass>(svc);
instance->Start();
// Sniper in 2 seconds :)
boost::thread([instance]{
boost::this_thread::sleep_for(boost::chrono::seconds(2));
instance->Stop();
}).detach();
// Start the producer:
auto producer_coro = [instance, &svc](asio::yield_context c) { // a bound function/function object in C++03
asio::deadline_timer tim(svc);
while (instance->Write(rand())) {
tim.expires_from_now(boost::posix_time::milliseconds(200));
tim.async_wait(c);
}
};
asio::spawn(svc, producer_coro);
// Start the service threads:
for(size_t i=0; i < boost::thread::hardware_concurrency(); ++i)
tg.create_thread(boost::bind(&asio::io_service::run, &svc));
}
// now `instance` is out of scope, it will selfdestruct after the snipe
// completed
boost::this_thread::sleep_for(boost::chrono::seconds(3)); // wait longer than the snipe
std::cout << "This is the main thread _after_ MyClass self-destructed correctly\n";
// cleanup service threads
tg.join_all();
}
[1] logical thread, this could be a coroutine that gets resumed on different threads
[2] boost::asio and Active Object
I'm trying to make a thread call back a function of the object that created the thread. But it seems it is not possible to pass "this" as a parameter. Is there a way to implement this? Thanks in advance.
Helper.cpp
void Helper::ProcessSomething(void (*callback)(void))
{
boost::this_thread::sleep(boost::posix_time::seconds(1));
callback();
}
SomeClass.cpp
void SomeClass::Start(void)
{
Helper *helper = Helper();
boost::thread t(&Helper::ProcessSomething, helper, &this->SomeCallback);
t.join();
}
void SomeClass::SomeCallback(void)
{
std::cout << "Callback called" << std::endl;
}
The problem is that SomeCallback is not static (at least not that I can see), so there is another this unaccounted for in the thread's constructor. Further, because it's not static, you can't convert SomeCallback to the void(*)(void) that ProcessSomething requires.
The simplest fix would just be to make SomeCallback static (and change &this->SomeCallback to &SomeClass::SomeCallback). But if you can't, this is a possible workaround (I'm assuming you don't have C++11):
void Helper::ProcessSomething(boost::function<void()> callback)
{
boost::this_thread::sleep(boost::posix_time::seconds(1));
callback();
}
// ...
void SomeClass::Start(void)
{
Helper helper;
boost::function<void()> callback = boost::bind(&SomeClass::SomeCallback, this);
boost::thread t(&Helper::ProcessSomething, &helper, callback);
t.join();
}
If you do have C++11 (but want to use boost::thread anyway), you could use a lambda instead of a binding:
void SomeClass::Start(void)
{
Helper helper;
boost::thread t(&Helper::ProcessSomething, &helper, [this]() { SomeCallback(); });
t.join();
}
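For completeness, the "make it static" fix mentioned at the top could look roughly like this. It is only a sketch reusing the question's Helper class; a static member function has no hidden this, so &SomeClass::SomeCallback converts to the plain void(*)(void) that ProcessSomething expects:
void SomeClass::Start(void)
{
    Helper helper;
    // SomeCallback is now declared static in SomeClass, so this compiles
    boost::thread t(&Helper::ProcessSomething, &helper, &SomeClass::SomeCallback);
    t.join();
}
// in the class definition:
//   static void SomeCallback(void);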
I have an object whose operations run on a boost::asio::io_service and which has some properties. Something like this:
class Foo
{
private:
// Not an int in my real code, but it doesn't really matter.
int m_bar;
boost::asio::io_service& m_io_service;
boost::asio::strand m_bar_strand;
};
m_bar is to be used only from a handler that is called through the strand m_bar_strand. This allows me not to lock from within those handlers.
To set the m_bar property from outside a thread that runs io_service::run() I wrote an asynchronous_setter, like so:
class Foo
{
public:
void async_get_bar(function<void (int)> handler)
{
m_bar_strand.post(bind(&Foo::do_get_bar, this, handler));
}
void async_set_bar(int value, function<void ()> handler)
{
m_bar_strand.post(bind(&Foo::do_set_bar, this, value, handler));
}
private:
void do_get_bar(function<void (int)> handler)
{
// This is only called from within the m_bar_strand, so we are safe.
// Run the handler to notify the caller.
handler(m_bar);
}
void do_set_bar(int value, function<void ()> handler)
{
// This is only called from within the m_bar_strand, so we are safe.
m_bar = value;
// Run the handler to notify the caller.
handler();
}
int m_bar;
boost::asio::io_service& m_io_service;
boost::asio::strand m_bar_strand;
};
This works perfectly but now I'd like to write a synchronous version of set_bar that sets the value and returns only when the set was effective. It must still guarantee that the effective set will occur within the m_bar_strand. Ideally, something reentrant.
I can imagine solutions with semaphores that would be modified from within the handler, but everything I come up with seems hackish and really not elegant. Is there something in Boost/Boost Asio that allows such a thing?
How would you proceed to implement this method?
If you need to synchronously wait on a value to be set, then Boost.Thread's futures may provide an elegant solution:
The futures library provides a means of handling synchronous future values, whether those values are generated by another thread, or on a single thread in response to external stimuli, or on-demand.
In short, a boost::promise is created and allows for a value to be set on it. The value can later be retrieved via an associated boost::future. Here is a basic example:
boost::promise<int> promise;
boost::unique_future<int> future = promise.get_future();
// start asynchronous operation that will invoke future.set_value(42)
...
assert(future.get() == 42); // blocks until future has been set.
Two other notable benefits to this approach:
future is part of C++11.
Exceptions can even be passed to future via promise::set_exception(), supporting an elegant way to provide exceptions or errors to the caller.
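For example, a handler running on the strand could forward a failure to the synchronous caller roughly like this. This is only a sketch, not part of the original code: do_get_bar_or_fail and its error condition are hypothetical, and a boost::promise<int>* is assumed to have been bound into the handler:
#include <boost/exception_ptr.hpp>
#include <boost/thread/future.hpp>
#include <stdexcept>
// illustrative only: complete the promise with either a value or an exception
void do_get_bar_or_fail(int bar, boost::promise<int>* promise)
{
    try
    {
        if (bar < 0)
            throw std::runtime_error("bar not initialized");
        promise->set_value(bar);
    }
    catch (...)
    {
        // the caller's future.get() will rethrow this exception
        promise->set_exception(boost::current_exception());
    }
}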
Here is a complete example based on the original code:
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/thread.hpp>
#include <iostream>
class Foo
{
public:
Foo(boost::asio::io_service& io_service)
: m_io_service(io_service),
m_bar_strand(io_service)
{}
public:
void async_get_bar(boost::function<void(int)> handler)
{
m_bar_strand.post(bind(&Foo::do_get_bar, this, handler));
}
void async_set_bar(int value, boost::function<void()> handler)
{
m_bar_strand.post(bind(&Foo::do_set_bar, this, value, handler));
}
int bar()
{
typedef boost::promise<int> promise_type;
promise_type promise;
// Pass the handler to async operation that will set the promise.
void (promise_type::*setter)(const int&) = &promise_type::set_value;
async_get_bar(boost::bind(setter, &promise, _1));
// Synchronously wait for promise to be fulfilled.
return promise.get_future().get();
}
void bar(int value)
{
typedef boost::promise<void> promise_type;
promise_type promise;
// Pass the handler to async operation that will set the promise.
async_set_bar(value, boost::bind(&promise_type::set_value, &promise));
// Synchronously wait for the future to finish.
promise.get_future().wait();
}
private:
void do_get_bar(boost::function<void(int)> handler)
{
// This is only called from within the m_bar_strand, so we are safe.
// Run the handler to notify the caller.
handler(m_bar);
}
void do_set_bar(int value, boost::function<void()> handler)
{
// This is only called from within the m_bar_strand, so we are safe.
m_bar = value;
// Run the handler to notify the caller.
handler();
}
int m_bar;
boost::asio::io_service& m_io_service;
boost::asio::strand m_bar_strand;
};
int main()
{
boost::asio::io_service io_service;
boost::asio::io_service::work work(io_service);
boost::thread t(
boost::bind(&boost::asio::io_service::run, boost::ref(io_service)));
Foo foo(io_service);
foo.bar(21);
std::cout << "foo.bar is " << foo.bar() << std::endl;
foo.bar(2 * foo.bar());
std::cout << "foo.bar is " << foo.bar() << std::endl;
io_service.stop();
t.join();
}
which provides the following output:
foo.bar is 21
foo.bar is 42
You could use a pipe to notify the synchronous method when the value is set in async_set_bar(). Warning: the code below is brain-compiled and likely has errors, but it should get the point across.
#include <boost/asio.hpp>
#include <iostream>
#include <thread>
class Foo
{
public:
Foo( boost::asio::io_service& io_service ) :
_bar( 0 ),
_io_service( io_service ),
_strand( _io_service ),
_readPipe( _io_service ),
_writePipe( _io_service )
{
boost::asio::local::connect_pair( _readPipe, _writePipe );
}
void set_async( int v ) {
_strand.post( [=]
{
_bar = v;
std::cout << "sending " << _bar << std::endl;
_writePipe.send( boost::asio::buffer( &_bar, sizeof(_bar) ) );
}
);
}
void set_sync( int v ) {
this->set_async( v );
int value;
_readPipe.receive( boost::asio::buffer(&value, sizeof(value) ) );
std::cout << "set value to " << value << std::endl;
}
private:
int _bar;
boost::asio::io_service& _io_service;
boost::asio::io_service::strand _strand;
boost::asio::local::stream_protocol::socket _readPipe;
boost::asio::local::stream_protocol::socket _writePipe;
};
int
main()
{
boost::asio::io_service io_service;
boost::asio::io_service::work w(io_service);
std::thread t( [&]{ io_service.run(); } );
Foo f( io_service );
f.set_sync( 20 );
io_service.stop();
t.join();
}
If you are unable to use C++11 lambdas, replace them with boost::bind and some additional completion-handler methods, as sketched below.
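For instance, set_async without a lambda might look roughly like this (a sketch only; do_set is a hypothetical private helper, and <boost/bind.hpp> would need to be included):
// hypothetical lambda-free variant of set_async
void Foo::do_set( int v )
{
    _bar = v;
    std::cout << "sending " << _bar << std::endl;
    _writePipe.send( boost::asio::buffer( &_bar, sizeof(_bar) ) );
}
void Foo::set_async( int v )
{
    _strand.post( boost::bind( &Foo::do_set, this, v ) );
}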
This is what I came up with:
class synchronizer_base
{
protected:
synchronizer_base() :
m_has_result(false),
m_lock(m_mutex)
{
}
void wait()
{
while (!m_has_result)
{
m_condition.wait(m_lock);
}
}
void notify_result()
{
m_has_result = true;
m_condition.notify_all();
}
private:
boost::atomic<bool> m_has_result;
boost::mutex m_mutex;
boost::unique_lock<boost::mutex> m_lock;
boost::condition_variable m_condition;
};
template <typename ResultType = void>
class synchronizer : public synchronizer_base
{
public:
void operator()(const ResultType& result)
{
m_result = result;
notify_result();
}
ResultType wait_result()
{
wait();
return m_result;
}
private:
ResultType m_result;
};
template <>
class synchronizer<void> : public synchronizer_base
{
public:
void operator()()
{
notify_result();
}
void wait_result()
{
wait();
}
};
And I can use it, that way:
class Foo
{
public:
void async_get_bar(function<void (int)> handler)
{
m_bar_strand.post(bind(&Foo::do_get_bar, this, handler));
}
void async_set_bar(int value, function<void ()> handler)
{
m_bar_strand.post(bind(&Foo::do_set_bar, this, value, handler));
}
int get_bar()
{
synchronizer<int> sync;
async_get_bar(boost::ref(sync));
return sync.wait_result();
}
void set_bar(int value)
{
synchronizer<void> sync;
async_set_bar(value, boost::ref(sync));
sync.wait_result();
}
};
The boost::ref is necessary because the instances of synchronizer are non-copyable. This could be avoided by wrapping synchronizer in some other container-class, but I'm fine with that solution as it is.
Note: Do NOT call such "synchronized" functions from inside a handler or it might just deadlock!