Best way to implement common behaviour on state transitions - C++

This question is about boost::msm.
I want to implement some base behaviour for all state transitions.
What is the best way to execute some function on every state transition? Is there a default mechanism for this in the state machine?

I took the first sample I could find from https://www.boost.org/doc/libs/1_81_0/libs/msm/doc/HTML/examples/Orthogonal-deferred.cpp and extracted the on_enter/on_exit behaviours:
using boost::core::demangle;
// CRTP base: every state deriving as `struct S : SharedBehaviours<S>` gets
// logged entry/exit for free. `Derived` identifies the concrete state so the
// printed name is per-state.
template <typename Derived> struct SharedBehaviours : public msm::front::state<> {
// Invoked by the MSM back-end on entering the state; prints the demangled state name.
void on_entry(auto const& /*evt*/, auto& /*fsm*/) { std::cout << "entering: " << demangle(typeid(Derived).name()) << std::endl; }
// Invoked by the MSM back-end on leaving the state.
void on_exit(auto const& /*evt*/, auto& /*fsm*/) { std::cout << "leaving: " << demangle(typeid(Derived).name()) << std::endl; }
};
Now all states can derive from it instead:
// The concrete states now inherit the logging behaviour via the CRTP base;
// the deferred-event and flag typedefs are unchanged from the original example.
struct Empty : SharedBehaviours<Empty> {
typedef mpl::vector<play> deferred_events;
};
struct Open : SharedBehaviours<Open> {
typedef mpl::vector<play> deferred_events;
typedef mpl::vector1<CDLoaded> flag_list;
};
struct Stopped : SharedBehaviours<Stopped> {
typedef mpl::vector1<CDLoaded> flag_list;
};
struct Song1 : SharedBehaviours<Song1> {
typedef mpl::vector1<FirstSongPlaying> flag_list;
};
struct Song2 : SharedBehaviours<Song2> {};
struct Song3 : SharedBehaviours<Song3> {};
It seems to work as I expect. Making the output a bit less noisy:
template <typename Derived> struct SharedBehaviours : public msm::front::state<> {
// Unqualified name of Derived: demangled, with namespace qualifiers and
// anonymous-namespace decorations stripped. Computed once per instantiation.
static std::string_view simple_name() {
static auto const cache = [] {
auto full = boost::core::demangle(typeid(Derived).name());
// Keep everything after the last scope/decoration character. If none is
// found, find_last_of returns npos and npos + 1 wraps to 0, so the full
// name is returned unchanged.
return full.substr(full.find_last_of(":;.'\"[]{}|$") + 1);
}();
return cache;
}
void on_entry(auto const& /*evt*/, auto& /*fsm*/) { std::cout << "entering: " << simple_name() << std::endl; }
void on_exit(auto const& /*evt*/, auto& /*fsm*/) { std::cout << "leaving: " << simple_name() << std::endl; }
};
Prints: Live On Coliru
#include <iostream>
// back-end
#include <boost/msm/back/state_machine.hpp>
// front-end
#include <boost/msm/front/state_machine_def.hpp>
namespace msm = boost::msm;
namespace mpl = boost::mpl;
namespace {
// CRTP base giving each state logged on_entry/on_exit with a de-noised name.
template <typename Derived> struct SharedBehaviours : public msm::front::state<> {
// Unqualified name of Derived, cached per instantiation.
// If no scope/decoration character is found, find_last_of returns npos and
// npos + 1 wraps to 0, so the full demangled name is used as-is.
static std::string_view simple_name() {
static auto const cache = [] {
auto full = boost::core::demangle(typeid(Derived).name());
return full.substr(full.find_last_of(":;.'\"[]{}|$") + 1);
}();
return cache;
}
void on_entry(auto const& /*evt*/, auto& /*fsm*/) {
std::cout << "entering: " << simple_name() << std::endl;
}
void on_exit(auto const& /*evt*/, auto& /*fsm*/) {
std::cout << "leaving: " << simple_name() << std::endl;
}
};
// events
// Each event is an empty tag struct whose type identity drives the
// transition tables below.
struct play {};
struct end_pause {};
struct stop {};
struct pause {};
struct open_close {};
struct NextSong {};
struct PreviousSong {};
struct error_found {};
struct end_error {};
struct end_error2 {};
// Flags. Allow information about a property of the current state
struct PlayingPaused {};
struct CDLoaded {};
struct FirstSongPlaying {};
// A "complicated" event type that carries some data.
struct cd_detected {
// Take by value and move into place: avoids a needless copy for rvalue
// arguments (the original copy-initialized `name` from the parameter).
cd_detected(std::string name) : name(std::move(name)) {}
std::string name;
};
// front-end: define the FSM structure
// Top-level CD-player state machine (MSM front-end definition).
// Two orthogonal regions: Empty/Open/Stopped/Playing/Paused, and AllOk/ErrorMode.
struct player_ : public msm::front::state_machine_def<player_> {
// Entry/exit of the whole machine (called by start()/stop()).
template <class Event, class FSM> void on_entry(Event const&, FSM&) {
std::cout << "entering: Player" << std::endl;
}
template <class Event, class FSM> void on_exit(Event const&, FSM&) {
std::cout << "leaving: Player" << std::endl;
}
// The list of FSM states
struct Empty : SharedBehaviours<Empty> {
// if the play event arrives in this state, defer it until a state
// handles it or rejects it
typedef mpl::vector<play> deferred_events;
};
struct Open : SharedBehaviours<Open> {
// if the play event arrives in this state, defer it until a state
// handles it or rejects it
typedef mpl::vector<play> deferred_events;
typedef mpl::vector1<CDLoaded> flag_list;
};
struct Stopped : SharedBehaviours<Stopped> {
// when stopped, the CD is loaded
typedef mpl::vector1<CDLoaded> flag_list;
};
// the player state machine contains a state which is himself a state machine
// as you see, no need to declare it anywhere so Playing can be developed
// separately by another team in another module. For simplicity I just declare it
// inside player
struct Playing_ : public msm::front::state_machine_def<Playing_> {
// when playing, the CD is loaded and we are in either pause or playing (duh)
typedef mpl::vector2<PlayingPaused, CDLoaded> flag_list;
// The list of FSM states
struct Song1 : SharedBehaviours<Song1> {
typedef mpl::vector1<FirstSongPlaying> flag_list;
};
struct Song2 : SharedBehaviours<Song2> {};
struct Song3 : SharedBehaviours<Song3> {};
// the initial state. Must be defined
typedef Song1 initial_state;
// transition actions
void start_next_song(NextSong const&) { std::cout << "Playing::start_next_song\n"; }
void start_prev_song(PreviousSong const&) { std::cout << "Playing::start_prev_song\n"; }
// guard conditions
typedef Playing_ pl; // makes transition table cleaner
// Transition table for Playing
// clang-format off
struct transition_table : mpl::vector4<
// Start Event Next Action Guard
// +---------+-------------+---------+---------------------+----------------------+
a_row < Song1 , NextSong , Song2 , &pl::start_next_song >,
a_row < Song2 , PreviousSong, Song1 , &pl::start_prev_song >,
a_row < Song2 , NextSong , Song3 , &pl::start_next_song >,
a_row < Song3 , PreviousSong, Song2 , &pl::start_prev_song >
// +---------+-------------+---------+---------------------+----------------------+
> {}; // clang-format on
// Replaces the default no-transition response.
template <class FSM, class Event> void no_transition(Event const& e, FSM&, int state) {
std::cout << "no transition from state " << state << " on event " << typeid(e).name()
<< std::endl;
}
};
// back-end
typedef msm::back::state_machine<Playing_> Playing;
// state not defining any entry or exit
struct Paused : SharedBehaviours<Paused> {
typedef mpl::vector2<PlayingPaused, CDLoaded> flag_list;
};
struct AllOk : SharedBehaviours<AllOk> {};
// this state is also made terminal so that all the events are blocked
struct ErrorMode
: // public msm::front::terminate_state<> // ErrorMode terminates the state
// machine
public msm::front::interrupt_state<
end_error /*mpl::vector<end_error,end_error2>*/> // ErroMode just
// interrupts. Will
// resume if the event
// end_error is
// generated
{};
// the initial state of the player SM. Must be defined
typedef mpl::vector<Empty, AllOk> initial_state;
// transition actions
void start_playback(play const&) { std::cout << "player::start_playback\n"; }
void open_drawer(open_close const&) { std::cout << "player::open_drawer\n"; }
void close_drawer(open_close const&) { std::cout << "player::close_drawer\n"; }
// NOTE(review): the cd payload is currently unused here.
void store_cd_info(cd_detected const& cd) { std::cout << "player::store_cd_info\n"; }
void stop_playback(stop const&) { std::cout << "player::stop_playback\n"; }
void pause_playback(pause const&) { std::cout << "player::pause_playback\n"; }
void resume_playback(end_pause const&) { std::cout << "player::resume_playback\n"; }
void stop_and_open(open_close const&) { std::cout << "player::stop_and_open\n"; }
void stopped_again(stop const&) { std::cout << "player::stopped_again\n"; }
void report_error(error_found const&) { std::cout << "player::report_error\n"; }
void report_end_error(end_error const&) { std::cout << "player::report_end_error\n"; }
// guard conditions
typedef player_ p; // makes transition table cleaner
// Transition table for player
// clang-format off
struct transition_table : mpl::vector<
// Start Event Next Action Guard
// +---------+-------------+---------+---------------------+----------------------+
a_row < Stopped , play , Playing , &p::start_playback >,
a_row < Stopped , open_close , Open , &p::open_drawer >,
a_row < Stopped , stop , Stopped , &p::stopped_again >,
// +---------+-------------+---------+---------------------+----------------------+
a_row < Open , open_close , Empty , &p::close_drawer >,
// +---------+-------------+---------+---------------------+----------------------+
a_row < Empty , open_close , Open , &p::open_drawer >,
a_row < Empty , cd_detected , Stopped , &p::store_cd_info >,
// +---------+-------------+---------+---------------------+----------------------+
a_row < Playing , stop , Stopped , &p::stop_playback >,
a_row < Playing , pause , Paused , &p::pause_playback >,
a_row < Playing , open_close , Open , &p::stop_and_open >,
// +---------+-------------+---------+---------------------+----------------------+
a_row < Paused , end_pause , Playing , &p::resume_playback >,
a_row < Paused , stop , Stopped , &p::stop_playback >,
a_row < Paused , open_close , Open , &p::stop_and_open >,
// +---------+-------------+---------+---------------------+----------------------+
a_row < AllOk , error_found ,ErrorMode, &p::report_error >,
a_row <ErrorMode,end_error ,AllOk , &p::report_end_error >
// +---------+-------------+---------+---------------------+----------------------+
> {};
// clang-format on
// Replaces the default no-transition response.
template <class FSM, class Event> void no_transition(Event const& e, FSM&, int state) {
std::cout << "no transition from state " << state << " on event " << typeid(e).name()
<< std::endl;
}
};
// Pick a back-end
typedef msm::back::state_machine<player_> player;
//
// Testing utilities.
//
// NOTE(review): this array is indexed by the back-end's internal state ids,
// so its order must match the ids MSM assigns — verify if states are added
// or reordered.
static char const* const state_names[] = {"Stopped", "Open", "Empty", "Playing",
"Paused", "AllOk", "ErrorMode"};
// Print the name of the active state in every orthogonal region of the FSM.
void pstate(player const& p) {
    unsigned int region = 0;
    while (region < player::nr_regions::value) {
        std::cout << " -> " << state_names[p.current_state()[region]] << std::endl;
        ++region;
    }
}
// Demo driver: walks the player FSM through the scenarios discussed above
// (deferred events, flag queries, orthogonal regions, interrupt state).
void test() {
player p;
// needed to start the highest-level SM. This will call on_entry and mark the
// start of the SM
p.start();
// test deferred event
// deferred in Empty and Open, will be handled only after event cd_detected
p.process_event(play());
// tests some flags
std::cout << "CDLoaded active:" << std::boolalpha << p.is_flag_active<CDLoaded>()
<< std::endl; //=> false (no CD yet)
// go to Open, call on_exit on Empty, then action, then on_entry on Open
p.process_event(open_close());
pstate(p);
p.process_event(open_close());
pstate(p);
p.process_event(cd_detected("louie, louie"));
// at this point, Play is active (was deferred)
std::cout << "PlayingPaused active:" << std::boolalpha << p.is_flag_active<PlayingPaused>()
<< std::endl; //=> true
std::cout << "FirstSong active:" << std::boolalpha << p.is_flag_active<FirstSongPlaying>()
<< std::endl; //=> true
// make transition happen inside it. Player has no idea about this event but it's
// ok.
p.process_event(NextSong());
pstate(p); // 2nd song active
p.process_event(NextSong());
pstate(p); // 3rd song active
p.process_event(PreviousSong());
pstate(p); // 2nd song active
std::cout << "FirstSong active:" << std::boolalpha << p.is_flag_active<FirstSongPlaying>()
<< std::endl; //=> false
std::cout << "PlayingPaused active:" << std::boolalpha << p.is_flag_active<PlayingPaused>()
<< std::endl; //=> true
p.process_event(pause());
pstate(p);
std::cout << "PlayingPaused active:" << std::boolalpha << p.is_flag_active<PlayingPaused>()
<< std::endl; //=> true
// go back to Playing
// as you see, it starts back from the original state
p.process_event(end_pause());
pstate(p);
p.process_event(pause());
pstate(p);
p.process_event(stop());
pstate(p);
std::cout << "PlayingPaused active:" << std::boolalpha << p.is_flag_active<PlayingPaused>()
<< std::endl; //=> false
std::cout << "CDLoaded active:" << std::boolalpha << p.is_flag_active<CDLoaded>()
<< std::endl; //=> true
// by default, the flags are OR'ed but you can also use AND. Then the flag must be
// present in all of the active states
std::cout << "CDLoaded active with AND:" << std::boolalpha
<< p.is_flag_active<CDLoaded, player::Flag_AND>() << std::endl; //=> false
// event leading to the same state
p.process_event(stop());
pstate(p);
// event leading to a terminal/interrupt state
p.process_event(error_found());
pstate(p);
// try generating more events
std::cout << "Trying to generate another event"
<< std::endl; // will not work, fsm is terminated or interrupted
p.process_event(play());
pstate(p);
std::cout << "Trying to end the error" << std::endl; // will work only if ErrorMode is interrupt state
p.process_event(end_error());
pstate(p);
std::cout << "Trying to generate another event"
<< std::endl; // will work only if ErrorMode is interrupt state
p.process_event(play());
pstate(p);
std::cout << "stop fsm" << std::endl;
p.stop();
}
} // namespace
int main() { test(); }
Output:
entering: Player
entering: Empty
entering: AllOk
CDLoaded active:false
leaving: Empty
player::open_drawer
entering: Open
-> Open
-> AllOk
leaving: Open
player::close_drawer
entering: Empty
-> Empty
-> AllOk
leaving: Empty
player::store_cd_info
entering: Stopped
leaving: Stopped
player::start_playback
entering: Song1
PlayingPaused active:true
FirstSong active:true
leaving: Song1
Playing::start_next_song
entering: Song2
-> Playing
-> AllOk
leaving: Song2
Playing::start_next_song
entering: Song3
-> Playing
-> AllOk
leaving: Song3
Playing::start_prev_song
entering: Song2
-> Playing
-> AllOk
FirstSong active:false
PlayingPaused active:true
leaving: Song2
player::pause_playback
entering: Paused
-> Paused
-> AllOk
PlayingPaused active:true
leaving: Paused
player::resume_playback
entering: Song1
-> Playing
-> AllOk
leaving: Song1
player::pause_playback
entering: Paused
-> Paused
-> AllOk
leaving: Paused
player::stop_playback
entering: Stopped
-> Stopped
-> AllOk
PlayingPaused active:false
CDLoaded active:true
CDLoaded active with AND:false
leaving: Stopped
player::stopped_again
entering: Stopped
-> Stopped
-> AllOk
leaving: AllOk
player::report_error
-> Stopped
-> ErrorMode
Trying to generate another event
-> Stopped
-> ErrorMode
Trying to end the error
player::report_end_error
entering: AllOk
-> Stopped
-> AllOk
Trying to generate another event
leaving: Stopped
player::start_playback
entering: Song1
-> Playing
-> AllOk
stop fsm
leaving: Song1
leaving: AllOk
leaving: Player

Related

Use a separate thread to execute a list of commands in c++

I have a DataClass class which has some data that needs to be generated and cleaned.
I don't know at which time during the execution of the program I'll have new data, and I was trying to use multiple threads to allow for the main one to proceed while the data is being processed.
this is the DataClass:
// Toy payload from the question: Generate() and Clean() simulate slow work by
// sleeping, then record progress in `state` (0 = untouched, 1 = generated,
// 2 = cleaned).
class DataClass
{
public:
    unsigned int state{0};

    // Simulates expensive generation work (3 s), then marks state = 1.
    void Generate()
    {
        std::this_thread::sleep_for(std::chrono::seconds(3));
        state = 1;
    }

    // Simulates cleanup work (1 s), then marks state = 2.
    void Clean()
    {
        std::this_thread::sleep_for(std::chrono::seconds(1));
        state = 2;
    }
};
I'm holding each DataClass object into two std::deque, one with the ones that needs to be generated and another with the ones that need to be cleaned.
// Work queues of non-owning pointers, shared between threads.
// NOTE(review): accessed from multiple threads with no synchronization —
// this is the data race the answer below addresses.
std::deque<DataClass*> dataToGenerate;
std::deque<DataClass*> dataToClean;
I am using two functions, CleanerFunction and GeneratorFunction, which process the contents of the two lists.
GeneratorFunction:
// Drains dataToGenerate: generates each item, then hands it to the clean
// queue. Exits as soon as the queue is empty — the behaviour the asker
// wants to avoid.
void GeneratorFunction()
{
while (!dataToGenerate.empty())
{
auto* c = dataToGenerate.front();
c->Generate();
dataToGenerate.pop_front();
dataToClean.push_back(c);
std::cout << "Generated one Data Piece." << std::endl;
}
}
(The cleaner one is similar).
And in the main function I start two threads for the two functions.
But the threads stop instantly as the two lists are empty and so I would need to create a new one each time. I've looked into condition_variables but I cannot get them to work properly as all the examples I found online were different from this scenario.
As far as I understood if I use a while(!dataToGenerate.empty()) I will execute that line always filling a thread without any actual reason, so I wanted to not use this method.
Is there a way that I could pause each thread until the lists are not empty anymore and then start the thread?
The XY ratio of this question seems rather high. I'd read up on task queuing and locking and rephrase your solution in terms of that.
In particular, you would find this close to a producer/consumer pattern.
Queue
Let's start with a minimal generic locking queue:
// Minimal bounded, thread-safe FIFO. A max of 0 means "unbounded".
template <typename T>
struct Queue {
    Queue(size_t max = 50) : _max(max) {}

    // Blocks while the queue is full, then appends v. Returns the size as
    // observed under the lock — stale by the time the caller reads it, hence
    // only a rough load indicator.
    size_t enqueue(T v) {
        std::unique_lock lock(_mx);
        while (!(_max == 0 || _storage.size() < _max))
            _cond.wait(lock);
        _storage.push_back(std::move(v));
        _cond.notify_one();
        return _storage.size();
    }

    // Waits up to d for an element; returns it, or nullopt on timeout, so a
    // consumer can detect "queue empty" without blocking forever.
    template <typename Duration>
    std::optional<T> dequeue(Duration d) {
        std::unique_lock lock(_mx);
        if (!_cond.wait_for(lock, d, [this] { return !_storage.empty(); }))
            return std::nullopt;
        T item = std::move(_storage.front());
        _storage.pop_front();
        _cond.notify_one();
        return item;
    }

  private:
    size_t _max;
    mutable std::mutex _mx;
    mutable std::condition_variable _cond;
    std::deque<T> _storage;
};
This never blocks on dequeue (so you can detect and handle queue-empty). It will in principle not block on enqueue unless a certain limit has been reached. Make the limit 0 to have an unbounded queue.
Now you can have any number of queues with any number of producers/consumers you wish. E.g.:
Program Logic
struct DataClass {
// Task id, used only for logging which task is processed.
int id;
// 0 = untouched, 1 = generated, 2 = cleaned.
unsigned int state{ 0 };
DataClass(int id) : id(id) {}
// sleep_for here is the file-local lambda wrapping std::this_thread::sleep_for.
void Generate() { sleep_for(3s); state = 1; }
void Clean() { sleep_for(1s); state = 2; }
};
Now let's make a program with 4 generator threads and 2 cleaner threads, monitoring 2 queues (genTasks and cleanTasks).
// Owns the worker threads and both queues, tying their lifetime to RAII:
// the destructor requests shutdown and joins every worker.
struct Program {
Program() {
auto worker_id = 1;
_workers.emplace_back([this, id=worker_id++] { generate_worker(id); });
_workers.emplace_back([this, id=worker_id++] { generate_worker(id); });
_workers.emplace_back([this, id=worker_id++] { generate_worker(id); });
_workers.emplace_back([this, id=worker_id++] { generate_worker(id); });
_workers.emplace_back([this, id=worker_id++] { clean_worker(id); });
_workers.emplace_back([this, id=worker_id++] { clean_worker(id); });
}
// Submit a task; the return value is the (racy) generate-queue size.
size_t createWork(DataClass task) {
return genTasks.enqueue(std::move(task));
}
~Program() {
_shutdown = true;
for (auto& th: _workers)
if (th.joinable()) th.join();
}
private:
Queue<DataClass> genTasks, cleanTasks;
std::atomic_bool _shutdown { false };
std::list<std::thread> _workers;
// Worker loop: drain genTasks until it has been empty for >= 1s, then
// re-check the shutdown flag and repeat.
void generate_worker(int worker_id) {
while (!_shutdown) {
while (auto task = genTasks.dequeue(1s)) {
std::cout << "Worker #" << worker_id << " Generate: " << task->id << std::endl;
task->Generate();
cleanTasks.enqueue(std::move(*task));
}
}
std::cout << "Worker #" << worker_id << " Exit generate_worker" << std::endl;
}
// Same pattern for the cleanup stage.
void clean_worker(int worker_id) {
while (!_shutdown) {
while (auto task = cleanTasks.dequeue(1s)) {
std::cout << "Worker #" << worker_id << " Clean: " << task->id << std::endl;
task->Clean();
std::cout << "Worker #" << worker_id << " Done: " << task->id << std::endl;
}
}
std::cout << "Worker #" << worker_id << " Exit clean_worker" << std::endl;
}
};
I added a _shutdown flag for good measure, though it's not very forceful (it waits until the workers have been idle for at least one second — dequeue(1s)). If you want more intrusive shutdown, sprinkle some if (_shutdown) break; statements through the worker loops.
Full Demo
Let's drive it with some work:
int main() {
    Program p;
    // Enqueue ten jobs with small random pauses between submissions.
    for (int i = 1; i <= 10; ++i) {
        sleep_for((rand() % 100) * 1ms);
        p.createWork(i);
    }
    sleep_for(2.5s);
    std::cout << "Load at createWork(42) is ~" << p.createWork(42) << std::endl;
    sleep_for(2.5s);
    std::cout << "Load at createWork(43) is ~" << p.createWork(43) << std::endl;
    sleep_for(4s);
    std::cout << "Initiating shutdown\n";
    // Program destructor performs shutdown
}
Prints
Live On Coliru
Worker #2 Generate: 1
Worker #1 Generate: 2
Worker #3 Generate: 3
Worker #4 Generate: 4
Worker #2 Generate: 5
Worker #5 Clean: 1
Load at createWork(42) is ~6
Worker #1 Generate: 6
Worker #6 Clean: 2
Worker #3 Generate: 7
Worker #4 Generate: 8
Worker #5 Done: 1
Worker #5 Clean: 3
Worker #6 Done: 2
Worker #6 Clean: 4
Worker #5 Done: 3
Worker #6 Done: 4
Load at createWork(43) is ~4
Worker #2 Generate: 9
Worker #5 Clean: 5
Worker #1 Generate: 10
Worker #6 Clean: 6
Worker #3 Generate: 42
Worker #4 Generate: 43
Worker #5 Done: 5
Worker #5 Clean: 7
Worker #6 Done: 6
Worker #6 Clean: 8
Worker #5 Done: 7
Worker #6 Done: 8
Worker #5 Clean: 9
Worker #6 Clean: 10
Initiating shutdown
Worker #2 Exit generate_worker
Worker #5 Done: 9
Worker #5 Clean: 42
Worker #1 Exit generate_worker
Worker #6 Done: 10
Worker #6 Clean: 43
Worker #3 Exit generate_worker
Worker #4 Exit generate_worker
Worker #5 Done: 42
Worker #6 Done: 43
Worker #5 Exit clean_worker
Worker #6 Exit clean_worker
Unfinished generate/clean tasks: 0/0
Full Listing
Live On Coliru
#include <mutex>
#include <condition_variable>
#include <deque>
#include <optional>
// Minimal bounded, thread-safe FIFO. A max of 0 means "unbounded".
template <typename T>
struct Queue {
    Queue(size_t max = 50) : _max(max) {}

    // Blocks while the queue is full, then appends v. Returns the size as
    // observed under the lock — stale by the time the caller reads it, hence
    // only a rough load indicator.
    size_t enqueue(T v) {
        std::unique_lock lock(_mx);
        while (!(_max == 0 || _storage.size() < _max))
            _cond.wait(lock);
        _storage.push_back(std::move(v));
        _cond.notify_one();
        return _storage.size();
    }

    // Waits up to d for an element; returns it, or nullopt on timeout, so a
    // consumer can detect "queue empty" without blocking forever.
    template <typename Duration>
    std::optional<T> dequeue(Duration d) {
        std::unique_lock lock(_mx);
        if (!_cond.wait_for(lock, d, [this] { return !_storage.empty(); }))
            return std::nullopt;
        T item = std::move(_storage.front());
        _storage.pop_front();
        _cond.notify_one();
        return item;
    }

    // Snapshot of the current size (stale as soon as it is returned when
    // other threads are active).
    size_t size() const {
        std::unique_lock lock(_mx);
        return _storage.size();
    }

  private:
    size_t _max;
    mutable std::mutex _mx;
    mutable std::condition_variable _cond;
    std::deque<T> _storage;
};
#include <chrono>
#include <thread>
#include <iostream>
#include <list>
#include <atomic>
using namespace std::chrono_literals;
// Convenience wrapper so call sites can write sleep_for(3s) unqualified.
static inline auto sleep_for = [](auto d) { std::this_thread::sleep_for(d); };
struct DataClass {
// Task id, used only for logging.
int id;
// 0 = untouched, 1 = generated, 2 = cleaned.
unsigned int state{ 0 };
DataClass(int id) : id(id) {}
// The commented declarations show how to make the type move-only if copying
// should be forbidden; the implicit members suffice as-is.
//DataClass(DataClass&&) = default;
//DataClass& operator=(DataClass&&) = default;
//DataClass(DataClass const&) = delete;
void Generate() { sleep_for(3s); state = 1; }
void Clean() { sleep_for(1s); state = 2; }
};
// Owns six worker threads (4 generators, 2 cleaners) and the two queues.
// The destructor requests shutdown, joins the workers, and reports any
// tasks left behind.
struct Program {
Program() {
auto worker_id = 1;
_workers.emplace_back([this, id=worker_id++] { generate_worker(id); });
_workers.emplace_back([this, id=worker_id++] { generate_worker(id); });
_workers.emplace_back([this, id=worker_id++] { generate_worker(id); });
_workers.emplace_back([this, id=worker_id++] { generate_worker(id); });
_workers.emplace_back([this, id=worker_id++] { clean_worker(id); });
_workers.emplace_back([this, id=worker_id++] { clean_worker(id); });
}
// Submit a task; the return value is the (racy) generate-queue size.
size_t createWork(DataClass task) {
return genTasks.enqueue(std::move(task));
}
~Program() {
_shutdown = true;
for (auto& th: _workers)
if (th.joinable()) th.join();
std::cout << "Unfinished generate/clean tasks: " << genTasks.size() << "/" << cleanTasks.size() << "\n";
}
private:
Queue<DataClass> genTasks, cleanTasks;
std::atomic_bool _shutdown { false };
std::list<std::thread> _workers;
// Worker loop: drain genTasks until it has been empty for >= 1s, then
// re-check the shutdown flag and repeat.
void generate_worker(int worker_id) {
while (!_shutdown) {
while (auto task = genTasks.dequeue(1s)) {
std::cout << "Worker #" << worker_id << " Generate: " << task->id << std::endl;
task->Generate();
cleanTasks.enqueue(std::move(*task));
}
}
std::cout << "Worker #" << worker_id << " Exit generate_worker" << std::endl;
}
// Same pattern for the cleanup stage.
void clean_worker(int worker_id) {
while (!_shutdown) {
while (auto task = cleanTasks.dequeue(1s)) {
std::cout << "Worker #" << worker_id << " Clean: " << task->id << std::endl;
task->Clean();
std::cout << "Worker #" << worker_id << " Done: " << task->id << std::endl;
}
}
std::cout << "Worker #" << worker_id << " Exit clean_worker" << std::endl;
}
};
int main() {
    Program p;
    // Enqueue ten jobs with small random pauses between submissions.
    for (int i = 1; i <= 10; ++i) {
        sleep_for((rand() % 100) * 1ms);
        p.createWork(i);
    }
    sleep_for(2.5s);
    std::cout << "Load at createWork(42) is ~" << p.createWork(42) << std::endl;
    sleep_for(2.5s);
    std::cout << "Load at createWork(43) is ~" << p.createWork(43) << std::endl;
    sleep_for(4s);
    std::cout << "Initiating shutdown\n";
    // Program destructor performs shutdown
}

Difficulties in assigning threads for function handlers of async operations in boost asio

I know that the thread which runs io_service.run() is responsible for executing the completion handlers of asynchronous operations, but I have problems assigning a thread to an asynchronous operation that is initiated from the callback of a parent async operation.
For example consider the bellow program:
#ifdef WIN32
#define _WIN32_WINNT 0x0501
#include <stdio.h>
#endif
#include <fstream> // for writting to file
#include <iostream> // for writting to file
#include <stdlib.h> // atoi (string to integer)
#include <chrono>
#include <boost/thread.hpp> // for multi threading
#include <boost/bind.hpp>
#include <boost/asio.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/enable_shared_from_this.hpp>
#include <signal.h> // For Interrupt Handling (Signal Handling Event)
#include <vector>
#define max_length 46
#define server_ip1 "127.0.0.1"
//#define server_ip2 "127.0.0.1"
// NOTE(review): server_port is defined but the endpoint below hard-codes 4000.
#define server_port 4000
// MEM_FN* bind a member function to shared_from_this(), keeping the object
// alive for the duration of the pending async operation.
#define MEM_FN(x) boost::bind(&self_type::x, shared_from_this())
#define MEM_FN1(x,y) boost::bind(&self_type::x, shared_from_this(),y)
#define MEM_FN2(x,y,z) boost::bind(&self_type::x, shared_from_this(),y,z)
void talk1();
using namespace boost::asio;
// Global service/socket/endpoint state; the answer below removes these globals.
io_service service, service2;
std::chrono::time_point<std::chrono::high_resolution_clock> t_start;
ip::udp::socket sock1(service);
ip::udp::endpoint ep1( ip::address::from_string(server_ip1), 4000);
//ip::udp::socket sock2(service);
//ip::udp::endpoint ep2( ip::address::from_string(server_ip2), 4000);
std::chrono::time_point<std::chrono::high_resolution_clock> tc;
int OnCon[2];
// Sends one message on the global sock1 and reads the reply, timestamping
// each stage. Kept alive across async operations by the MEM_FN2 macros,
// which bind shared_from_this().
// NOTE(review): this class hard-codes the globals sock1/ep1, so it cannot be
// reused for a second socket — one of the issues the answer addresses.
class talk_to_svr1 : public boost::enable_shared_from_this<talk_to_svr1>, boost::noncopyable {
typedef talk_to_svr1 self_type;
// NOTE(review): the ep parameter is ignored; the global ep1 is used instead.
talk_to_svr1(const std::string & message, ip::udp::endpoint ep) : started_(true), message_(message) {}
void start(ip::udp::endpoint ep) {
do_write(message_);
}
public:
typedef boost::system::error_code error_code;
typedef boost::shared_ptr<talk_to_svr1> ptr;
// Factory: constructs a shared instance and immediately starts the write.
static ptr start(ip::udp::endpoint ep, const std::string & message) {
ptr new_(new talk_to_svr1(message, ep));
new_->start(ep);
return new_;
}
bool started() { return started_; }
private:
// Read completion: prints the three timestamps (relative to t_start) and the reply.
void on_read(const error_code & err, size_t bytes) {
this->t2 = std::chrono::high_resolution_clock::now(); // Time of finished reading
if ( !err) {
auto t0_rel = 1.e-9*std::chrono::duration_cast<std::chrono::nanoseconds>(t0-t_start).count();
auto t1_rel = 1.e-9*std::chrono::duration_cast<std::chrono::nanoseconds>(t1-t_start).count();
auto t2_rel = 1.e-9*std::chrono::duration_cast<std::chrono::nanoseconds>(t2-t_start).count();
std::cout << "Sock1: " << t0_rel << ", " << t1_rel << ", " << t2_rel << std::endl;
std::string msg(read_buffer_, bytes);
std::cout << msg << std::endl;
}
else {
std::cout << "Error occured in reading data from server (Sock1)" << std::endl;
}
}
// Write completion: record the time and chain the read of the response.
void on_write(const error_code & err, size_t bytes) {
this->t1 = std::chrono::high_resolution_clock::now(); // Time of finished writting
std::cout << "Sock1 successfully sent " << bytes << " bytes of data" << std::endl;
do_read();
}
void do_read() {
sock1.async_receive_from(buffer(read_buffer_),ep1 ,MEM_FN2(on_read,_1,_2));
}
// NOTE(review): std::copy has no bounds check — assumes msg.size() <= max_length.
void do_write(const std::string & msg) {
if ( !started() ) return;
std::copy(msg.begin(), msg.end(), write_buffer_);
this->t0 = std::chrono::high_resolution_clock::now(); // Time of starting to write
sock1.async_send_to( buffer(write_buffer_, msg.size()), ep1, MEM_FN2(on_write,_1,_2) );
}
public:
std::chrono::time_point<std::chrono::high_resolution_clock> t0; // Time of starting to write
std::chrono::time_point<std::chrono::high_resolution_clock> t1; // Time of finished writting
std::chrono::time_point<std::chrono::high_resolution_clock> t2; // Time of finished reading
private:
// NOTE(review): indx is never initialized or used.
int indx;
char read_buffer_[max_length];
char write_buffer_[max_length];
bool started_;
std::string message_;
};
// Blocking sleep helpers. NOTE(review): these block whatever thread calls
// them — including the io_service thread when invoked from a completion
// handler, which is exactly the problem described in the question.
void wait_s(int seconds)
{
boost::this_thread::sleep_for(boost::chrono::seconds{seconds});
}
void wait_ms(int msecs) {
boost::this_thread::sleep( boost::posix_time::millisec(msecs));
}
// Thread entry points that pump the event loops (service2 is currently unused).
void async_thread() {
service.run();
}
void async_thread2() {
service2.run();
}
// Starts the request sequence for the given channel index.
// NOTE(review): called from the on_connect completion handler, so the
// wait_s() calls here run on (and block) the io_service thread.
void GoOperational(int indx) {
if (indx == 0) {
talk_to_svr1::start(ep1, "Message01");
wait_s(1);
talk_to_svr1::start(ep1, "Message02");
wait_s(2);
}
else if (indx == 1) {
//talk_to_svr2::start(ep2, "Masoud");
wait_s(1);
//talk_to_svr2::start(ep2, "Ahmad");
wait_s(2);
}
else {
std::cout << "Wrong index!." << std::endl;
}
}
// Completion handler for the async_connect on channel ii (0 or 1).
// On success it records the connection in OnCon and, once the expected
// socket count is reached, starts the application traffic.
void on_connect(const boost::system::error_code & err, int ii) {
if ( !err) {
// Only announce and record the connection on success — the original
// printed "is connected" and set OnCon[ii] even when err was set.
std::cout << "Socket "<< ii << " is connected."<< std::endl;
OnCon[ii] = 1;
tc = std::chrono::high_resolution_clock::now();
auto ty = 1.e-9*std::chrono::duration_cast<std::chrono::nanoseconds>(tc-t_start).count();
std::cout << "Sock " << ii << " connected at time: " << ty << " seconds" << std::endl;
if ( (OnCon[0] /*+ OnCon[1]*/ ) == 1) {
GoOperational(0);
//GoOperational(1);
}
}
else {
// Fixed message: the original was missing the space before "had".
std::cout << "Socket " << ii << " had a problem for connecting to server.";
}
}
// Entry point: initialize bookkeeping, start the async connect, and pump the
// io_service on a worker thread, joining it so main outlives the async work.
int main(int argc, char* argv[]) {
OnCon[0] = 0;
OnCon[1] = 0;
// (Removed the no-op self-assignments `ep1 = ep1;` / `//ep2 = ep2;` — they
// had no effect.)
std::cout.precision(9);
std::cout << "///////////////////////" << std::endl;
std::cout << "Socket Number, Time of starting to write, Time of finished writting, time of finished reading" << std::endl;
t_start = std::chrono::high_resolution_clock::now();
sock1.async_connect(ep1, boost::bind(on_connect, boost::asio::placeholders::error, 0));
//sock2.async_connect(ep2, boost::bind(on_connect, boost::asio::placeholders::error, 1));
boost::thread b{boost::bind(async_thread)};
b.join();
}
In this program I have a global udp socket named sock1 which connects by running sock1.async_connect() in the main function. In the callback of this asynchronous operation, I make two instances of the talk_to_svr1 class, each of which is responsible for sending a message to the server and then receiving the response from the server asynchronously.
I need to wait 3 seconds before sending the second message, and that is why I called wait_s(1) before making the second instance of talk_to_svr1. The problem is that calling wait_s(1), in addition to pausing the main thread, will also pause the asynchronous sending operation, which is not desired.
I would be grateful if anybody could change the above code in a way that another thread become responsible for asynchronously sending message to server so that calling wait_s(1) will not pause sending operation.
Note: posted an alternative using coroutines as well
Asynchronous coding by definition doesn't require you to "control" threads. In fact, you shouldn't need threads. Of course, you can't block inside completion handlers because that will hinder progress.
You can simply use a timer, expiring in 3s, async_wait for it and in its completion handler send the second request.
Here's a big cleanup of your code. Note that I removed all use of global variables. They were making things very error prone and leading to a lot of duplication (in fact talk_to_svr1 hardcoded ep1 and sock1 so it was useless for your second channel, that was largely commented out).
The crux of the change is to have message_operation take a continuation:
// Fire-and-forget helper: sends `message` on socket `s` and invokes `handler`
// with (error_code, response) once the round trip completes.
// NOTE(review): message_operation is defined elsewhere (not shown in this
// excerpt); the shared_ptr presumably keeps the operation alive until done.
template <typename F_>
void async_message(udp::socket& s, std::string const& message, F_&& handler) {
using Op = message_operation<F_>;
boost::shared_ptr<Op> new_(new Op(s, message, std::forward<F_>(handler)));
new_->do_write();
}
When the message/response is completed, handler is called. Now, we can implement the application protocol (basically what you tried to capture in on_connect/GoOperational):
////////////////////////////////////////////////////
// basic protocol (2 messages, 1 delay)
// Drives the application protocol on a single UDP channel:
// connect -> send message1 -> non-blocking delay -> send message2.
// All steps are chained through completion handlers, so several channels
// can interleave on one io_service thread without ever blocking.
struct ApplicationProtocol {
    // m1/m2 are the two request payloads; `delay` is the pause between them.
    ApplicationProtocol(ba::io_service& service, udp::endpoint ep, std::string m1, std::string m2, std::chrono::seconds delay = 3s)
        : _service(service),
          _endpoint(ep),
          message1(std::move(m1)), message2(std::move(m2)),
          delay(delay), timer(service)
    { }

    // Kick off the chain; the completion handlers below do the rest.
    void go() {
        _socket.async_connect(_endpoint, boost::bind(&ApplicationProtocol::on_connect, this, _1));
    }

private:
    ba::io_service& _service;
    udp::socket _socket{_service};
    udp::endpoint _endpoint;
    std::string message1, message2;
    std::chrono::seconds delay;      // pause between the two messages
    ba::high_resolution_timer timer; // implements the pause without blocking the thread

    void on_connect(error_code ec) {
        std::cout << _endpoint << " connected at " << relatime() << " ms\n";
        if (!ec) {
            async_message(_socket, message1, boost::bind(&ApplicationProtocol::on_message1_sent, this, _1, _2));
        } else {
            std::cout << "Socket had a problem for connecting to server.";
        }
    }

    void on_message1_sent(error_code ec, std::string response) {
        if (ec)
            std::cout << "Message 1 failed: " << ec.message() << "\n";
        else {
            std::cout << "Message 1 returned: '" << response << "'\n";
            // Non-blocking delay: async_wait instead of sleeping the thread.
            timer.expires_from_now(delay);
            timer.async_wait(boost::bind(&ApplicationProtocol::on_delay_complete, this, _1));
        }
    }

    void on_delay_complete(error_code ec) {
        if (ec)
            std::cout << "Delay failed: " << ec.message() << "\n"; // fixed typo: was "Delay faile:"
        else {
            std::cout << "Delay completed\n";
            async_message(_socket, message2, boost::bind(&ApplicationProtocol::on_message2_sent, this, _1, _2));
        }
    }

    void on_message2_sent(error_code ec, std::string response) {
        if (ec)
            std::cout << "Message 2 failed: " << ec.message() << "\n";
        else {
            std::cout << "Message 2 returned: '" << response << "'\n";
        }
    }
};
Note how much simpler it becomes to use it:
// Two independent protocol channels share one io_service on a single thread;
// their 3s/2s delays overlap instead of blocking each other.
int main() {
ba::io_service service;
std::cout.precision(2);
std::cout << std::fixed;
ApplicationProtocol
channel1(service, {{}, 4000}, "Message01\n", "Message02\n", 3s),
channel2(service, {{}, 4001}, "Masoud\n", "Ahmad\n", 2s);
channel1.go();
channel2.go();
// Blocks until every pending handler (messages and timers) has run.
service.run();
}
When running two udp services like so:
yes first|nl|netcat -ulp 4000& yes second|nl|netcat -ulp 4001& time wait
We get the following output: Live On Coliru
0.0.0.0:4000 connected at 1.87 ms
0.0.0.0:4001 connected at 1.99 ms
127.0.0.1:4000 successfully sent 10 bytes of data
127.0.0.1:4001 successfully sent 7 bytes of data
127.0.0.1:4000: start 1.91, written 2.03, finished 2.25 ms
Message 1 returned: ' 1 first
2 first
3 first
4 '
127.0.0.1:4001: start 2.00, written 2.06, finished 2.34 ms
Message 1 returned: ' 1 second
2 second
3 second
'
Delay completed
127.0.0.1:4001 successfully sent 6 bytes of data
127.0.0.1:4001: start 2002.46, written 2002.49, finished 2002.53 ms
Message 2 returned: '47 second
148 second
149 second
150 s'
Delay completed
127.0.0.1:4000 successfully sent 10 bytes of data
127.0.0.1:4000: start 3002.36, written 3002.39, finished 3002.41 ms
Message 2 returned: 'first
159 first
160 first
161 first
'
And the server side receives the following messages in sequence:
Full Code
Live On Coliru
#include <boost/asio.hpp>
#include <boost/asio/high_resolution_timer.hpp>
#include <boost/bind.hpp>
#include <boost/enable_shared_from_this.hpp>
#include <boost/shared_ptr.hpp>
#include <chrono>
#include <iostream>
#define MEM_FN2(x, y, z) boost::bind(&self_type::x, shared_from_this(), y, z)
namespace ba = boost::asio;
using ba::ip::udp;
using boost::system::error_code;
using ba::asio_handler_invoke;
////////////////////////////////////////////////////
// timing stuff
using namespace std::chrono_literals;
using hrclock = std::chrono::high_resolution_clock;
using time_point = hrclock::time_point;
// Milliseconds elapsed since the first call, which anchors the epoch.
static double relatime(time_point tp = hrclock::now()) {
    static const time_point t_start = hrclock::now(); // fixed on first invocation
    return std::chrono::duration<double, std::milli>(tp - t_start).count();
}
////////////////////////////////////////////////////
// message operation - with F continuation
// One request/response round-trip on an already-connected UDP socket.
// Lifetime: the shared_ptr created in async_message() is captured by the
// bound completion handlers (shared_from_this), keeping the object alive
// until the operation finishes. The continuation F is invoked exactly once
// with (error_code, response-string).
template <typename F>
class message_operation : public boost::enable_shared_from_this<message_operation<F> >, boost::noncopyable {
    typedef message_operation self_type;

    // Only the factory below may construct instances.
    template <typename F_>
    friend void async_message(udp::socket&, std::string const&, F_&&);

private:
    template <typename F_>
    message_operation(udp::socket& s, std::string message, F_&& handler)
        : _socket(s), _endpoint(s.remote_endpoint()), handler_(std::forward<F_>(handler)), message_(std::move(message)) {}

    using boost::enable_shared_from_this<message_operation>::shared_from_this;

    void do_write() {
        t0 = hrclock::now(); // time of starting to write
        _socket.async_send_to(ba::buffer(message_), _endpoint, MEM_FN2(on_write, _1, _2));
    }

    void on_write(const error_code& err, size_t bytes) {
        t1 = hrclock::now(); // time of finished writing
        if (err)
            handler_(err, "");
        else {
            std::cout << _endpoint << " successfully sent " << bytes << " bytes of data\n";
            do_read();
        }
    }

    void do_read() {
        _socket.async_receive_from(ba::buffer(read_buffer_), _sender, MEM_FN2(on_read, _1, _2));
    }

    void on_read(const error_code& err, size_t bytes) {
        t2 = hrclock::now(); // time of finished reading
        if (!err) {
            std::cout << _endpoint
                      << ": start " << relatime(t0)
                      << ", written " << relatime(t1)
                      << ", finished " << relatime(t2)
                      << " ms\n";
            handler_(err, std::string(read_buffer_, bytes));
        } else {
            std::cout << "Error occurred in reading data from server\n";
            // BUG FIX: previously the error was only logged and handler_ was never
            // invoked, silently stalling the caller's protocol chain. Propagate it.
            handler_(err, "");
        }
    }

    time_point t0, t1, t2; // start-of-write, end-of-write, end-of-read timestamps
    // params
    udp::socket& _socket;
    udp::endpoint _endpoint;
    F handler_;
    // sending
    std::string message_;
    // receiving
    udp::endpoint _sender;
    char read_buffer_[46];
};
// Fire-and-forget entry point for one UDP message/response round-trip.
// `handler(error_code, response)` is invoked exactly once when the exchange completes.
template <typename F_>
void async_message(udp::socket& s, std::string const& message, F_&& handler) {
using Op = message_operation<F_>;
// The operation object keeps itself alive through the shared_ptr captured by
// its own completion handlers; no caller-side lifetime management is needed.
boost::shared_ptr<Op> new_(new Op(s, message, std::forward<F_>(handler)));
new_->do_write();
}
////////////////////////////////////////////////////
// basic protocol (2 messages, 1 delay)
// Drives the application protocol on a single UDP channel:
// connect -> send message1 -> non-blocking delay -> send message2.
// All steps are chained through completion handlers, so several channels
// can interleave on one io_service thread without ever blocking.
struct ApplicationProtocol {
    // m1/m2 are the two request payloads; `delay` is the pause between them.
    ApplicationProtocol(ba::io_service& service, udp::endpoint ep, std::string m1, std::string m2, std::chrono::seconds delay = 3s)
        : _service(service),
          _endpoint(ep),
          message1(std::move(m1)), message2(std::move(m2)),
          delay(delay), timer(service)
    { }

    // Kick off the chain; the completion handlers below do the rest.
    void go() {
        _socket.async_connect(_endpoint, boost::bind(&ApplicationProtocol::on_connect, this, _1));
    }

private:
    ba::io_service& _service;
    udp::socket _socket{_service};
    udp::endpoint _endpoint;
    std::string message1, message2;
    std::chrono::seconds delay;      // pause between the two messages
    ba::high_resolution_timer timer; // implements the pause without blocking the thread

    void on_connect(error_code ec) {
        std::cout << _endpoint << " connected at " << relatime() << " ms\n";
        if (!ec) {
            async_message(_socket, message1, boost::bind(&ApplicationProtocol::on_message1_sent, this, _1, _2));
        } else {
            std::cout << "Socket had a problem for connecting to server.";
        }
    }

    void on_message1_sent(error_code ec, std::string response) {
        if (ec)
            std::cout << "Message 1 failed: " << ec.message() << "\n";
        else {
            std::cout << "Message 1 returned: '" << response << "'\n";
            // Non-blocking delay: async_wait instead of sleeping the thread.
            timer.expires_from_now(delay);
            timer.async_wait(boost::bind(&ApplicationProtocol::on_delay_complete, this, _1));
        }
    }

    void on_delay_complete(error_code ec) {
        if (ec)
            std::cout << "Delay failed: " << ec.message() << "\n"; // fixed typo: was "Delay faile:"
        else {
            std::cout << "Delay completed\n";
            async_message(_socket, message2, boost::bind(&ApplicationProtocol::on_message2_sent, this, _1, _2));
        }
    }

    void on_message2_sent(error_code ec, std::string response) {
        if (ec)
            std::cout << "Message 2 failed: " << ec.message() << "\n";
        else {
            std::cout << "Message 2 returned: '" << response << "'\n";
        }
    }
};
// Two independent protocol channels share one io_service on a single thread;
// their 3s/2s delays overlap instead of blocking each other.
int main() {
ba::io_service service;
relatime(); // start the clock
std::cout.precision(2);
std::cout << std::fixed;
ApplicationProtocol
channel1(service, {{}, 4000}, "Message01\n", "Message02\n", 3s),
channel2(service, {{}, 4001}, "Masoud\n", "Ahmad\n", 2s);
channel1.go();
channel2.go();
// Blocks until every pending handler (messages and timers) has run.
service.run();
}
In addition to the "normal" answer posted before, here's one that does exactly the same but using coroutines:
Live On Coliru
#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/high_resolution_timer.hpp>
#include <iostream>
namespace ba = boost::asio;
using ba::ip::udp;
using boost::system::error_code;
////////////////////////////////////////////////////
// timing stuff
using namespace std::chrono_literals;
using hrclock = std::chrono::high_resolution_clock;
using time_point = hrclock::time_point;
// Milliseconds elapsed since the first call, which anchors the epoch.
static double relatime(time_point tp = hrclock::now()) {
    static const time_point t_start = hrclock::now(); // fixed on first invocation
    return std::chrono::duration<double, std::milli>(tp - t_start).count();
}
// Coroutine version: each channel is one spawned coroutine; all per-request
// state (socket, timestamps, buffers) lives on the coroutine stack.
int main() {
ba::io_service service;
relatime(); // start the clock
std::cout.precision(2);
std::cout << std::fixed;
// `go` spawns one coroutine implementing the whole protocol for one endpoint.
auto go = [&](udp::endpoint ep, std::string const& m1, std::string const& m2, hrclock::duration delay) {
ba::spawn(service, [=,&service](ba::yield_context yield) {
udp::socket sock(service);
time_point t0, t1, t2;
// Synchronous-looking send+receive; suspends the coroutine at each step.
auto async_message = [&](std::string const& message) {
t0 = hrclock::now();
auto bytes = sock.async_send_to(ba::buffer(message), ep, yield);
t1 = hrclock::now();
char read_buffer_[46];
udp::endpoint _sender;
bytes = sock.async_receive_from(ba::buffer(read_buffer_), _sender, yield);
t2 = hrclock::now();
return std::string {read_buffer_, bytes};
};
// Errors surface as exceptions with yield_context (no explicit error_code).
try {
sock.async_connect(ep, yield);
std::cout << ep << " connected at " << relatime() << " ms\n";
std::cout << "Message 1 returned: '" << async_message(m1) << "'\n";
std::cout << ep << ": start " << relatime(t0) << ", written " << relatime(t1) << ", finished " << relatime(t2) << " ms\n";
// Non-blocking delay: only this coroutine suspends; others keep running.
ba::high_resolution_timer timer(service, delay);
timer.async_wait(yield);
std::cout << "Message 2 returned: '" << async_message(m2) << "'\n";
std::cout << ep << ": start " << relatime(t0) << ", written " << relatime(t1) << ", finished " << relatime(t2) << " ms\n";
} catch(std::exception const& e) {
std::cout << ep << " error: " << e.what() << "\n";
}
});
};
go({{}, 4000}, "Message01\n", "Message02\n", 3s),
go({{}, 4001}, "Masoud\n", "Ahmad\n", 2s);
// Runs both coroutines to completion on this single thread.
service.run();
}
As you can see, using coroutines has the luxury of having all coro state "implicitly" on the coro stack. This means: no more adhoc classes for async operations with state, and vastly reduced lifetime issues.
Output
0.0.0.0:4000 connected at 0.52 ms
Message 1 returned: '0.0.0.0:4001 connected at 0.64 ms
Message 1 returned: ' 1 first
2 first
3 first
4 '
0.0.0.0:4000: start 0.55, written 0.68, finished 0.86 ms
1 second
2 second
3 second
'
0.0.0.0:4001: start 0.65, written 0.70, finished 0.91 ms
Message 2 returned: '47 second
148 second
149 second
150 s'
0.0.0.0:4001: start 2001.03, written 2001.06, finished 2001.07 ms
Message 2 returned: 'first
159 first
160 first
161 first
'
0.0.0.0:4000: start 3001.10, written 3001.15, finished 3001.16 ms

How do I get boost.msm to properly change state when using a signal handler to trigger events?

My (boost.msm) state machine appears to 'roll-back' when using signal handlers
to trigger events. However, when I use direct calls to trigger events the state
machine behaves correctly.
I looked in the boost documentation and searched the web, but it seems that
all of the examples use direct calls for event triggering. I also searched SO,
but couldn't find anything addressing this topic.
I'm in the process of learning the boost meta state machine library to see if
it would be useful to replace the existing "home grown" state machine library
currently used by my development team.
In order for this to work, I'll need to be able to trigger state machine events
from signal handlers (handling signals from boost.signals2).
I created a simple, but contrived, example to give it a test run and was
baffled when I saw that after the first event was triggered, the state
machine correctly (but temporarily) changed states (while in the signal handler)
but apparently 'rolled back' after returning to main.
When I bypassed the signal handlers (by using direct calls to process_event)
everything worked correctly.
The, admittedly contrived, test state machine is designed to do this:
[state_a]--event_a-->[state_b]--event_b-->[state_c]--event_c-->{back-to-state_a}
I would like to know how I can make this design (or something similar) work
using signal handlers to trigger state machine events correctly. Using direct
calls isn't an option for me since I only receive signals to work with.
I've included the test code below. Note that the first half of the main
function exercises the signal handler triggering and the second half of main
exercises the direct call triggering
(compiled using 'g++ main.cpp -o main' or 'clang++ main.cpp -o main'):
#include <cstdlib>
#include <cxxabi.h>
#include <iostream>
#include <memory>
#include <string>

#include <boost/signals2.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/msm/back/state_machine.hpp>
#include <boost/msm/back/tools.hpp>
#include <boost/msm/front/state_machine_def.hpp>
#include <boost/msm/front/functor_row.hpp>
typedef boost::signals2::signal<void()> sig1_t;
//================================================================================
// ------- Sensors section
// Each fake sensor owns one boost.signals2 signal; emit() fires every slot
// currently connected to it (synchronously, on the caller's thread).
struct sensor_a {
sig1_t& get_sig() { return sig; }
void emit() { sig(); }
private:
sig1_t sig;
};
struct sensor_b {
sig1_t& get_sig() { return sig; }
void emit() { sig(); }
private:
sig1_t sig;
};
struct sensor_c {
sig1_t& get_sig() { return sig; }
void emit() { sig(); }
private:
sig1_t sig;
};
//========================================
// Sensors class
// Aggregates the three sensors and exposes them for slot wiring and emitting.
struct Sensors {
sensor_a& get_sa() {
return sa;
}
sensor_b& get_sb() {
return sb;
}
sensor_c& get_sc() {
return sc;
}
private:
sensor_a sa;
sensor_b sb;
sensor_c sc;
};
// ----- Events
// State-machine event types; name() is used for diagnostic printing only.
struct event_a {
std::string name() const { return "event_a"; }
};
struct event_b {
std::string name() const { return "event_b"; }
};
struct event_c {
std::string name() const { return "event_c"; }
};
// Note: `exit` does not appear in the transition table below.
struct exit {
std::string name() const { return "exit"; }
};
//================================================================================
// ----- State machine section
namespace msm = boost::msm;
namespace msmf = boost::msm::front;
namespace mpl = boost::mpl;
class Controller; // forward declaration
//========================================
// testmachine class (the state machine)
// Front-end state machine definition: three states cycling
// state_a --event_a--> state_b --event_b--> state_c --event_c--> state_a.
// Every state logs its entry/exit so transitions are visible on stdout.
struct testmachine : msmf::state_machine_def<testmachine>
{
testmachine(Controller& c) : controller(c) {}
// Called by the back-end when the current state has no row for the event.
template <class Fsm,class Event>
void no_transition(Event const& e, Fsm& ,int state) {
std::cout << "testmachine::no_transition -- No transition for event: '"
<< e.name() << "'" << " on state: " << state << std::endl;
}
//---------
// state_a ignores the event (initial entry carries no user event to name()).
struct state_a : msmf::state<> {
template <class Event,class Fsm>
void on_entry(Event const&, Fsm&) const {
std::cout << "state_a::on_entry() " << std::endl;
}
template <class Event,class Fsm>
void on_exit(Event const&, Fsm&) const {
std::cout << "state_a::on_exit()" << std::endl;
}
};
//---------
struct state_b : msmf::state<> {
template <class Event,class Fsm>
void on_entry(Event const& e, Fsm&) const {
std::cout << "state_b::on_entry() -- event: " << e.name() << std::endl;
}
template <class Event,class Fsm>
void on_exit(Event const& e, Fsm&) const {
std::cout << "state_b::on_exit() -- event: " << e.name() << std::endl;
}
};
//---------
struct state_c : msmf::state<> {
template <class Event,class Fsm>
void on_entry(Event const& e, Fsm&) const {
std::cout << "state_c::on_entry() -- event: " << e.name() << std::endl;
}
template <class Event,class Fsm>
void on_exit(Event const& e, Fsm&) const {
std::cout << "state_c::on_exit() -- event: " << e.name() << std::endl;
}
};
//---------
// Set initial state
typedef mpl::vector<state_a> initial_state;
//---------
// Transition table
struct transition_table:mpl::vector<
// Start Event Next Action Guard
msmf::Row < state_a, event_a, state_b, msmf::none, msmf::none >,
msmf::Row < state_b, event_b, state_c, msmf::none, msmf::none >,
msmf::Row < state_c, event_c, state_a, msmf::none, msmf::none >
> {};
private:
// Held by reference; not otherwise used in the code shown.
Controller& controller;
};
// state-machine back-end
typedef msm::back::state_machine<testmachine> TestMachine;
//================================================================================
// --------- controller section
namespace msm = boost::msm;
namespace mpl = boost::mpl;
// debug print helper:
// Demangle a type name via the Itanium C++ ABI (GCC/Clang).
// Returns the input unchanged when demangling fails (e.g. not a mangled name).
std::string demangle(const std::string& mangled) {
    int status = 0;
    // __cxa_demangle returns a malloc()ed buffer (or nullptr on failure);
    // own it with unique_ptr so it is freed even if the std::string copy
    // below throws (the original leaked in that case).
    std::unique_ptr<char, void (*)(void*)> c_name(
        abi::__cxa_demangle(mangled.c_str(), nullptr, nullptr, &status), std::free);
    if (c_name)
        return std::string(c_name.get());
    return mangled;
}
// debug print helper (from boost msm documentation):
// Prints the current state name of each orthogonal region of `sm`
// (pattern taken from the boost msm documentation).
void pstate(TestMachine const& sm) {
typedef TestMachine::stt Stt;
typedef msm::back::generate_state_set<Stt>::type all_states;
// Filled on every call; static so fill_state_names can write into it.
static char const* state_names[mpl::size<all_states>::value];
mpl::for_each<all_states,boost::msm::wrap<mpl::placeholders::_1> >
(msm::back::fill_state_names<Stt>(state_names));
for (unsigned int i=0;i<TestMachine::nr_regions::value;++i){
std::cout << " -> " << demangle(state_names[sm.current_state()[i]])
<< std::endl;
}
}
//========================================
// Controller class
// Owns the state machine and wires the sensor signals to event triggers.
struct Controller {
Controller(Sensors& s) :
sensors(s),
tm(boost::ref(*this)) {
// NOTE(review): boost::bind(..., *this) stores a COPY of this Controller
// (including its own TestMachine); each slot therefore drives the copy's
// machine, not `tm` — this is the cause of the observed "rollback"
// (see the accepted explanation below). Use `this` or boost::ref(*this)
// to share the one instance.
s.get_sa().get_sig().connect(boost::bind(&Controller::on_sa_event, *this));
s.get_sb().get_sig().connect(boost::bind(&Controller::on_sb_event, *this));
s.get_sc().get_sig().connect(boost::bind(&Controller::on_sc_event, *this));
tm.start();
}
// Signal handlers: log the state before and after triggering the event.
void on_sa_event() {
std::cout << "Controller::on_sa_event function entered ++++++++" << std::endl;
current_state(__FUNCTION__);
trigger_event_a();
current_state(__FUNCTION__);
std::cout << "Controller::on_sa_event function exiting --------" << std::endl;
};
void on_sb_event() {
std::cout << "Controller::on_sb_event function entered ++++++++" << std::endl;
current_state(__FUNCTION__);
trigger_event_b();
current_state(__FUNCTION__);
std::cout << "Controller::on_sb_event function exiting --------" << std::endl;
};
void on_sc_event() {
std::cout << "Controller::on_sc_event function entered ++++++++" << std::endl;
current_state(__FUNCTION__);
trigger_event_c();
current_state(__FUNCTION__);
std::cout << "Controller::on_sc_event function exiting --------" << std::endl;
};
// debug print function
void current_state(const std::string& f) {
std::cout << "\nController::current_state ("
<< "called from function: " << f
<<")" << std::endl;
pstate(tm);
std::cout << std::endl;
}
// Direct triggers: post the event to the machine, then log the new state.
void trigger_event_a() {
std::cout << "Controller::trigger_event_a" << std::endl;
tm.process_event(event_a());
current_state(__FUNCTION__);
}
void trigger_event_b() {
std::cout << "Controller::trigger_event_b" << std::endl;
tm.process_event(event_b());
current_state(__FUNCTION__);
}
void trigger_event_c() {
std::cout << "Controller::trigger_event_c" << std::endl;
tm.process_event(event_c());
current_state(__FUNCTION__);
}
private:
Sensors& sensors;
TestMachine tm;
};
//================================================================================
// --------- main
// First half drives the machine through the sensor signals (demonstrates the
// failure); second half calls the triggers directly (works as expected).
int main() {
Sensors sensors;
Controller controller(sensors);
std::cout << "Exercise state machine using signal handlers (fails):" << std::endl;
controller.current_state("***** main");
sensors.get_sa().emit();
controller.current_state("***** main");
sensors.get_sb().emit();
controller.current_state("***** main");
sensors.get_sc().emit();
controller.current_state("***** main");
std::cout << "\nExercise state machine using direct calls (works):" << std::endl;
controller.current_state("***** main");
controller.trigger_event_a();
controller.current_state("***** main");
controller.trigger_event_b();
controller.current_state("***** main");
controller.trigger_event_c();
controller.current_state("***** main");
}
Here is the output:
1 state_a::on_entry()
2 Exercise state machine using signal handlers (fails):
3 Controller::current_state (called from function: ***** main)
4 -> testmachine::state_a
5 Controller::on_sa_event function entered ++++++++
6 Controller::current_state (called from function: on_sa_event)
7 -> testmachine::state_a
8 Controller::trigger_event_a
9 state_a::on_exit()
10 state_b::on_entry() -- event: event_a
11 Controller::current_state (called from function: trigger_event_a)
12 -> testmachine::state_b
13 Controller::current_state (called from function: on_sa_event)
14 -> testmachine::state_b
15 Controller::on_sa_event function exiting --------
16 Controller::current_state (called from function: ***** main)
17 -> testmachine::state_a
18 Controller::on_sb_event function entered ++++++++
19 Controller::current_state (called from function: on_sb_event)
20 -> testmachine::state_a
21 Controller::trigger_event_b
22 testmachine::no_transition -- No transition for event: 'event_b' on state: 0
23 Controller::current_state (called from function: trigger_event_b)
24 -> testmachine::state_a
25 Controller::current_state (called from function: on_sb_event)
26 -> testmachine::state_a
27 Controller::on_sb_event function exiting --------
28 Controller::current_state (called from function: ***** main)
29 -> testmachine::state_a
30 Controller::on_sc_event function entered ++++++++
31 Controller::current_state (called from function: on_sc_event)
32 -> testmachine::state_a
33 Controller::trigger_event_c
34 testmachine::no_transition -- No transition for event: 'event_c' on state: 0
35 Controller::current_state (called from function: trigger_event_c)
36 -> testmachine::state_a
37 Controller::current_state (called from function: on_sc_event)
38 -> testmachine::state_a
39 Controller::on_sc_event function exiting --------
40 Controller::current_state (called from function: ***** main)
41 -> testmachine::state_a
42 Exercise state machine using direct calls (works):
43 Controller::current_state (called from function: ***** main)
44 -> testmachine::state_a
45 Controller::trigger_event_a
46 state_a::on_exit()
47 state_b::on_entry() -- event: event_a
48 Controller::current_state (called from function: trigger_event_a)
49 -> testmachine::state_b
50 Controller::current_state (called from function: ***** main)
51 -> testmachine::state_b
52 Controller::trigger_event_b
53 state_b::on_exit() -- event: event_b
54 state_c::on_entry() -- event: event_b
55 Controller::current_state (called from function: trigger_event_b)
56 -> testmachine::state_c
57 Controller::current_state (called from function: ***** main)
58 -> testmachine::state_c
59 Controller::trigger_event_c
60 state_c::on_exit() -- event: event_c
61 state_a::on_entry()
62 Controller::current_state (called from function: trigger_event_c)
63 -> testmachine::state_a
64 Controller::current_state (called from function: ***** main)
65 -> testmachine::state_a
I added line numbers by post-processing the output file for easier reference.
Line 01 of the output shows that the state machine correctly transitioned from
the initial pseudo-state to state_a.
Line 14 of the output shows that the state machine correctly transitioned from
state_a to state_b when inside of the on_sa_event function.
However, line 17 shows the state machine returned to state_a when tested from
main (!)
The state machine remains in state_a for the remaining transitions of the
signal handler tests (lines 18-41), resulting in a few 'No Transition' error
messages.
For the direct call exercise (output lines 42-65), the state machine transitions
correctly through all states and there is no difference in its 'current state'
from within the triggering function and when in main (after the triggering
function call).
Environment:
OS: "Ubuntu 16.04 LTS"
g++ version: (Ubuntu 5.3.1-14ubuntu2) 5.3.1 20160413
boost version: boost_1_60_0
The problem is caused by copying *this. See the following code. boost::bind copies *this. Each copied *this is at the initial state (state_a). That's why you experienced the rollback.
s.get_sa().get_sig().connect(boost::bind(&Controller::on_sa_event, *this));
s.get_sb().get_sig().connect(boost::bind(&Controller::on_sb_event, *this));
s.get_sc().get_sig().connect(boost::bind(&Controller::on_sc_event, *this));
If you copy the this pointer as follows, your code works as you expected.
s.get_sa().get_sig().connect(boost::bind(&Controller::on_sa_event, this));
s.get_sb().get_sig().connect(boost::bind(&Controller::on_sb_event, this));
s.get_sc().get_sig().connect(boost::bind(&Controller::on_sc_event, this));
You can also bind the reference of *this as follows:
s.get_sa().get_sig().connect(boost::bind(&Controller::on_sa_event, boost::ref(*this)));
s.get_sb().get_sig().connect(boost::bind(&Controller::on_sb_event, boost::ref(*this)));
s.get_sc().get_sig().connect(boost::bind(&Controller::on_sc_event, boost::ref(*this)));

What's wrong with this boost::asio and boost::coroutine usage pattern?

In this question I described boost::asio and boost::coroutine usage pattern which causes random crashes of my application and I published extract from my code and valgrind and GDB output.
In order to investigate the problem further I created smaller proof of concept application which applies the same pattern. I saw that the same problem arises in the smaller program which source I publish here.
The code starts a few threads and creates a connection pool with a few dummy connections (user-supplied numbers). Additional arguments are unsigned integer numbers which play the role of fake requests. The dummy implementation of the sendRequest function just starts an asynchronous timer that waits a number of seconds equal to the input number and yields from the function.
Can someone see the problem with this code and can he propose some fix for it?
#include "asiocoroutineutils.h"
#include "concurrentqueue.h"
#include <iostream>
#include <thread>
#include <boost/lexical_cast.hpp>
using namespace std;
using namespace boost;
using namespace utils;
#define id this_thread::get_id() << ": "
// ---------------------------------------------------------------------------
/*!
* \brief This is a fake Connection class
*/
class Connection
{
public:
Connection(unsigned connectionId)
: _id(connectionId)
{
}
unsigned getId() const
{
return _id;
}
// Fake request: logs, waits `seconds` on an async timer (the coroutine
// suspends here), then logs the "response". The AsioCoroutineJoinerProxy
// parameter is taken by value so its copy/destruction tracks this
// coroutine's lifetime in the joiner.
void sendRequest(asio::io_service& ioService,
unsigned seconds,
AsioCoroutineJoinerProxy,
asio::yield_context yield)
{
cout << id << "Connection " << getId()
<< " Start sending: " << seconds << endl;
// waiting on this timer is palceholder for any asynchronous operation
asio::steady_timer timer(ioService);
timer.expires_from_now(chrono::seconds(seconds));
coroutineAsyncWait(timer, yield);
cout << id << "Connection " << getId()
<< " Received response: " << seconds << endl;
}
private:
unsigned _id;
};
typedef std::unique_ptr<Connection> ConnectionPtr;
typedef std::shared_ptr<asio::steady_timer> TimerPtr;
// ---------------------------------------------------------------------------
// Pool of connections shared by all worker threads. A coroutine that finds
// the pool empty parks itself on a far-future timer; returning a connection
// cancels the newest parked timer to wake that waiter.
class ConnectionPool
{
public:
ConnectionPool(size_t connectionsCount)
{
for(size_t i = 0; i < connectionsCount; ++i)
{
cout << "Creating connection: " << i << endl;
_connections.emplace_back(new Connection(i));
}
}
// Blocks (suspends the coroutine) until a connection is available.
ConnectionPtr getConnection(TimerPtr timer,
asio::yield_context& yield)
{
lock_guard<mutex> lock(_mutex);
while(_connections.empty())
{
cout << id << "There is no free connection." << endl;
_timers.emplace_back(timer);
// Effectively "wait forever"; woken only by cancel() in addConnection.
timer->expires_from_now(
asio::steady_timer::clock_type::duration::max());
// NOTE(review): manual unlock/lock while `lock` (a lock_guard) is still
// in scope — if coroutineAsyncWait throws, the lock_guard destructor
// unlocks a mutex this thread no longer owns (UB). A unique_lock with
// explicit unlock/lock would at least make the intent checkable.
_mutex.unlock();
coroutineAsyncWait(*timer, yield);
_mutex.lock();
cout << id << "Connection was freed." << endl;
}
cout << id << "Getting connection: "
<< _connections.front()->getId() << endl;
ConnectionPtr connection = std::move(_connections.front());
_connections.pop_front();
return connection;
}
// Returns a connection and wakes one parked waiter, if any.
void addConnection(ConnectionPtr connection)
{
lock_guard<mutex> lock(_mutex);
cout << id << "Returning connection " << connection->getId()
<< " to the pool." << endl;
_connections.emplace_back(std::move(connection));
if(_timers.empty())
return;
auto timer = _timers.back();
_timers.pop_back();
// Post the cancel to the waiter's own io_service so cancel() runs on the
// thread that owns the timer (timers are not thread-safe objects).
auto& ioService = timer->get_io_service();
ioService.post([timer]()
{
cout << id << "Wake up waiting getConnection." << endl;
timer->cancel();
});
}
private:
mutex _mutex;
deque<ConnectionPtr> _connections;
deque<TimerPtr> _timers;
};
typedef unique_ptr<ConnectionPool> ConnectionPoolPtr;
// ---------------------------------------------------------------------------
// RAII handle: acquires a pooled connection in the constructor (which may
// suspend the coroutine while waiting) and returns it in the destructor.
class ScopedConnection
{
public:
ScopedConnection(ConnectionPool& pool,
asio::io_service& ioService,
asio::yield_context& yield)
: _pool(pool)
{
// The timer is used by getConnection to park this coroutine if the
// pool is currently empty.
auto timer = make_shared<asio::steady_timer>(ioService);
_connection = _pool.getConnection(timer, yield);
}
Connection& get()
{
return *_connection;
}
~ScopedConnection()
{
_pool.addConnection(std::move(_connection));
}
private:
ConnectionPool& _pool;
ConnectionPtr _connection;
};
// ---------------------------------------------------------------------------
// One fake request: borrow a connection, spawn an inner coroutine that
// performs the send, and suspend here (joiner.join) until it finishes, so
// the ScopedConnection is not returned to the pool prematurely.
void sendRequest(asio::io_service& ioService,
ConnectionPool& pool,
unsigned seconds,
asio::yield_context yield)
{
cout << id << "Constructing request ..." << endl;
AsioCoroutineJoiner joiner(ioService);
ScopedConnection connection(pool, ioService, yield);
// NOTE(review): bind stores a COPY of the Connection returned by
// connection.get() (bind takes arguments by value); the spawned coroutine
// therefore runs on the copy, not the pooled instance — verify this is
// intended.
asio::spawn(ioService, bind(&Connection::sendRequest,
connection.get(),
std::ref(ioService),
seconds,
AsioCoroutineJoinerProxy(joiner),
placeholders::_1));
joiner.join(yield);
cout << id << "Processing response ..." << endl;
}
// ---------------------------------------------------------------------------
// Worker thread body: each thread owns a private io_service, drains the
// shared request queue into spawned coroutines, then runs them to completion.
void threadFunc(ConnectionPool& pool,
ConcurrentQueue<unsigned>& requests)
{
try
{
asio::io_service ioService;
while(true)
{
unsigned request;
// Stop scheduling once the shared queue is empty.
if(!requests.tryPop(request))
break;
cout << id << "Scheduling request: " << request << endl;
asio::spawn(ioService, bind(sendRequest,
std::ref(ioService),
std::ref(pool),
request,
placeholders::_1));
}
// Blocks until every spawned coroutine has finished.
ioService.run();
}
catch(const std::exception& e)
{
cerr << id << "Error: " << e.what() << endl;
}
}
// ---------------------------------------------------------------------------
// Usage: ./async_request poolSize threadsCount r0 r1 ...
// Fills the request queue, creates the shared pool, starts the workers and
// joins them.
int main(int argc, char* argv[])
{
if(argc < 3)
{
cout << "Usage: ./async_request poolSize threadsCount r0 r1 ..."
<< endl;
return -1;
}
try
{
auto poolSize = lexical_cast<size_t>(argv[1]);
auto threadsCount = lexical_cast<size_t>(argv[2]);
ConcurrentQueue<unsigned> requests;
// Remaining arguments are the fake request durations (seconds).
for(int i = 3; i < argc; ++i)
{
auto request = lexical_cast<unsigned>(argv[i]);
requests.tryPush(request);
}
ConnectionPoolPtr pool(new ConnectionPool(poolSize));
vector<unique_ptr<thread>> threads;
for(size_t i = 0; i < threadsCount; ++i)
{
threads.emplace_back(
new thread(threadFunc, std::ref(*pool), std::ref(requests)));
}
for_each(threads.begin(), threads.end(), mem_fn(&thread::join));
}
catch(const std::exception& e)
{
cerr << "Error: " << e.what() << endl;
}
return 0;
}
Here are some helper utilities used by the above code:
#pragma once
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/spawn.hpp>
namespace utils
{
// Suspends the calling coroutine until `timer` fires or is cancelled.
// Cancellation (operation_aborted) is treated as a normal wake-up; any
// other error is converted into an exception.
inline void coroutineAsyncWait(boost::asio::steady_timer& timer,
boost::asio::yield_context& yield)
{
boost::system::error_code ec;
timer.async_wait(yield[ec]);
if(ec && ec != boost::asio::error::operation_aborted)
throw std::runtime_error(ec.message());
}
// Counting latch built from a timer: join() parks the caller on a far-future
// async_wait; when dec() brings the count to zero the timer is cancelled,
// which wakes the joined coroutine (cancel is a normal wake-up, see
// coroutineAsyncWait).
class AsioCoroutineJoiner
{
public:
explicit AsioCoroutineJoiner(boost::asio::io_service& io)
: _timer(io), _count(0) {}
// Must be called after at least one inc() (asserted below).
void join(boost::asio::yield_context yield)
{
assert(_count > 0);
_timer.expires_from_now(
boost::asio::steady_timer::clock_type::duration::max());
coroutineAsyncWait(_timer, yield);
}
void inc()
{
++_count;
}
void dec()
{
assert(_count > 0);
--_count;
if(0 == _count)
_timer.cancel();
}
private:
boost::asio::steady_timer _timer;
std::size_t _count;
}; // AsioCoroutineJoiner class
// RAII counter handle for AsioCoroutineJoiner: every construction/copy
// increments the count and every destruction decrements it, so passing a
// proxy by value through bind/spawn tracks the spawned coroutine's lifetime.
class AsioCoroutineJoinerProxy
{
public:
AsioCoroutineJoinerProxy(AsioCoroutineJoiner& joiner)
: _joiner(joiner)
{
_joiner.inc();
}
AsioCoroutineJoinerProxy(const AsioCoroutineJoinerProxy& joinerProxy)
: _joiner(joinerProxy._joiner)
{
_joiner.inc();
}
~AsioCoroutineJoinerProxy()
{
_joiner.dec();
}
private:
AsioCoroutineJoiner& _joiner;
}; // AsioCoroutineJoinerProxy class
} // utils namespace
For completeness of the code the last missing part is ConcurrentQueue class. It is too long to paste it here, but if you want you can find it here.
Example usage of the application is:
./connectionpooltest 3 3 5 7 8 1 0 9 2 4 3 6
where the first number 3 are fake connections count and the second number 3 are the number of used threads. Numbers after them are fake requests.
The output of valgrind and GDB is the same as in the mentioned above question.
Used version of boost is 1.57. The compiler is GCC 4.8.3. The operating system is CentOS Linux release 7.1.1503
It seems that all valgrind errors are caused because of BOOST_USE_VALGRIND macro is not defined as Tanner Sansbury points in comment related to this question. It seems that except this the program is correct.

How to get boost::asio::io_service current action number

Boost::asio::io_service provides "handler tracking" for debugging purposes, it is enabled by defining BOOST_ASIO_ENABLE_HANDLER_TRACKING but logs its data to stderr. I'd like to use this tracking information in my application. My question is what is the best way to get access to the <action> inside my application?
For more context as to why I want to do this; I would like to attach the <action> as a parameter to other async operations so that I can track where the originating request came from.
Asio does not expose its handler tracking data. Attempting to extract the tracking information contained within Asio would be far more of a dirty hack than rolling ones own custom handler.
Here is a snippet from Asio's handler tracking:
// Excerpt of Asio's internal handler-tracking machinery. Everything here
// lives in the `detail` namespace: the ids are private to the library and
// deliberately not exposed to user code.
namespace boost {
namespace asio {
namespace detail {
class handler_tracking
{
public:
class completion;
// Base class for objects containing tracked handlers.
class tracked_handler
{
private:
// Only the handler_tracking class will have access to the id.
friend class handler_tracking;
friend class completion;
uint64_t id_;
// ...
// NOTE(review): this second private section (with invoked_/next_) looks like
// the body of the `completion` class from the original Asio source; the
// excerpt appears to have merged the two classes — compare against Asio's
// handler_tracking.hpp before relying on this layout.
private:
friend class handler_tracking;
uint64_t id_;
bool invoked_;
completion* next_;
};
// ...
private:
// Opaque process-wide tracking state; only reachable through get_state().
struct tracking_state;
static tracking_state* get_state();
};
} // namespace detail
} // namespace asio
} // namespace boost
As others have mentioned, passing a GUID throughout the handlers would allow one to associate multiple asynchronous operations. One non-intrusive way to accomplish this is to create a custom tracking handler type that wraps existing handlers and manages the tracking data. For an example on custom handlers, see the Boost.Asio Invocation example.
Also, be aware that if a custom handler type is used, one should be very careful when composing handlers. In particular, the custom handler type's invocation hook (asio_handler_invoke()) may need to account for the context of other handlers. For example, if one does not explicitly account for the wrapped handler returned from strand::wrap(), then it will prevent intermediate operations from running in the correct context for composed operations. To avoid having to explicitly handle this, one can wrap the custom handler with strand::wrap():
boost::asio::async_read(..., strand.wrap(tracker.wrap(&handle_read))); // Good.
boost::asio::async_read(..., tracker.wrap(strand.wrap(&handle_read))); // Bad.
An example that mimics asio debug handler tracking. Caveats:
Assumes the ioService is only run from a single thread. I never use it any other way, so I'm not sure what would need to change to lift this limitation.
Non-thread safe access to std::cerr - fixing this left as an exercise.
Code:
#include <atomic>
#include <cstdint>
#include <iostream>
#include <utility>

#include <boost/asio.hpp>
#include <boost/atomic.hpp>
// Mimics Asio's BOOST_ASIO_ENABLE_HANDLER_TRACKING trace (written to
// std::cerr) while also exposing the current handler id to the application,
// so it can be attached to follow-up async operations.
//
// Trace format (matching Asio's own tracking):
//   <parent>*<child>  handler <child> created while <parent> was running
//   ><id> / <<id>     handler <id> invocation entered / returned
//   !<id>             handler <id> exited via exception
//   ~<id>             handler <id> destroyed without ever being invoked
class HandlerTracking
{
public:
    HandlerTracking()
        :
        mCount(1) // ids start at 1; id 0 means "no tracked handler running"
    { }

    // Decorator around a user handler; logs creation, invocation, abnormal
    // exit, and destruction-without-invocation of the wrapped handler.
    template <class Handler>
    class WrappedHandler
    {
    public:
        WrappedHandler(HandlerTracking& t, Handler h, std::uint64_t id)
            :
            mHandlerTracking(t),
            mHandler(h),
            mId(id)
        { }

        // Copying transfers "pending" status to the copy: the source is
        // marked invoked so only the last surviving copy reports '~id'
        // when the handler is dropped without running.
        WrappedHandler(const WrappedHandler& other)
            :
            mHandlerTracking(other.mHandlerTracking),
            mHandler(other.mHandler),
            mId(other.mId),
            mInvoked(other.mInvoked)
        {
            other.mInvoked = true;
        }

        ~WrappedHandler()
        {
            if (!mInvoked)
                std::cerr << '~' << mId << std::endl;
        }

        // Perfect-forward the arguments to the real handler (the original
        // took them by value, forcing a copy of every argument).
        template <class... Args>
        void operator()(Args&&... args)
        {
            mHandlerTracking.mCurrHandler = mId;
            std::cerr << '>' << mId << std::endl;
            try
            {
                mInvoked = true;
                mHandler(std::forward<Args>(args)...);
            }
            catch(...)
            {
                std::cerr << '!' << mId << std::endl;
                throw;
            }
            std::cerr << '<' << mId << std::endl;
        }

        // Fixed: was `const std::uint64_t id()` — the const belongs on the
        // member function, not on the returned value.
        std::uint64_t id() const { return mId; }

    private:
        HandlerTracking& mHandlerTracking;
        Handler mHandler;
        const std::uint64_t mId;
        mutable bool mInvoked = false; // mutable: copy ctor marks the source invoked
    };

    // Wrap `handler`, assigning the next id and logging parent*child.
    template <class Handler>
    WrappedHandler<Handler> wrap(Handler handler)
    {
        auto next = mCount.fetch_add(1);
        std::cerr << mCurrHandler << '*' << next << std::endl;
        return WrappedHandler<Handler>(*this, handler, next);
    }

    // std::atomic suffices here; the boost::atomic dependency is not needed.
    std::atomic<std::uint64_t> mCount;
    std::uint64_t mCurrHandler = 0; // Note: If ioService run on multiple threads we need a curr handler per thread
};
// Custom invokation hook for wrapped handlers
//template <typename Function, typename Handler>
//void asio_handler_invoke(Function f, HandlerTracking::WrappedHandler<Handler>* h)
//{
// std::cerr << "Context: " << h << ", " << h->id() << ", " << f.id() << std::endl;
// f();
//}
// Class to demonstrate callback with arguments
// Minimal socket stand-in used to demonstrate tracked handlers that take
// arguments: every async_read "completes" through the io_service with a
// fixed byte count.
class MockSocket
{
public:
    MockSocket(boost::asio::io_service& ioService) : mIoService(ioService) {}

    template <class ReadHandler>
    void async_read(ReadHandler handler)
    {
        // Post the completion rather than invoking it inline, as a real
        // socket would; we always read 42 bytes.
        auto completion = [handler]() mutable { handler(42); };
        mIoService.post(completion);
    }

private:
    boost::asio::io_service& mIoService;
};
// Demo driver: posts a chain of tracked handlers plus one tracked socket
// completion, and deliberately leaves one wrapped handler uninvoked so the
// '~id' destruction trace can be observed.
int main(int argc, char* argv[])
{
    boost::asio::io_service ioService;
    HandlerTracking tracking;
    MockSocket socket(ioService);

    // Each handler (after the first) re-posts its predecessor, producing
    // parent*child creation records in the trace.
    std::function<void()> handlerA = [&]() { std::cout << "Handler1" << std::endl; };
    std::function<void()> handlerB = [&]() { std::cout << "Handler2" << std::endl; ioService.post(tracking.wrap(handlerA)); };
    std::function<void()> handlerC = [&]() { std::cout << "Handler3" << std::endl; ioService.post(tracking.wrap(handlerB)); };
    std::function<void()> handlerD = [&]() { std::cout << "Handler4" << std::endl; ioService.post(tracking.wrap(handlerC)); };
    std::function<void(int)> onRead = [](int s) { std::cout << "Socket read " << s << " bytes" << std::endl; };

    socket.async_read(tracking.wrap(onRead));
    ioService.post(tracking.wrap(handlerA));
    ioService.post(tracking.wrap(handlerB));
    ioService.post(tracking.wrap(handlerC));

    auto tmp = tracking.wrap(handlerD); // example handler destroyed without invocation

    ioService.run();
    return 0;
}
Output:
0*1
0*2
0*3
0*4
0*5
>1
Socket read 42 bytes
<1
>2
Handler1
<2
>3
Handler2
3*6
<3
>4
Handler3
4*7
<4
>6
Handler1
<6
>7
Handler2
7*8
<7
>8
Handler1
<8
~5