I am trying to implement a basic deadline timer using this code:
class Example
{
    Example(boost::asio::io_service& ios, config& cfg)
        : ios_(ios), cfg_(cfg), tcp_client_(ios)
    {
        state = new State();
        boost::asio::deadline_timer t(ios, boost::posix_time::seconds(5));
        t.async_wait(boost::bind(&bse_dummy_exchange::start_heartbeats, this,
                                 boost::asio::placeholders::error, boost::ref(t)));
    }

    ~Example() = default;

    void start_heartbeats(const boost::system::error_code& e, boost::asio::deadline_timer& t)
    {
        std::cout << "Hello, world!\n";
        t.expires_from_now(boost::posix_time::seconds(5));
        t.async_wait(boost::bind(&bse_dummy_exchange::start_heartbeats, this,
                                 boost::asio::placeholders::error, boost::ref(t)));
    }
};
Compilation goes fine, but at runtime I get this error message, which I don't understand. Can someone please help me with it?
Hello, world!
bse_dummy_exchange: ../nptl/pthread_mutex_lock.c:425:
__pthread_mutex_lock_full: Assertion `INTERNAL_SYSCALL_ERRNO (e, __err)
!= ESRCH || !robust' failed.
Aborted (core dumped)
You don't show the mutex, so we can't answer that part.
That said, just about everything that can go wrong with async code is going wrong here:
You have a memory leak (state is an owned raw pointer member, but you defaulted the destructor; see the Rule of Three on cppreference).
This is UB:
boost::asio::deadline_timer t(ios, boost::posix_time::seconds(5));
t.async_wait(boost::bind(&bse_dummy_exchange::start_heartbeats,this,boost::asio::placeholders::error,boost::ref(t)));
The async_* calls return immediately, but the operation runs ... well ... asynchronously. In your case t is a local variable that goes out of scope as soon as the constructor returns, so that's not going to work.
Exactly the same problem exists in start_heartbeats.
(For the sake of understanding your code, I'm assuming that Example was actually named bse_dummy_exchange.)
At the very least, the timer needs to have a lifetime that extends beyond the async_wait operation.
Minimal Fixed Version
Of course, this does not fix anything related to the mutex error, since that code was not included:
Live On Coliru
#include <boost/asio.hpp>
#include <chrono>
#include <iostream>
#include <memory>

struct config { };

struct TcpClient {
    TcpClient(boost::asio::io_service& ios) : ios_(ios) {}
  private:
    boost::asio::io_service& ios_;
};

struct Example {
    struct State {};
    std::unique_ptr<State> state;

    Example(boost::asio::io_service& ios, config& cfg)
        : state(std::make_unique<State>()),
          ios_(ios),
          cfg_(cfg),
          tcp_client_(ios)
    {
        heartbeats();
    }

    void heartbeats(const boost::system::error_code& e = {}) {
        std::cout << "Hello, world!" << std::endl;
        if (!e) {
            t.expires_from_now(boost::posix_time::seconds(5));
            t.async_wait([this](auto ec) { heartbeats(ec); });
        }
    }

  private:
    boost::asio::io_service& ios_;
    config cfg_;
    TcpClient tcp_client_;
    boost::asio::deadline_timer t{ios_};
};

int main() {
    boost::asio::io_service ios;
    config cfg;

    Example ex(ios, cfg);
    ios.run_for(std::chrono::seconds(12));
}
Prints
Hello, world!
Hello, world!
Hello, world!
It has no memory leak, and runs clean under UBSan/ASan
Related
As the title says, I'm having a problem: I can't understand why this thread never stops running, even after Client's destructor destroys the asio::io_service::work variable.
When I run this program, the output is always the same.
Does anyone see what I'm missing here?
#include <boost/asio.hpp>
#include <thread>
#include <atomic>
#include <memory>
#include <iostream>

using namespace boost;

class Client {
  public:
    Client(const Client& other) = delete;

    Client()
    {
        m_work.reset(new boost::asio::io_service::work(m_ios));
        m_thread.reset(new std::thread([this]()
        {
            m_ios.run();
        }));
    }

    ~Client() { close(); }

  private:
    void close();

    asio::io_service m_ios;
    std::unique_ptr<boost::asio::io_service::work> m_work;
    std::unique_ptr<std::thread> m_thread;
};

void Client::close()
{
    m_work.reset(nullptr);
    if (m_thread->joinable())
    {
        std::cout << "before joining thread" << std::endl;
        m_thread->join();
        std::cout << "after joining thread" << std::endl;
    }
}

int main()
{
    {
        Client client;
    }
    return 0;
}
EDIT
After StPiere's comments I changed the code to this and it worked :)
class Client {
  public:
    Client(const Client& other) = delete;

    Client()
    {
        m_work.reset(new boost::asio::executor_work_guard<boost::asio::io_context::executor_type>(
            boost::asio::make_work_guard(m_ios)));
        m_thread.reset(new std::thread([this]()
        {
            m_ios.run();
        }));
    }

    ~Client() { close(); }

  private:
    void close();

    asio::io_context m_ios;
    std::unique_ptr<boost::asio::executor_work_guard<boost::asio::io_context::executor_type>> m_work;
    std::unique_ptr<std::thread> m_thread;
};
I cannot reproduce the error with either compiler.
Here is an example for gcc 9.3 and boost 1.73.
Normally the work destructor will use something like InterLockedDecrement on Windows to decrement the number of outstanding work items.
It looks like a compiler or io_service/work implementation issue.
As stated in the comments, io_service and io_service::work are deprecated in favor of io_context and executor_work_guard.
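For reference, a minimal sketch of the non-deprecated idiom (assuming Boost 1.66 or later, where io_context and make_work_guard are available):
#include <boost/asio.hpp>
#include <iostream>
#include <thread>

int main()
{
    boost::asio::io_context ioc;

    // Keep run() from returning while there is no pending work yet.
    auto guard = boost::asio::make_work_guard(ioc);

    std::thread worker([&ioc] { ioc.run(); });

    std::cout << "press enter to stop" << std::endl;
    std::cin.ignore();

    guard.reset(); // allow run() to return once outstanding work drains
    worker.join();
}
Resetting the guard lets run() return as soon as any outstanding handlers have finished, after which the worker thread can be joined.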
Suppose you have some external synchronous code that you cannot modify, and you need it to run asynchronously but also be cancellable. If the external code is blocking, then I have two options.
A) Fool the user and let my async method return immediately on cancellation, well aware that the code is still running to completion somewhere.
B) Cancel execution
I would like to implement an interface for option B
#include <chrono>
#include <cstdint>
#include <exception>
#include <future>
#include <string>
#include <thread>

namespace externallib {

    std::uint64_t timeconsuming_operation()
    {
        std::uint64_t count = 0;
        for (std::uint64_t i = 0; i < 1E+10; ++i)
        {
            count++;
        }
        return count;
    }

}

template <typename R>
struct async_operation
{
    struct CancelledOperationException
    {
        std::string what() const
        {
            return what_;
        }

      private:
        std::string what_{ "Operation was cancelled." };
    };

    template <typename Callable>
    async_operation(Callable&& c)
    {
        t_ = std::thread([this, c]()
        {
            promise_.set_value(c()); // <-- Does not care about cancel(), mostly because c() hasn't finished..
        });
    }

    std::future<R> get()
    {
        return promise_.get_future();
    }

    void cancel()
    {
        promise_.set_exception(std::make_exception_ptr(CancelledOperationException()));
    }

    ~async_operation()
    {
        if (t_.joinable())
            t_.join();
    }

  private:
    std::thread t_;
    std::promise<R> promise_;
};

void foo()
{
    async_operation<std::uint64_t> op([]()
    {
        return externallib::timeconsuming_operation();
    });

    using namespace std::chrono_literals;
    std::this_thread::sleep_for(5s);

    op.cancel();
    op.get();
}
With the code above I cannot wrap my head around the limitation that the external code is blocking: how, if at all, is it possible to cancel execution early?
Short answer:
Don't cancel/terminate thread execution unless it is mission critical. Use approach "A" instead.
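For what it's worth, a minimal sketch of approach "A" could look like this (my own illustration, not part of the original answer; blocking_work is a hypothetical stand-in for the external call). The caller simply stops waiting for the future; the blocking call still runs to completion on its own thread, which gets reclaimed later (here at the end of main, in the question's design in the destructor of async_operation).
#include <chrono>
#include <cstdint>
#include <future>
#include <iostream>
#include <memory>
#include <thread>

// Hypothetical stand-in for externallib::timeconsuming_operation().
std::uint64_t blocking_work() {
    std::this_thread::sleep_for(std::chrono::seconds(10));
    return 42;
}

int main() {
    auto prom = std::make_shared<std::promise<std::uint64_t>>();
    std::future<std::uint64_t> fut = prom->get_future();

    // The worker thread owns the promise via the shared_ptr it captures,
    // so it can finish long after the caller has lost interest.
    std::thread worker([prom] { prom->set_value(blocking_work()); });

    if (fut.wait_for(std::chrono::seconds(1)) == std::future_status::timeout) {
        // "Cancelled" from the caller's point of view: we stop waiting for the
        // result, but the external code still runs to completion in `worker`.
        std::cout << "gave up waiting\n";
    } else {
        std::cout << "result: " << fut.get() << '\n';
    }

    worker.join(); // the thread is still reclaimed eventually (e.g. in a destructor)
}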
Long answer:
As @Caleth noted, there is no standard or cross-platform way to do this. All you can do is get a native handle to the thread and use a platform-specific function. But there are some important pitfalls.
win32
You may terminate a thread with the TerminateThread function, but:
stack variables will not be destructed
thread_local variables will not be destructed
DLLs will not be notified
MSDN says:
TerminateThread is a dangerous function that should only be used in
the most extreme cases.
pthread
Here the situation is slightly better. You have a chance to free your resources when pthread_cancel gets called, but:
By default, the target thread terminates only at cancellation points. That means you cannot cancel code that doesn't contain any cancellation point; basically, for(;;); won't be cancelled at all.
Once a cancellation point is reached, an implementation-specific exception is thrown, so resources can be gracefully freed.
Keep in mind that this exception can be caught by try/catch, but it must be re-thrown.
This behavior can be disabled with pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, nullptr);, but in case a cancellation point is not met, resources won't be freed (as on win32).
Example
#include <iostream>
#include <thread>
#include <chrono>

#if defined(_WIN32)

#include <Windows.h>

void kill_thread(HANDLE thread) {
    TerminateThread(thread, 0);
}

#else

#include <pthread.h>

void kill_thread(pthread_t thread) {
    pthread_cancel(thread);
}

#endif

class my_class {
  public:
    my_class()  { std::cout << "my_class::my_class()"  << std::endl; }
    ~my_class() { std::cout << "my_class::~my_class()" << std::endl; }
};

void cpu_intensive_func() {
#if !defined(_WIN32)
    pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, nullptr);
#endif
    my_class cls;
    for (;;) {}
}

void io_func() {
    my_class cls;
    int a;
    std::cin >> a;
}

void io_func_with_try_catch() {
    my_class cls;
    try {
        int a;
        std::cin >> a;
    } catch (...) {
        std::cout << "exception caught!" << std::endl;
        throw;
    }
}

void test_cancel(void (*thread_fn)(void)) {
    std::thread t(thread_fn);
    std::this_thread::sleep_for(std::chrono::seconds(1));
    kill_thread(t.native_handle());
    t.join();
    std::cout << "thread exited" << std::endl;
    std::cout << "--------------------" << std::endl;
}

int main() {
    test_cancel(cpu_intensive_func);
    test_cancel(io_func);
    test_cancel(io_func_with_try_catch);
    return 0;
}
You may see that:
The destructor is never called on Windows.
Removing the pthread_setcanceltype call leads to a hang.
The internal pthread exception can be caught.
There is no portable way to end a thread before it wants to.
Depending on your platform, there may be ways of ending a thread, which you will probably need to get std::thread::native_handle to utilise. This is highly likely to lead to undefined behaviour, so I don't recommend it.
You can run that external synchronous code in another process and terminate that entire process. This way the interruption won't affect your process or cause undefined behaviour in it.
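A minimal POSIX-only sketch of that separate-process idea (my own illustration; it assumes the result can simply be discarded on cancellation and omits error handling and result transport):
#include <chrono>
#include <cstdint>
#include <iostream>
#include <thread>
#include <signal.h>
#include <sys/wait.h>
#include <unistd.h>

// Hypothetical stand-in for the external blocking call.
std::uint64_t timeconsuming_operation() {
    std::this_thread::sleep_for(std::chrono::seconds(60));
    return 42;
}

int main() {
    pid_t child = fork();
    if (child == 0) {
        // Child: run the blocking code. A real program would report the result
        // back through a pipe, shared memory, or a file.
        timeconsuming_operation();
        _exit(0);
    }

    // Parent: "cancel" after 5 seconds by killing the whole child process.
    std::this_thread::sleep_for(std::chrono::seconds(5));
    kill(child, SIGKILL);
    waitpid(child, nullptr, 0);

    std::cout << "external operation cancelled\n";
}
On Windows the equivalent would be CreateProcess/TerminateProcess, with the same caveat that any result has to be shipped back via some form of IPC.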
I have come across a use case where I would like to use a Boost strand in conjunction with a std::future.
To reduce code duplication, I have written a generic function which will post a task to a boost strand and return the future.
// Some definitions first...
typedef boost::asio::io_service::strand cb_strand;
typedef std::shared_ptr< cb_strand > cb_strand_ptr;
The code looks something like:
//////////////////////////////////////////////////////////////////////////
template <class Task>
auto post_future_to_strand(cb_strand_ptr apStrand, Task task)
{
    using return_type = decltype(task());

    auto promise = std::make_shared<std::promise<return_type>>();
    auto future  = promise->get_future();

    apStrand->wrap
    (
        [promise, task]()
        {
            try
            {
                promise->set_value(task());
            }
            catch (...)
            {
                // LOG ERROR ...

                // NOTE: Exceptions can be thrown when setting the exception!
                try
                {
                    promise->set_exception(std::current_exception());
                }
                catch (...)
                {
                    // LOG ERROR ...
                }
            }
        }
    );

    return future;
}
I then hoped to post a future to a strand as presented in the following example:
std::future<int> f = post_future_to_strand(m_apStrand, std::bind(&foo::bar, this));
std::cout << "foo::bar() -> int is " << f.get() << std::endl;
Unfortunately, I get a runtime exception:
terminate called after throwing an instance of 'std::future_error'
what(): std::future_error: Broken promise
Signal: SIGABRT (Aborted)
Having read the docs, I think I understand what a broken promise is and how the situation arises; however, I feel like I am capturing the promise in the lambda so all should be well. I am a newcomer to this world of lambdas, so perhaps my understanding is amiss.
Ubuntu Zesty
GCC 6.3 (configured for C++14 with cmake)
You wrap the task, but you never post it. Therefore, the wrapped task is immediately destructed, and with it the promise.
There's another pitfall: things only work if you run the io_service on a different thread from the one blocking on the future... otherwise you have created a deadlock:
Live On Coliru (deadlock)
Now that you have multiple threads, you need to avoid the race-condition where the service exits before the task is posted in the first place.
Bonus:
I'd suggest a far simpler take on the wrapper:
template <typename Task>
auto post_future_to_strand(cb_strand_ptr apStrand, Task task)
{
    auto package = std::make_shared<std::packaged_task<decltype(task())()>>(task);
    auto future  = package->get_future();

    apStrand->post([package] { (*package)(); });

    return future;
}
Full Demo
Live On Coliru
#include <boost/asio.hpp>
#include <functional>
#include <future>
#include <iostream>
#include <memory>
#include <thread>

using cb_strand_ptr = boost::asio::strand*;

//////////////////////////////////////////////////////////////////////////
template <typename Task>
auto post_future_to_strand(cb_strand_ptr apStrand, Task task)
{
    auto package = std::make_shared<std::packaged_task<decltype(task())()>>(task);
    auto future  = package->get_future();

    apStrand->post([package] { (*package)(); });

    return future;
}

struct Foo {
    boost::asio::strand s;
    cb_strand_ptr m_apStrand = &s;

    Foo(boost::asio::io_service& svc) : s{svc} {}

    void do_it() {
        std::future<int> f = post_future_to_strand(m_apStrand, std::bind(&Foo::bar, this));
        std::cout << "foo::bar() -> int is " << f.get() << std::endl;
    }

    int bar() {
        return 42;
    }
};

int main() {
    boost::asio::io_service svc;
    auto lock = std::make_unique<boost::asio::io_service::work>(svc); // prevent premature exit
    std::thread th([&] { svc.run(); });

    Foo foo(svc);
    foo.do_it();

    lock.reset(); // allow service to exit
    th.join();
}
Prints
foo::bar() -> int is 42
Can anybody explain to me why this program does not terminate (see the comments)?
#include <boost/asio/io_service.hpp>
#include <boost/asio.hpp>
#include <memory>
#include <cstdio>
#include <iostream>
#include <string>
#include <future>

class Service {
  public:
    ~Service() {
        std::cout << "Destroying...\n";
        io_service.post([this]() {
            std::cout << "clean and stop\n"; // does not get called
            // do some cleanup
            // ...
            io_service.stop();
            std::cout << "Bye!\n";
        });
        std::cout << "...destroyed\n"; // last printed line, blocks
    }

    void operator()() {
        io_service.run();
        std::cout << "run completed\n";
    }

  private:
    boost::asio::io_service io_service;
    boost::asio::io_service::work work{io_service};
};

struct Test {
    void start() {
        f = std::async(std::launch::async, [this]() { service(); std::cout << "exiting thread\n"; });
    }

    std::future<void> f;
    Service service;
};

int main(int argc, char* argv[]) {
    {
        Test test;
        test.start();
        std::string exit;
        std::cin >> exit;
    }
    std::cout << "exiting program\n"; // never printed
}
The real issue is that destruction of io_service is (obviously) not thread-safe.
Just reset the work and join the thread. Optionally, set a flag so your IO operations know shutdown is in progress.
Your Test and Service classes are trying to share responsibility for the IO service; that doesn't work. Here's a much simplified version, merging the classes and dropping the unused future.
Live On Coliru
The trick was to make the work object optional<>:
#include <boost/asio.hpp>
#include <boost/optional.hpp>
#include <cassert>
#include <iostream>
#include <thread>

struct Service {
    ~Service() {
        std::cout << "clean and stop\n";
        io_service.post([this]() {
            work.reset(); // let io_service run out of work
        });
        if (worker.joinable())
            worker.join();
    }

    void start() {
        assert(!worker.joinable());
        worker = std::thread([this] { io_service.run(); std::cout << "exiting thread\n"; });
    }

  private:
    boost::asio::io_service io_service;
    std::thread worker;
    boost::optional<boost::asio::io_service::work> work{io_service};
};

int main() {
    {
        Service test;
        test.start();

        std::cin.ignore(1024, '\n');
        std::cout << "Start shutdown\n";
    }
    std::cout << "exiting program\n";
}
Prints
Start shutdown
clean and stop
exiting thread
exiting program
See here: boost::asio hangs in resolver service destructor after throwing out of io_service::run()
I think the trick here is to destroy the work (the work member) before calling io_service.stop(). I.e. in this case the work could be a unique_ptr, on which you call reset() explicitly before stopping the service.
EDIT: The above helped me some time ago in a case of mine, where io_service::stop did not stop and kept waiting for dispatched events that never happened.
However, I reproduced the problem you have on my machine, and it seems to be a race condition inside io_service, between io_service::post() and the io_service destruction code (shutdown_service). In particular, if shutdown_service() is triggered before the post() notification wakes up the other thread, the shutdown_service() code removes the operation from the queue (and "destroys" it instead of calling it), so the lambda is never called.
For now it seems to me that you'd need to call io_service.stop() directly in the destructor, not postpone it via post(), as that apparently does not work here because of the race.
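As a minimal sketch of that workaround (only the destructor changes; Test and main stay as in the question):
#include <boost/asio.hpp>
#include <iostream>

class Service {
  public:
    ~Service() {
        std::cout << "Destroying...\n";
        io_service.stop();           // stop directly instead of post()ing the stop
        std::cout << "...destroyed\n";
    }

    void operator()() {
        io_service.run();
        std::cout << "run completed\n";
    }

  private:
    boost::asio::io_service io_service;
    boost::asio::io_service::work work{io_service};
};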
I was able to fix the problem by rewriting your code like so:
class Service {
  public:
    ~Service() {
        std::cout << "Destroying...\n";
        work.reset();
        std::cout << "...destroyed\n";
    }

    void operator()() {
        io_service.run();
        std::cout << "run completed\n";
    }

  private:
    boost::asio::io_service io_service;
    std::unique_ptr<boost::asio::io_service::work> work =
        std::make_unique<boost::asio::io_service::work>(io_service);
};
However, this is largely a bandaid solution.
The problem lies in your design ethos; specifically, in choosing not to tie the lifetime of the executing thread directly to the io_service object:
struct Test {
    void start() {
        f = std::async(std::launch::async, [this]() { service(); std::cout << "exiting thread\n"; });
    }

    std::future<void> f; // Constructed first, deleted last
    Service service;     // Constructed second, deleted first
};
In this particular scenario, the thread is going to keep trying to execute io_service.run() past the lifetime of the io_service object itself. If more than the basic work object were executing on the service, you would very quickly run into undefined behavior from calling member functions of destroyed objects.
You could reverse the order of the member objects in Test:
struct Test {
    void start() {
        f = std::async(std::launch::async, [this]() { service(); std::cout << "exiting thread\n"; });
    }

    Service service;
    std::future<void> f;
};
But it still represents a significant design flaw.
The way that I usually implement anything which uses io_service is to tie its lifetime to the threads that are actually going to be executing on it.
class Service {
  public:
    Service(size_t num_of_threads = 1) :
        work(std::make_unique<boost::asio::io_service::work>(io_service))
    {
        for (size_t thread_index = 0; thread_index < num_of_threads; thread_index++) {
            threads.emplace_back([this] { io_service.run(); });
        }
    }

    ~Service() {
        work.reset();
        for (std::thread& thread : threads)
            thread.join();
    }

  private:
    boost::asio::io_service io_service;
    std::unique_ptr<boost::asio::io_service::work> work;
    std::vector<std::thread> threads;
};
Now, if you have any infinite loops active on any of these threads, you'll still need to make sure you properly clean those up, but at least the code specific to the operation of this io_service is cleaned up correctly.
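For example, here is a minimal sketch (my own illustration, not part of the original answer) of a self-reposting handler that checks an atomic flag so it stops rescheduling itself once shutdown begins:
#include <boost/asio.hpp>
#include <atomic>

// Hypothetical self-reposting handler: it re-posts itself until the owning
// Service sets `stopping`, after which run() can drain and return.
struct LoopingTask {
    boost::asio::io_service& ios;
    std::atomic_bool& stopping;

    void operator()() const {
        if (stopping)
            return;          // stop rescheduling once shutdown begins
        // ... do one unit of work here ...
        ios.post(*this);     // otherwise, schedule the next iteration
    }
};
The destructor would then set the flag before work.reset() and the joins, so the posted work actually drains and run() can return.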
This is code which does not work as designed; please explain to me what is wrong here (the code is simplified to make it more readable).
shm_server server;
std::thread s{server};
// some work...
std::cout << "press any key to stop the server";
_getch();
server.stop();
s.join();
It looks like I am calling the stop method on another copy of the shm_server object,
because stop() only sets the std::atomic_bool done member of shm_server to true, but I see that the thread function (the operator() of shm_server) still sees done equal to false.
Does std::thread have only a move constructor?
How do I send a signal to the server correctly in this typical case?
class shm_server {
    std::atomic_bool done;

  public:
    shm_server() : done{false} {}
    shm_server(shm_server&) : done{false} {}

    void operator()() {
        while (!done) {
            //...
        }
    }

    void stop() {
        done = true;
    }
};
You have somehow shot yourself in the foot by making shm_server copyable. I believe you did this in order to get rid of a compiler error, not because copy semantics are particularly useful for that class. I'd recommend you delete that constructor again. (If you really do want copy semantics, have the copy constructor take its argument by reference to const.)
class shm_server // cannot copy or move
{
    std::atomic_bool done_ {};

  public:
    void
    operator()()
    {
        this->run();
    }

    void
    run()
    {
        using namespace std::chrono_literals;
        while (!this->done_.load())
        {
            std::clog << std::this_thread::get_id() << " working..." << std::endl;
            std::this_thread::sleep_for(1ms);
        }
    }

    void
    stop()
    {
        this->done_.store(true);
    }
};
I have factored out the logic of the call operator into a named function for convenience. You don't need to do that but I'll use it later in the example.
Now we somehow have to make the run member function be executed on its own thread. If you write
shm_server server {};
std::thread worker {server}; // won't compile
the compiler won't let you do it as it – as you have conjectured yourself – would attempt to copy the server object which isn't copyable.
@Ben Voigt has suggested wrapping the server object in a std::reference_wrapper, which is an object that behaves like a reference.
shm_server server {};
std::thread worker {std::ref(server)};
This works as intended and effectively passes a pointer to the std::thread constructor, rather than the actual server object.
However, I find that this is not the most straight-forward solution here. What you really want to do is run a member function of your server on a different thread. And std::thread's constructor lets you do just that.
shm_server server {};
std::thread worker {&shm_server::run, &server};
Here, I'm passing the address of the run member function and a pointer to the server object (which will be passed as the implicit this pointer) as arguments. I have introduced the run function since its syntax might be less irritating. You can also pass the address of the call operator directly.
shm_server server {};
std::thread worker {&shm_server::operator(), &server};
Here is a complete example.
int
main()
{
    using namespace std::chrono_literals;
    std::clog << std::this_thread::get_id() << " starting up" << std::endl;

    shm_server server {};
    std::thread worker {&shm_server::operator(), &server};
    //std::thread worker {std::ref(server)};          // same effect
    //std::thread worker {&shm_server::run, &server}; // same effect

    std::this_thread::sleep_for(10ms);

    server.stop();
    worker.join();

    std::clog << std::this_thread::get_id() << " good bye" << std::endl;
}
Possible output:
140435324311360 starting up
140435306977024 working...
140435306977024 working...
140435306977024 working...
140435306977024 working...
140435306977024 working...
140435306977024 working...
140435306977024 working...
140435306977024 working...
140435306977024 working...
140435306977024 working...
140435324311360 good bye
If you believe that a shm_server will only ever be useful when run on its own thread, you can also give it a std::thread as a data member and have its constructor (or a dedicated start member function, if you prefer) start the thread and its destructor join it.
#include <atomic>
#include <chrono>
#include <iostream>
#include <thread>

class shm_server
{
    std::atomic_bool done_ {};
    std::thread worker_ {};

  public:
    shm_server()
    {
        this->worker_ = std::thread {&shm_server::run_, this};
    }

    ~shm_server()
    {
        this->done_.store(true);
        if (this->worker_.joinable())
            this->worker_.join();
    }

  private:
    void
    run_()
    {
        using namespace std::chrono_literals;
        while (!this->done_.load())
        {
            std::clog << std::this_thread::get_id() << " working..." << std::endl;
            std::this_thread::sleep_for(1ms);
        }
    }
};
int
main()
{
    using namespace std::chrono_literals;
    std::clog << std::this_thread::get_id() << " starting up" << std::endl;
    {
        shm_server server {};
        std::this_thread::sleep_for(10ms);
    }
    std::clog << std::this_thread::get_id() << " good bye" << std::endl;
}