I have written a class that attempts to establish a connection to a TCP server, given a custom timeout and a number of attempts. It is a callable object that returns an std::future for the result.
The problems with my initial implementation are:
The object has to stay alive until a connection has been established, the attempts have run out, or an error designated as a stop condition has occurred. So I have to store it inside my class, which I would like to avoid.
Asio composed operations let the caller customize the control flow on completion: the CompletionToken might be a plain callback, a future, or a coroutine. In my case I have tied the user to a future.
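For reference, this is roughly what those three styles look like for a plain asio operation (a sketch assuming standalone asio, as in the code below):

#include <asio.hpp>
#include <asio/spawn.hpp>
#include <asio/use_future.hpp>
#include <future>

using asio::ip::tcp;

// 1) Callback token: the handler is invoked when the connect completes.
void connect_with_callback(tcp::socket &s, tcp::endpoint ep)
{
    s.async_connect(ep, [](const asio::error_code &ec) { /* handle ec */ });
}

// 2) Future token: returns std::future<void>; get() throws on connect failure.
std::future<void> connect_with_future(tcp::socket &s, tcp::endpoint ep)
{
    return s.async_connect(ep, asio::use_future);
}

// 3) Stackful-coroutine token: suspends the calling coroutine until the connect completes.
void connect_with_coroutine(tcp::socket &s, tcp::endpoint ep, asio::yield_context yield)
{
    asio::error_code ec;
    s.async_connect(ep, yield[ec]); // ec receives the result instead of throwing
}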
This is my initial implementation for a connection attempt with a custom timeout and number of attempts:
template<typename Connection>
class connection_attempt
{
public:
using connection_type = Connection;
using endpoint_type = typename Connection::endpoint_type;
template<typename Endpoint>
using require_endpoint = typename std::enable_if<std::is_same<Endpoint, endpoint_type>::value>::type;
constexpr static auto default_timeout()
{
return std::chrono::milliseconds(3000);
}
constexpr static size_t infinite_attempts()
{
return size_t() - 1;
}
explicit connection_attempt(Connection &connection)
: connection_(connection)
{}
template<typename Callable>
explicit connection_attempt(Connection &connection,
Callable &&stopOnError)
: connection_(connection),
stopOnError_(std::forward<Callable>(stopOnError))
{}
template<typename Endpoint,
typename Duration,
typename = require_endpoint<Endpoint>>
std::future<bool> operator()(Endpoint &&endpoint,
size_t attempts,
Duration &&timeout = default_timeout())
{
connectionResult_ = {};
asyncConnect(std::forward<Endpoint>(endpoint),
attempts,
std::forward<Duration>(timeout));
return connectionResult_.get_future();
}
// default attempts = infinite_attempts
template<typename Endpoint,
typename Duration,
typename = require_endpoint<Endpoint>>
std::future<bool> operator()(Endpoint endpoint,
Duration &&timeout = default_timeout())
{
connectionResult_ = {};
asyncConnect(std::forward<Endpoint>(endpoint),
infinite_attempts(),
std::forward<Duration>(timeout));
return connectionResult_.get_future();
}
private:
connection_type &connection_;
asio::steady_timer timer_
{connection_.get_executor()}; // this does not compile -> {asio::get_associated_executor(connection_)};
std::function<bool(const asio::error_code &)> stopOnError_;
std::promise<bool> connectionResult_;
// cancels the connection on timeout!
template<typename Duration>
void startTimer(const Duration &timeout)
{
timer_.expires_after(timeout); // it will automatically cancel a pending timer
timer_.async_wait(
[this, timeout](const asio::error_code &errorCode)
{
// will occur on connection error before timeout
if (errorCode == asio::error::operation_aborted)
return;
// TODO: handle timer errors? What are the possible errors?
assert(!errorCode && "unexpected timer error!");
// stop current connection attempt
connection_.cancel();
});
}
void stopTimer()
{
timer_.cancel();
}
/**
* Will be trying to connect until:<br>
* - has run out of attempts
* - has been required to stop by stopOnError callback (if it was set)
* #param endpoint
* #param attempts
*/
template<typename Duration>
void asyncConnect(endpoint_type endpoint,
size_t attempts,
Duration &&timeout)
{
startTimer(timeout);
connection_.async_connect(endpoint, [this,
endpoint,
attempts,
timeout = std::forward<Duration>(timeout)](const asio::error_code &errorCode)
{
if (!errorCode)
{
stopTimer();
connectionResult_.set_value(true);
return;
}
const auto attemptsLeft = attempts == infinite_attempts() ?
infinite_attempts() :
attempts - 1;
if ((stopOnError_ &&
stopOnError_(errorCode == asio::error::operation_aborted ?
// special case for operation_aborted on timer expiration - need to send timed_out explicitly
// this should only be resulted from the timer calling cancel()
asio::error::timed_out :
errorCode)) ||
!attemptsLeft)
{
stopTimer();
connectionResult_.set_value(false);
return;
}
asyncConnect(endpoint,
attemptsLeft,
timeout);
});
}
};
// this should be an asynchronous function with a custom CompletionToken
template<typename Connection,
typename Callable>
auto make_connection_attempt(Connection &connection,
Callable &&stopOnError) -> connection_attempt<Connection>
{
return connection_attempt<Connection>(connection,
std::forward<Callable>(stopOnError));
}
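For context, a rough usage sketch of the class above (assuming standalone asio and Connection = asio::ip::tcp::socket, which provides get_executor(); the address, port and timeout are made up):

#include <asio.hpp>
#include <chrono>
#include <future>
#include <iostream>
#include <thread>

int main()
{
    asio::io_context ctx;
    asio::ip::tcp::socket socket{ctx};

    // The attempt object has to outlive the whole operation - the very issue described above.
    connection_attempt<asio::ip::tcp::socket> attempt{
        socket,
        [](const asio::error_code &) { return false; } // never stop early
    };

    std::future<bool> connected =
        attempt(asio::ip::tcp::endpoint{asio::ip::make_address("127.0.0.1"), 8989},
                /*attempts*/ 5,
                std::chrono::milliseconds(3000));

    std::thread io{[&] { ctx.run(); }};
    std::cout << "connected: " << std::boolalpha << connected.get() << "\n";
    io.join();
    return 0;
}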
However, I want to be consistent with ASIO and its Universal Model for Asynchronous Operations: the control flow on completion should be customizable.
I have followed the example of sending several messages with intervals, implemented as a composed operation with a stateful intermediate handler. The handler recursively passes itself as the handler for each next asynchronous operation: async_wait and async_write. These calls are always made in turns: one is only invoked after the other has completed. In my case, however, async_wait and async_connect have to be outstanding simultaneously:
// initiation method, called first
void operator()(args...)
{
// not valid!
timer.async_wait(std::move(*this)); // from now on *this is invalid
connection.async_connect(endpoint, std::move(*this)); // can't move *this twice
}
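For contrast, the turn-taking shape of that example looks roughly like this (a simplified sketch, not the library example verbatim; Handler stands for the final completion handler type):

#include <asio.hpp>
#include <chrono>
#include <memory>
#include <string>

// A single stateful handler re-submits *this to the next operation, so exactly one
// pending operation owns the state at a time and std::move(*this) happens once per
// step - which is precisely what breaks down when two operations must be outstanding at once.
template<typename Handler>
struct write_messages_op
{
    asio::ip::tcp::socket &socket_;
    std::unique_ptr<asio::steady_timer> timer_;
    std::string message_;
    std::size_t remaining_;
    Handler handler_;
    enum { waiting, writing } state_ = waiting;

    void operator()(const asio::error_code &error = {}, std::size_t = 0)
    {
        if (!error && remaining_ > 0)
        {
            if (state_ == waiting)
            {
                state_ = writing;
                --remaining_;
                asio::async_write(socket_, asio::buffer(message_), std::move(*this));
                return;
            }
            state_ = waiting;
            timer_->expires_after(std::chrono::seconds(1));
            timer_->async_wait(std::move(*this)); // *this is moved exactly once per step
            return;
        }
        handler_(error); // done (or failed)
    }
};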
This is a code for a class I am trying to implement as an initiation and an intermediate handler:
template<typename Connection, typename CompletionToken>
class composed_connection_attempt
{
public:
using connection_type = Connection;
using endpoint_type = typename Connection::endpoint_type;
enum class state
{
pending,
connected,
timeout
};
constexpr static auto default_timeout()
{
return std::chrono::milliseconds(3000);
}
constexpr static size_t infinite_attempts()
{
return size_t() - 1;
}
// TODO: executor type
using executor_type = asio::associated_executor_t<CompletionToken,
typename connection_type::executor_type>;
executor_type get_executor() const noexcept
{
// TODO: get completion handler executor
return connection_.get_executor();
}
// TODO: allocator type
using allocator_type = typename asio::associated_allocator_t<CompletionToken,
std::allocator<void>>;
allocator_type get_allocator() const noexcept
{
// TODO: get completion handler allocator
return allocator_type();
}
// TODO: constructor to initialize state, pass timeout value?
explicit composed_connection_attempt(connection_type &connection)
: connection_(connection)
{}
template<typename Callable>
composed_connection_attempt(connection_type &connection, Callable &&stopOnError)
: connection_(connection),
stopOnError_(std::forward<Callable>(stopOnError))
{}
// operator for initiation
template<typename Endpoint, typename Duration>
void operator()(Endpoint &&endpoint,
size_t attempts,
Duration timeout = default_timeout())
{
// Start timer: how to pass this
// Attempt connection
}
// intermediate completion handler
// this may be invoked without an error both by the timer and a connection
void operator()(const asio::error_code &errorCode)
{
if (!errorCode)
{
}
}
private:
Connection &connection_;
asio::steady_timer timer_{this->get_executor()};
std::atomic<state> state_{state::pending};
std::function<bool(const asio::error_code &)> stopOnError_;
std::function<void(const asio::error_code &)> completionHandler_;
};
So, the problems I am trying to resolve:
How to share ownership of a stateful intermediate handler with both a timer and a connection (socket)? Maybe I have to use nested classes (main class for initiation and nested for timer and socket events)?
How to determine which of the asynchronous calls resulted in a void operator()(const asio::error_code&) invocation? No error might mean either a successful connection or an expired timer. Both asynchronous operations can also complete with asio::error::operation_aborted on cancellation: the connection attempt is cancelled on timeout, and the timer is cancelled on success or on a connection error.
Finally got around to this one:
Wow. I've just created the same without using spawn (using an operation type that uses the State struct arguments as I mentioned). I must say the complexity of this kind of library-implementor stuff keeps surprising me. I managed to avoid the overhead of shared_from_this though, and of course all demos still pass, so I'm pretty content. If you want I can post as an alternative answer. – sehe
The Initiation Function
The initiation function is roughly the same, except it no longer uses spawn (meaning that the user doesn't have to opt-in to Boost Coroutine and Boost Context).
template <typename F, typename Token>
auto async_connection_attempt(tcp::socket& object, tcp::endpoint ep, F&& stopOn,
Token&& token, int attempts = -1,
Timer::duration delay = 3s) {
using Result = asio::async_result<std::decay_t<Token>,
void(error_code, bool)>;
using Completion = typename Result::completion_handler_type;
Completion completion(std::forward<Token>(token));
Result result(completion);
using Op = mylib::connection_attempt_op<std::decay_t<F>, Completion>;
// make an owning self, to be passed along a single async call chain
auto self = std::make_unique<Op>(object, ep, std::forward<F>(stopOn), completion, attempts, delay);
(*self)(self);
return result.get();
}
Now, you'll immediately spot that I used a unique-ownership container (unique_ptr). I tried to avoid the dynamic allocation by creating a value-semantics operation class that encapsulated the handler in move-only fashion.
However, the operation also owns the timer object, which needs to be reference-stable across callbacks, so moving is not an option. Of course, we could still have a movable value-operation type that contains just a single unique_ptr for the _timer, but that's the same overhead and less generic. Besides:
- if we add another IO object to the operation state, we would require more dynamic allocations
- moving a unique_ptr is strictly cheaper than moving a state object many times its size
- moving the object pointed to by this inside member functions is very error-prone; e.g., this would invoke undefined behaviour:
bind_executor(_ex, std::bind(std::move(*this), ...))
That's because _ex is actually this->_ex, but the evaluation is not sequenced, so this->_ex might be evaluated after the move. It's the kind of footgun we should not want (see the sketch after this list).
- if we implement other async operations, we can use the same pattern
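For the record, this is the shape of the workaround that the rejected move-*this design would need, sketched (value_op and its executor member are purely illustrative; asio and error_code are the aliases from the listing below):

struct value_op {
    asio::io_context::executor_type _ex;

    void start_unsafe() {
        // Unsafe: the read of this->_ex and the move of *this are unsequenced,
        // so _ex may be read from an already moved-from object:
        // asio::bind_executor(_ex, std::bind(std::move(*this), std::placeholders::_1));
    }

    void start_safe() {
        auto ex = _ex; // copy the member into a local first...
        auto handler = asio::bind_executor(
            ex, std::bind(std::move(*this), std::placeholders::_1)); // ...then move *this
        // hand `handler` to the next async operation
    }

    void operator()(error_code) {}
};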
The Operation Class
You will recognize this from your original code. I opted to use my own suggestion to select operator() overloads by dispatching on a marker "state" type:
struct Init {};
struct Attempt {};
struct Response {};
To aid in binding to ourself, we also pass the owning unique_ptr as the self argument:
using Self = std::unique_ptr<connection_attempt_op>;
struct Binder {
Self _self;
template <typename... Args>
decltype(auto) operator()(Args&&... args) {
return (*_self)(_self, std::forward<Args>(args)...);
}
};
Due to limitations in std::bind we can't pass actual rvalue parameters, but:
- if we're careful that the call chain is strictly sequential, we can always move from self precisely once
- due to the indirection of the unique_ptr it is still safe to use this from the method body after moving self
- we can now capture the stable value of this in our completion handler for _timer.async_wait! As long as we guarantee that the completion handler doesn't outlive the lifetime of self, we don't have to share ownership here.
shared_ptr dependence averted!
With those in mind, I think the full implementation has few surprises:
namespace mylib { // implementation details
template <typename F, typename Completion> struct connection_attempt_op {
tcp::socket& _object;
tcp::endpoint _ep;
F _stopOn;
Completion _handler;
int _attempts;
Timer::duration _delay;
using executor_type =
asio::strand<std::decay_t<decltype(_object.get_executor())>>;
executor_type _ex;
std::unique_ptr<Timer> _timer;
executor_type const& get_executor() { return _ex; }
explicit connection_attempt_op(tcp::socket& object, tcp::endpoint ep,
F stopOn, Completion handler,
int attempts, Timer::duration delay)
: _object(object),
_ep(ep),
_stopOn(std::move(stopOn)),
_handler(std::move(handler)),
_attempts(attempts),
_delay(delay),
_ex(object.get_executor()) {}
struct Init {};
struct Attempt {};
struct Response {};
using Self = std::unique_ptr<connection_attempt_op>;
struct Binder {
Self _self;
template <typename... Args>
decltype(auto) operator()(Args&&... args) {
return (*_self)(_self, std::forward<Args>(args)...);
}
};
void operator()(Self& self, Init = {}) {
// This is the only invocation perhaps not yet on the strand, so
// dispatch
asio::dispatch(_ex, std::bind(Binder{std::move(self)}, Attempt{}));
}
void operator()(Self& self, Attempt) {
if (_attempts--) {
_timer = std::make_unique<Timer>(_ex, _delay);
_timer->async_wait([this](error_code ec) {
if (!ec) _object.cancel();
});
_object.async_connect(
_ep,
asio::bind_executor(
_ex, // _object may not already have been on strand
std::bind(Binder{std::move(self)}, Response{},
std::placeholders::_1)));
} else {
_handler(mylib::result_code::attempts_exceeded, false);
}
}
void operator()(Self& self, Response, error_code ec) {
if (!ec) {
_timer.reset();
return _handler(result_code::ok, true);
}
if (ec == asio::error::operation_aborted) {
ec = result_code::timeout;
}
if (_stopOn && _stopOn(ec))
return _handler(ec, false);
_timer.reset();
_object.close();
operator()(self, Attempt{});
}
};
}
Do note the executor binding; the comment in the Init{} overload, as well as the one at the bind_executor call, are relevant here.
The strand is essential to maintaining the lifetime guarantees that we need w.r.t. the async_wait operation. In particular, we need the timer's wait handler and the connect handler to run in a well-defined order, which the strand provides.
DEMO TIME
The rest of the code is 100% identical to the other answer, so let's present it without further comment:
Live On Wandbox
//#define BOOST_ASIO_ENABLE_HANDLER_TRACKING
//#define BOOST_ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS
#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
#include <iostream>
#include <iomanip>
#ifdef STANDALONE_ASIO
using std::error_category;
using std::error_code;
using std::error_condition;
using std::system_error;
#else
namespace asio = boost::asio;
using boost::system::error_category;
using boost::system::error_code;
using boost::system::error_condition;
using boost::system::system_error;
#endif
using namespace std::chrono_literals;
using asio::ip::tcp;
using Timer = asio::steady_timer;
namespace mylib { // threw in the kitchen sink for error codes
enum class result_code {
ok = 0,
timeout = 1,
attempts_exceeded = 2,
not_implemented = 3,
};
auto const& get_result_category() {
struct impl : error_category {
const char* name() const noexcept override { return "result_code"; }
std::string message(int ev) const override {
switch (static_cast<result_code>(ev)) {
case result_code::ok: return "success";
case result_code::attempts_exceeded:
return "the maximum number of attempts was exceeded";
case result_code::timeout:
return "the operation did not complete in time";
case result_code::not_implemented:
return "feature not implemented";
default: return "unknown error";
}
}
error_condition
default_error_condition(int ev) const noexcept override {
return error_condition{ev, *this};
}
bool equivalent(int ev, error_condition const& condition)
const noexcept override {
return condition.value() == ev && &condition.category() == this;
}
bool equivalent(error_code const& error,
int ev) const noexcept override {
return error.value() == ev && &error.category() == this;
}
} const static instance;
return instance;
}
error_code make_error_code(result_code se) {
return error_code{
static_cast<std::underlying_type<result_code>::type>(se),
get_result_category()};
}
} // namespace mylib
template <>
struct boost::system::is_error_code_enum<mylib::result_code>
: std::true_type {};
namespace mylib { // implementation details
template <typename F, typename Completion> struct connection_attempt_op {
tcp::socket& _object;
tcp::endpoint _ep;
F _stopOn;
Completion _handler;
int _attempts;
Timer::duration _delay;
using executor_type =
asio::strand<std::decay_t<decltype(_object.get_executor())>>;
executor_type _ex;
std::unique_ptr<Timer> _timer;
executor_type const& get_executor() { return _ex; }
explicit connection_attempt_op(tcp::socket& object, tcp::endpoint ep,
F stopOn, Completion handler,
int attempts, Timer::duration delay)
: _object(object),
_ep(ep),
_stopOn(std::move(stopOn)),
_handler(std::move(handler)),
_attempts(attempts),
_delay(delay),
_ex(object.get_executor()) {}
struct Init {};
struct Attempt {};
struct Response {};
using Self = std::unique_ptr<connection_attempt_op>;
struct Binder {
Self _self;
template <typename... Args>
decltype(auto) operator()(Args&&... args) {
return (*_self)(_self, std::forward<Args>(args)...);
}
};
void operator()(Self& self, Init = {}) {
// This is the only invocation perhaps not yet on the strand, so
// dispatch
asio::dispatch(_ex, std::bind(Binder{std::move(self)}, Attempt{}));
}
void operator()(Self& self, Attempt) {
if (_attempts--) {
_timer = std::make_unique<Timer>(_ex, _delay);
_timer->async_wait([this](error_code ec) {
if (!ec) _object.cancel();
});
_object.async_connect(
_ep,
asio::bind_executor(
_ex, // _object may not already have been on strand
std::bind(Binder{std::move(self)}, Response{},
std::placeholders::_1)));
} else {
_handler(mylib::result_code::attempts_exceeded, false);
}
}
void operator()(Self& self, Response, error_code ec) {
if (!ec) {
_timer.reset();
return _handler(result_code::ok, true);
}
if (ec == asio::error::operation_aborted) {
ec = result_code::timeout;
}
if (_stopOn && _stopOn(ec))
return _handler(ec, false);
_timer.reset();
_object.close();
operator()(self, Attempt{});
}
};
}
template <typename F, typename Token>
auto async_connection_attempt(tcp::socket& object, tcp::endpoint ep, F&& stopOn,
Token&& token, int attempts = -1,
Timer::duration delay = 3s) {
using Result = asio::async_result<std::decay_t<Token>,
void(error_code, bool)>;
using Completion = typename Result::completion_handler_type;
Completion completion(std::forward<Token>(token));
Result result(completion);
using Op = mylib::connection_attempt_op<std::decay_t<F>, Completion>;
// make an owning self, to be passed along a single async call chain
auto self = std::make_unique<Op>(object, ep, std::forward<F>(stopOn), completion, attempts, delay);
(*self)(self);
return result.get();
}
static auto non_recoverable = [](error_code ec) {
std::cerr << "Checking " << std::quoted(ec.message()) << "\n";
// TODO Be specific about intermittent/recoverable conditions
return false;
};
#include <set>
int main(int argc, char** argv) {
assert(argc>1);
static const tcp::endpoint ep{asio::ip::make_address(argv[1]),
8989};
std::set<std::string_view> const options{argv+1, argv+argc};
std::cout << std::boolalpha;
if (options.contains("future")) {
std::cout
<< "-----------------------\n"
<< " FUTURE DEMO\n"
<< "-----------------------" << std::endl;
asio::thread_pool ctx;
try {
tcp::socket s(ctx);
std::future<bool> ok = async_connection_attempt(
s, ep, non_recoverable, asio::use_future, 5, 800ms);
std::cout << "Future: " << ok.get() << ", " << s.is_open() << "\n";
} catch (system_error const& se) {
std::cout << "Future: " << se.code().message() << "\n";
}
ctx.join();
}
if (options.contains("coroutine")) {
std::cout
<< "-----------------------\n"
<< " COROUTINE DEMO\n"
<< "-----------------------" << std::endl;
asio::io_context ctx;
asio::spawn(ctx,
[work = make_work_guard(ctx)](asio::yield_context yc) {
auto ex = get_associated_executor(yc);
tcp::socket s(ex);
error_code ec;
if (async_connection_attempt(s, ep, non_recoverable,
yc[ec], 5, 800ms)) {
std::cout << "Connected in coro\n";
} else {
std::cout << "NOT Connected in coro: " << ec.message() << "\n";
}
});
ctx.run();
}
if (options.contains("callback")) {
std::cout
<< "-----------------------\n"
<< " CALLBACK DEMO\n"
<< "-----------------------" << std::endl;
asio::io_context ctx;
tcp::socket s(ctx);
async_connection_attempt(
s, ep, non_recoverable,
[](error_code ec, bool ok) {
std::cout << "Callback: " << ok << ", "
<< ec.message() << "\n";
},
5, 800ms);
ctx.run();
}
}
Here's another local demo with different scenarios:
So, for the second question I suggested a discriminating argument (sometimes I use an empty "state struct", like State::Init{} or State::Timeout{}, to aid in overload resolution as well as self-documentation).
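Sketched, the idea looks something like this (names are illustrative; error_code is the alias from the listing below):

// Empty marker structs select the operator() overload, so it is always clear which
// asynchronous step just completed - no guessing from the error_code alone.
struct attempt_op {
    struct Init {};
    struct Timeout {};

    void operator()(Init)                       { /* start the first attempt */ }
    void operator()(Timeout, error_code /*ec*/) { /* the timer fired */ }
    void operator()(error_code /*ec*/)          { /* async_connect completed */ }
};
// e.g. timer.async_wait(std::bind(std::move(op), attempt_op::Timeout{}, std::placeholders::_1));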
For the first question, I'm sure you have run into std::enable_shared_from_this by now.
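Its shape, roughly (a sketch; tcp, Timer and error_code are the aliases from the listing below, and the member names are made up):

// Both pending operations capture a shared_ptr to the op, so neither completion
// handler can outlive the state (timer, socket reference) it touches.
struct shared_attempt_op : std::enable_shared_from_this<shared_attempt_op> {
    shared_attempt_op(tcp::socket &s, tcp::endpoint ep)
        : socket_(s), timer_(s.get_executor()), ep_(ep) {}

    void start() {
        auto self = shared_from_this();
        timer_.async_wait([self](error_code)          { /* cancel socket_ on expiry */ });
        socket_.async_connect(ep_, [self](error_code) { /* handle the connect result */ });
    }

    tcp::socket  &socket_;
    Timer         timer_;
    tcp::endpoint ep_;
};
// usage: std::make_shared<shared_attempt_op>(socket, ep)->start();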
Here's my take on the "Universal Model". I used spawn for ease of exposition.
template <typename F, typename Token>
auto async_connection_attempt(tcp::socket& object, tcp::endpoint ep,
F&& stopOn, Token&& token,
int attempts = -1,
Timer::duration delay = 3s)
{
using Result = asio::async_result<std::decay_t<Token>,
void(error_code, bool)>;
using Completion = typename Result::completion_handler_type;
Completion completion(std::forward<Token>(token));
Result result(completion);
asio::spawn(
object.get_executor(),
[=, &object](asio::yield_context yc) mutable {
using mylib::result_code;
auto ex = get_associated_executor(yc);
error_code ec;
while (attempts--) {
Timer t(ex, delay);
t.async_wait([&](error_code ec) { if (!ec) object.cancel(); });
object.async_connect(ep, yc[ec]);
if(!ec)
return completion(result_code::ok, true);
if (ec == asio::error::operation_aborted) {
ec = result_code::timeout;
}
if (stopOn && stopOn(ec))
return completion(ec, false);
object.close();
}
return completion(result_code::attempts_exceeded, false);
});
return result.get();
}
The key things to note are:
- the async_result<> protocol will give you a completion handler that "does the magic" required by the caller (use_future, yield_context, etc.)
- you should be able to get away without sharing a reference, as the timer can "just" have a raw pointer: the timer's lifetime is completely owned by the containing composed operation
Full Demo: callbacks, coroutines and futures
I threw in a mylib::result_code enum to be able to return full error information:
Live On Wandbox
//#define BOOST_ASIO_ENABLE_HANDLER_TRACKING
#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
#include <iostream>
#include <iomanip>
#ifdef STANDALONE_ASIO
using std::error_category;
using std::error_code;
using std::error_condition;
using std::system_error;
#else
namespace asio = boost::asio;
using boost::system::error_category;
using boost::system::error_code;
using boost::system::error_condition;
using boost::system::system_error;
#endif
using namespace std::chrono_literals;
using asio::ip::tcp;
using Timer = asio::steady_timer;
namespace mylib { // threw in the kitchen sink for error codes
enum class result_code {
ok = 0,
timeout = 1,
attempts_exceeded = 2,
};
auto const& get_result_category() {
struct impl : error_category {
const char* name() const noexcept override { return "result_code"; }
std::string message(int ev) const override {
switch (static_cast<result_code>(ev)) {
case result_code::ok: return "success";
case result_code::attempts_exceeded:
return "the maximum number of attempts was exceeded";
case result_code::timeout:
return "the operation did not complete in time";
default: return "unknown error";
}
}
error_condition
default_error_condition(int ev) const noexcept override {
return error_condition{ev, *this};
}
bool equivalent(int ev, error_condition const& condition)
const noexcept override {
return condition.value() == ev && &condition.category() == this;
}
bool equivalent(error_code const& error,
int ev) const noexcept override {
return error.value() == ev && &error.category() == this;
}
} const static instance;
return instance;
}
error_code make_error_code(result_code se) {
return error_code{
static_cast<std::underlying_type<result_code>::type>(se),
get_result_category()};
}
} // namespace mylib
template <>
struct boost::system::is_error_code_enum<mylib::result_code>
: std::true_type {};
template <typename F, typename Token>
auto async_connection_attempt(tcp::socket& object, tcp::endpoint ep,
F&& stopOn, Token&& token,
int attempts = -1,
Timer::duration delay = 3s)
{
using Result = asio::async_result<std::decay_t<Token>,
void(error_code, bool)>;
using Completion = typename Result::completion_handler_type;
Completion completion(std::forward<Token>(token));
Result result(completion);
asio::spawn(
object.get_executor(),
[=, &object](asio::yield_context yc) mutable {
using mylib::result_code;
auto ex = get_associated_executor(yc);
error_code ec;
while (attempts--) {
Timer t(ex, delay);
t.async_wait([&](error_code ec) { if (!ec) object.cancel(); });
object.async_connect(ep, yc[ec]);
if(!ec)
return completion(result_code::ok, true);
if (ec == asio::error::operation_aborted) {
ec = result_code::timeout;
}
if (stopOn && stopOn(ec))
return completion(ec, false);
object.close();
}
return completion(result_code::attempts_exceeded, false);
});
return result.get();
}
static auto non_recoverable = [](error_code ec) {
std::cerr << "Checking " << std::quoted(ec.message()) << "\n";
// TODO Be specific about intermittent/recoverable conditions
return false;
};
#include <set>
int main(int argc, char** argv) {
assert(argc>1);
static const tcp::endpoint ep{asio::ip::make_address(argv[1]),
8989};
std::set<std::string_view> const options{argv+1, argv+argc};
std::cout << std::boolalpha;
if (options.contains("future")) {
std::cout
<< "-----------------------\n"
<< " FUTURE DEMO\n"
<< "-----------------------" << std::endl;
asio::thread_pool ctx;
try {
tcp::socket s(ctx);
std::future<bool> ok = async_connection_attempt(
s, ep, non_recoverable, asio::use_future, 5, 800ms);
std::cout << "Future: " << ok.get() << ", " << s.is_open() << "\n";
} catch (system_error const& se) {
std::cout << "Future: " << se.code().message() << "\n";
}
ctx.join();
}
if (options.contains("coroutine")) {
std::cout
<< "-----------------------\n"
<< " COROUTINE DEMO\n"
<< "-----------------------" << std::endl;
asio::io_context ctx;
asio::spawn(ctx,
[work = make_work_guard(ctx)](asio::yield_context yc) {
auto ex = get_associated_executor(yc);
tcp::socket s(ex);
error_code ec;
if (async_connection_attempt(s, ep, non_recoverable,
yc[ec], 5, 800ms)) {
std::cout << "Connected in coro\n";
} else {
std::cout << "NOT Connected in coro: " << ec.message() << "\n";
}
});
ctx.run();
}
if (options.contains("callback")) {
std::cout
<< "-----------------------\n"
<< " CALLBACK DEMO\n"
<< "-----------------------" << std::endl;
asio::io_context ctx;
tcp::socket s(ctx);
async_connection_attempt(
s, ep, non_recoverable,
[](error_code ec, bool ok) {
std::cout << "Callback: " << ok << ", "
<< ec.message() << "\n";
},
5, 800ms);
ctx.run();
}
}
Sample output is on the online compiler, or compare some tests on my machine:
This is how I implemented it. Code with tests can be found here on GitHub.
template<typename Connection, typename CompletionHandler>
class composed_connection_attempt
{
public:
using connection_type = Connection;
using endpoint_type = typename Connection::endpoint_type;
// TODO: clarify the type!
using completion_handler_t = CompletionHandler;
constexpr static auto default_timeout()
{
return std::chrono::milliseconds(3000);
}
constexpr static size_t infinite_attempts()
{
return size_t() - 1;
}
using executor_type = asio::associated_executor_t<
typename std::decay<CompletionHandler>::type,
typename connection_type::executor_type>;
executor_type get_executor() const noexcept
{
// TODO: get completion handler executor
return pImpl_->get_executor();
}
// TODO: allocator type
using allocator_type = typename asio::associated_allocator_t<CompletionHandler,
std::allocator<void>>;
allocator_type get_allocator() const noexcept
{
// TODO: get completion handler allocator
return pImpl_->get_allocator();
}
// TODO: constructor to initialize state, pass timeout value?
template<typename CompletionHandlerT>
explicit composed_connection_attempt(connection_type &connection,
CompletionHandlerT &&completionHandler)
: pImpl_(std::make_shared<impl>(connection,
std::forward<CompletionHandlerT>(completionHandler)))
{}
template<typename CompletionHandlerT,
typename Callable>
explicit composed_connection_attempt(connection_type &connection,
CompletionHandlerT &&completionHandler,
Callable &&stopOnError)
: pImpl_(std::make_shared<impl>(connection,
std::forward<CompletionHandlerT>(completionHandler),
std::forward<Callable>(stopOnError)))
{}
/**
* Initiation operator. Initiates composed connection procedure.
* #tparam Endpoint type of endpoint
* #tparam Duration type of timeout
* #param endpoint endpoint to be used for connection
* #param attempts number of attempts
* #param timeout value to be used as a timeout between attempts
*/
// TODO: require endpoint type
template<typename Endpoint, typename Duration>
void operator()(Endpoint &&endpoint,
size_t attempts,
Duration &&timeout = default_timeout())
{
pImpl_->endpoint_ = std::forward<Endpoint>(endpoint);
pImpl_->attempts_ = attempts;
pImpl_->timeout_ = std::forward<Duration>(timeout);
asyncConnect();
}
/**
* Initiation operator. Initiates composed connection procedure. Connection attempts default to infinite.
* #tparam Endpoint type of endpoint
* #tparam Duration type of timeout
* #param endpoint endpoint to be used for connection
* #param timeout value to be used as a timeout between attempts
*/
// TODO: require endpoint type
template<typename Endpoint, typename Duration>
void operator()(Endpoint &&endpoint,
Duration &&timeout = default_timeout())
{
pImpl_->endpoint_ = std::forward<Endpoint>(endpoint);
pImpl_->timeout_ = std::forward<Duration>(timeout);
asyncConnect();
}
/**
* Intermediate completion handler. Will be trying to connect until:<br>
* - has connected<br>
* - has run out of attempts<br>
* - user-provided callback #impl::stopOnError_ interrupts execution when a specific connection error has occurred<br>
* <br>Will be invoked only on connection events:<br>
* - success<br>
* - connection timeout or operation_cancelled in case if timer has expired<br>
* - connection errors<br>
* #param errorCode error code resulted from async_connect
*/
void operator()(const asio::error_code &errorCode)
{
if (!errorCode)
{
stopTimer();
pImpl_->completionHandler_(errorCode);
return;
}
const auto attemptsLeft = pImpl_->attempts_ == infinite_attempts() ?
infinite_attempts() :
pImpl_->attempts_ - 1;
if ((pImpl_->stopOnError_ &&
pImpl_->stopOnError_(errorCode == asio::error::operation_aborted ?
// special case for operation_aborted on timer expiration - need to send timed_out explicitly
// this should only be resulted from the timer calling cancel()
asio::error::timed_out :
errorCode)) ||
!attemptsLeft)
{
stopTimer();
pImpl_->completionHandler_(errorCode == asio::error::operation_aborted ?
asio::error::timed_out :
errorCode);
return;
}
pImpl_->attempts_ = attemptsLeft;
asyncConnect();
}
private:
struct impl
{
template<typename CompletionHandlerT>
impl(connection_type &connection,
CompletionHandlerT &&completionHandler)
: connection_(connection),
completionHandler_(std::forward<CompletionHandlerT>(completionHandler))
{}
template<typename CompletionHandlerT, typename Callable>
impl(connection_type &connection,
CompletionHandlerT &&completionHandler,
Callable &&stopOnError)
: connection_(connection),
completionHandler_(std::forward<CompletionHandlerT>(completionHandler)),
stopOnError_(std::forward<Callable>(stopOnError))
{}
executor_type get_executor() const noexcept
{
return asio::get_associated_executor(completionHandler_,
connection_.get_executor());
}
allocator_type get_allocator() const noexcept
{
// TODO: get completion handler allocator
return allocator_type();
}
connection_type &connection_;
completion_handler_t completionHandler_;
std::function<bool(const asio::error_code &)> stopOnError_;
// this should be default constructible, or should I pass it in the constructor?
endpoint_type endpoint_;
// TODO: make timer initialization from get_executor()
asio::steady_timer timer_{connection_.get_executor()}; // this does not compile! -> {get_executor()};
asio::steady_timer::duration timeout_ = default_timeout();
size_t attempts_ = infinite_attempts();
};
// TODO: make unique?
std::shared_ptr<impl> pImpl_;
// cancels the connection on timeout!
void startTimer()
{
pImpl_->timer_.expires_after(pImpl_->timeout_); // it will automatically cancel a pending timer
pImpl_->timer_.async_wait(
[pImpl = pImpl_](const asio::error_code &errorCode)
{
// will occur on connection error before timeout
if (errorCode == asio::error::operation_aborted)
return;
// TODO: handle timer errors? What are the possible errors?
assert(!errorCode && "unexpected timer error!");
// stop attempts
pImpl->connection_.cancel();
});
}
void stopTimer()
{
pImpl_->timer_.cancel();
}
/**
* Will be trying to connect until:<br>
* - has run out of attempts
* - has been required to stop by stopOnError callback (if it was set)
* #param endpoint
* #param attempts
*/
void asyncConnect()
{
startTimer();
pImpl_->connection_.async_connect(pImpl_->endpoint_, std::move(*this));
}
};
template<typename Connection,
typename CompletionHandler,
typename Callable>
auto make_composed_connection_attempt(Connection &connection,
CompletionHandler &&completionHandler,
Callable &&stopOnError) ->
composed_connection_attempt<Connection, CompletionHandler>
{
return composed_connection_attempt<Connection, CompletionHandler>(connection,
std::forward<CompletionHandler>(
completionHandler),
std::forward<Callable>(stopOnError));
}
template<typename Connection,
typename Endpoint,
typename Duration,
typename CompletionToken,
typename Callable>
auto async_connection_attempt(Connection &connection,
Endpoint &&endpoint,
size_t attempts,
Duration &&timeout,
CompletionToken &&completionToken,
Callable &&stopOnError)
{
using result_t = asio::async_result<std::decay_t<CompletionToken>,
void(asio::error_code)>;
using completion_t = typename result_t::completion_handler_type;
completion_t completion{std::forward<CompletionToken>(completionToken)};
result_t result{completion};
auto composedConnectionAttempt = make_composed_connection_attempt(connection,
std::forward<completion_t>(completion),
std::forward<Callable>(stopOnError));
composedConnectionAttempt(std::forward<Endpoint>(endpoint),
attempts,
std::forward<Duration>(timeout));
return result.get();
}
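For reference, a rough usage sketch of this initiation function with a future token (standalone asio assumed; the endpoint, attempt count and timeout are made up):

#include <asio.hpp>
#include <asio/use_future.hpp>
#include <chrono>
#include <future>
#include <iostream>
#include <thread>

int main()
{
    asio::io_context ctx;
    asio::ip::tcp::socket socket{ctx};

    // The completion signature is void(asio::error_code), so use_future yields a
    // std::future<void>: get() returns normally on success and throws on error.
    std::future<void> done = async_connection_attempt(
        socket,
        asio::ip::tcp::endpoint{asio::ip::make_address("127.0.0.1"), 8989},
        /*attempts*/ 5,
        std::chrono::milliseconds(800),
        asio::use_future,
        [](const asio::error_code &) { return false; }); // never stop early

    std::thread io{[&] { ctx.run(); }};
    try {
        done.get();
        std::cout << "connected\n";
    } catch (const std::exception &error) {
        std::cout << "failed: " << error.what() << "\n";
    }
    io.join();
    return 0;
}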
Related
I had a synchronous method that sent an https request using http::write and then expected to read its response using http::read.
However, in order to add a timeout I had to move to async calls in my method. So I've tried to use http::async_read and http::async_write, but keep the overall flow synchronous so the method returns only once it has the https response.
Here's my attempt:
class httpsClass {
std::optional<boost::beast::ssl_stream<boost::beast::tcp_stream>> ssl_stream_;
httpsClass(..) {
// notice that ssl_stream_ is initialized according to io_context_/ctx_
// that are class members that get set by c'tor args
ssl_stream_.emplace(io_context_, ctx_);
}
}
std::optional<boost::beast::http::response<boost::beast::http::dynamic_body>>
httpsClass::sendHttpsRequestAndGetResponse (
const boost::beast::http::request<boost::beast::http::string_body>
&request) {
try{
boost::asio::io_context ioc;
beast::flat_buffer buffer;
http::response<http::dynamic_body> res;
beast::get_lowest_layer(*ssl_stream_).expires_after(kTimeout);
boost::asio::spawn(ioc, [&, this](boost::asio::yield_context yield) {
auto sent = http::async_write(this->ssl_stream_.value(), request, yield);
auto received = http::async_read(this->ssl_stream_.value(), buffer, res, yield);
});
ioc.run();// this will finish only once the task above will be fully executed.
return res;
} catch (const std::exception &e) {
log("Error sending/receiving:{}", e.what());
return std::nullopt;
}
}
When testing, the method above reaches the task I assign to the internal io_context (ioc). However, it gets stuck inside this task, on the async_write call.
Can anybody help me figure out why it gets stuck? Could it be related to the fact that ssl_stream_ is initialized with another io_context object (io_context_)?
Yes. The default executor for completion handlers on the ssl_stream_ is the outer io_context, which cannot make progress, because you're likely not running it.
My hint would be to:
- avoid making the second io_context
- use the more typical future<Response> rather than optional<Response> (which loses the error information)
- avoid passing the io_context&; instead pass executors, which you can more easily change to be a strand executor if so required
Adding some code to make it self-contained:
class httpsClass {
ssl::context& ctx_;
std::string host_;
std::optional<beast::ssl_stream<beast::tcp_stream>> ssl_stream_;
beast::flat_buffer buffer_;
static constexpr auto kTimeout = 3s;
public:
httpsClass(net::any_io_executor ex, ssl::context& ctx, std::string host)
: ctx_(ctx)
, host_(host)
, ssl_stream_(std::in_place, ex, ctx_) {
auto ep = tcp::resolver(ex).resolve(host, "https");
ssl_stream_->next_layer().connect(ep);
ssl_stream_->handshake(ssl::stream_base::handshake_type::client);
log("Successfully connected to {} for {}",
ssl_stream_->next_layer().socket().remote_endpoint(), ep->host_name());
}
using Request = http::request<http::string_body>;
using Response = http::response<http::dynamic_body>;
std::future<Response> performRequest(Request const&);
};
Your implementation was pretty close, except for the unnecessary extra io_context:
std::future<httpsClass::Response>
httpsClass::performRequest(Request const& request) {
std::promise<Response> promise;
auto fut = promise.get_future();
auto coro = [this, r = request, p = std::move(promise)] //
(net::yield_context yield) mutable {
try {
auto& s = *ssl_stream_;
get_lowest_layer(s).expires_after(kTimeout);
r.prepare_payload();
r.set(http::field::host, host_);
auto sent = http::async_write(s, r, yield);
log("Sent: {}", sent);
http::response<http::dynamic_body> res;
auto received = http::async_read(s, buffer_, res, yield);
log("Received: {}", received);
p.set_value(std::move(res));
} catch (...) {
p.set_exception(std::current_exception());
}
};
spawn(ssl_stream_->get_executor(), std::move(coro));
return fut;
}
Now, it is important to have the io_service run()-ning for any asynchronous operations. With completely asynchronous code you wouldn't need threads, but as you are blocking on the response you will. The easiest way is to replace io_service with a thread_pool which does the run()-ning for you.
int main() {
net::thread_pool ioc;
ssl::context ctx(ssl::context::sslv23_client);
ctx.set_default_verify_paths();
for (auto query : {"/delay/2", "/delay/5"}) {
try {
httpsClass client(make_strand(ioc), ctx, "httpbin.org");
auto res = client.performRequest({http::verb::get, query, 11});
log("Request submitted... waiting for response");
log("Response: {}", res.get());
} catch (boost::system::system_error const& se) {
auto const& ec = se.code();
log("Error sending/receiving:{} at {}", ec.message(), ec.location());
} catch (std::exception const& e) {
log("Error sending/receiving:{}", e.what());
}
}
ioc.join();
}
As you can see this test will run two requests against https://httpbin.org/#/Dynamic_data/get_delay__delay_. The second will timeout because 5s exceeds the 3s expiration on the ssl_stream_.
Full Demo
Live On Coliru
#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/beast.hpp>
#include <boost/beast/ssl.hpp>
#include <fmt/ostream.h>
#include <fmt/ranges.h>
#include <optional>
using namespace std::chrono_literals;
namespace net = boost::asio;
namespace beast = boost::beast;
namespace http = beast::http;
namespace ssl = net::ssl;
using net::ip::tcp;
////// LOG STUBS
template <> struct fmt::formatter<boost::source_location> : fmt::ostream_formatter {};
template <> struct fmt::formatter<tcp::endpoint> : fmt::ostream_formatter {};
template <bool isRequest, typename... Args>
struct fmt::formatter<http::message<isRequest, Args...>> : fmt::ostream_formatter {};
static inline void log(auto const& fmt, auto const&... args) {
fmt::print(fmt::runtime(fmt), args...);
fmt::print("\n");
std::fflush(stdout);
}
////// END LOG STUBS
class httpsClass {
ssl::context& ctx_;
std::string host_;
std::optional<beast::ssl_stream<beast::tcp_stream>> ssl_stream_;
beast::flat_buffer buffer_;
static constexpr auto kTimeout = 3s;
public:
httpsClass(net::any_io_executor ex, ssl::context& ctx, std::string host)
: ctx_(ctx)
, host_(host)
, ssl_stream_(std::in_place, ex, ctx_) {
auto ep = tcp::resolver(ex).resolve(host, "https");
ssl_stream_->next_layer().connect(ep);
ssl_stream_->handshake(ssl::stream_base::handshake_type::client);
log("Successfully connected to {} for {}",
ssl_stream_->next_layer().socket().remote_endpoint(), ep->host_name());
}
using Request = http::request<http::string_body>;
using Response = http::response<http::dynamic_body>;
std::future<Response> performRequest(Request const&);
};
std::future<httpsClass::Response>
httpsClass::performRequest(Request const& request) {
std::promise<Response> promise;
auto fut = promise.get_future();
auto coro = [this, r = request, p = std::move(promise)] //
(net::yield_context yield) mutable {
try {
auto& s = *ssl_stream_;
get_lowest_layer(s).expires_after(kTimeout);
r.prepare_payload();
r.set(http::field::host, host_);
auto sent = http::async_write(s, r, yield);
log("Sent: {}", sent);
http::response<http::dynamic_body> res;
auto received = http::async_read(s, buffer_, res, yield);
log("Received: {}", received);
p.set_value(std::move(res));
} catch (...) {
p.set_exception(std::current_exception());
}
};
spawn(ssl_stream_->get_executor(), std::move(coro));
return fut;
}
int main() {
net::thread_pool ioc;
ssl::context ctx(ssl::context::sslv23_client);
ctx.set_default_verify_paths();
for (auto query : {"/delay/2", "/delay/5"}) {
try {
httpsClass client(make_strand(ioc), ctx, "httpbin.org");
auto res = client.performRequest({http::verb::get, query, 11});
log("Request submitted... waiting for response");
log("Response: {}", res.get());
} catch (boost::system::system_error const& se) {
auto const& ec = se.code();
log("Error sending/receiving:{} at {}", ec.message(), ec.location());
} catch (std::exception const& e) {
log("Error sending/receiving:{}", e.what());
}
}
ioc.join();
}
Live on my system:
Hey, I'm trying to wrap a class provided by a third-party library to use Boost coroutines. The library also uses Boost, but for its async operations it uses completion handlers. Below is a simplified example that one could try. I think I'm close, but for some reason the awaitable returned from async_connect is of type void, whereas I would like boost::system::error_code to be returned. What am I missing?
#include <boost/asio.hpp>
#include <boost/asio/awaitable.hpp>
#include <boost/asio/co_spawn.hpp>
#include <iostream>
using AsyncHandler = std::function<void(boost::system::error_code)>;
struct LibraryClient
{
LibraryClient(boost::asio::io_context& ioc)
: socket{ioc}
{}
boost::asio::ip::tcp::socket socket;
void async_connect(AsyncHandler handler = {})
{
boost::system::error_code ec;
boost::asio::ip::address ip_address = boost::asio::ip::address::from_string("127.0.0.1", ec);
boost::asio::ip::tcp::endpoint ep(ip_address, 9999);
socket.async_connect(ep, std::move(handler));
}
};
template<class CompletitionToken = boost::asio::use_awaitable_t<>>
auto do_async_connect(LibraryClient& client, CompletitionToken&& token = {})
{
auto initiate = [&client]<class H>(H&& self) mutable
{
client.async_connect([self = std::make_shared<H>(std::forward<H>(self))](auto&& r)
{
(*self)(r);
});
};
return boost::asio::async_initiate<
CompletitionToken, boost::system::error_code(boost::system::error_code)
>(initiate, token);
}
struct LibraryClientWrapper
{
LibraryClient client;
boost::asio::awaitable<boost::system::error_code> async_connect()
{
//auto ec = co_await do_something_with_client();
const auto ec = co_await do_async_connect(client);
}
};
int main()
{
auto ioc = boost::asio::io_context{};
auto client = LibraryClientWrapper{LibraryClient{ioc}};
ioc.run();
}
EDIT
It seems that I've found something. I slightly modified the code and removed everything not needed for the purpose of this example.
#include <boost/asio.hpp>
#include <boost/asio/awaitable.hpp>
#include <boost/asio/co_spawn.hpp>
#include <iostream>
#include <cassert>
template<class CompletitionToken>
auto do_async_connect(LibraryClient& client, CompletitionToken&& token)
{
auto initiate = [](auto&& handler) {
handler(nullptr, 90);
};
return boost::asio::async_initiate<CompletitionToken, void(Eptr, int)>(
initiate, std::forward<CompletitionToken>(token)
);
}
struct LibraryClientWrapper
{
LibraryClient client;
boost::asio::awaitable<void> async_connect()
{
const auto ec = co_await do_async_connect(client, boost::asio::use_awaitable);
assert(ec == 90);
}
};
void rethrow_exception(std::exception_ptr eptr)
{
if (eptr)
{
std::rethrow_exception(eptr);
}
}
int main()
{
auto ioc = boost::asio::io_context{};
auto client = LibraryClientWrapper{LibraryClient{ioc}};
boost::asio::co_spawn(ioc, client.async_connect(), rethrow_exception);
ioc.run();
}
As you can see, I changed the signature to take both std::exception_ptr and int, and this resulted in the int being returned properly from the coroutine. But I don't get why exactly this signature is required, in particular why std::exception_ptr has to be the first parameter.
I followed this and this
Well, I managed to, let's say, solve it. But I don't get why the signature of the handler has to be of the form void(std::exception_ptr, error_code) in order to return error_code. I also failed to find it in the Boost documentation, and I would be grateful if anyone could provide some kind of explanation. I believe this could be better and also more generic. So far it doesn't handle, e.g., multiple return values like we have in async_send, where the completion handler has to be able to handle two params like error_code and bytes_sent.
#include <boost/asio.hpp>
#include <iostream>
using AsyncHandler = std::function<void(boost::system::error_code)>;
void rethrow_exception(std::exception_ptr eptr)
{
if (eptr)
{
std::rethrow_exception(eptr);
}
}
using Eptr = std::exception_ptr;
struct LibraryClient
{
LibraryClient(LibraryClient&&) = default;
LibraryClient(boost::asio::io_context& ioc)
: socket{ioc}
{}
boost::asio::ip::tcp::socket socket;
void async_connect(AsyncHandler handler = {})
{
boost::system::error_code ec;
boost::asio::ip::address ip_address = boost::asio::ip::address::from_string("127.0.0.1", ec);
boost::asio::ip::tcp::endpoint ep(ip_address, 9999);
socket.async_connect(ep, std::move(handler));
}
void async_disconnect(AsyncHandler handler = {})
{
auto ec = boost::system::error_code{};
socket.shutdown(boost::asio::ip::tcp::socket::shutdown_send, ec);
}
};
template<class R, class Func, class CompletitionHandler = boost::asio::use_awaitable_t<>>
auto awaitable_call(Func&& func, CompletitionHandler&& handler = {})
{
using Signature_t = void(Eptr, R);
auto initiate = [func = std::forward<Func>(func)]<class Handler>(Handler&& self) mutable
{
std::invoke(func, [self = std::make_shared<Handler>(std::forward<Handler>(self))](auto&&... args) {
(*self)(std::current_exception(), std::forward<decltype(args)>(args)...);
});
};
return boost::asio::async_initiate<CompletitionHandler, Signature_t>(initiate, handler);
}
template<class Func, class O, class...Args>
auto bind_awaitable_func(Func&& func, O&& o, Args&&...args)
{
return std::bind(std::forward<Func>(func),
std::forward<O>(o),
std::forward<Args>(args)...,
std::placeholders::_1
);
}
struct LibraryClientWrapper
{
LibraryClient client;
using Impl = LibraryClient;
boost::asio::awaitable<void> async_connect()
{
const auto r1 = co_await awaitable_call<boost::system::error_code>(
bind_awaitable_func(&LibraryClient::async_connect, std::ref(client))
);
}
boost::asio::awaitable<void> async_disconnect()
{
const auto r1 = co_await awaitable_call<boost::system::error_code>(
bind_awaitable_func(&LibraryClient::async_disconnect, std::ref(client))
);
}
};
int main()
{
auto ioc = boost::asio::io_context{};
auto client = LibraryClientWrapper{LibraryClient{ioc}};
boost::asio::co_spawn(ioc, client.async_disconnect(), rethrow_exception);
ioc.run();
}
I don't know why, but I can't wrap my head around the Boost.Beast websocket server and how you can (or should) interact with it.
The basic program I made looks like this, across two classes (WebSocketListener and WebSocketSession):
https://www.boost.org/doc/libs/develop/libs/beast/example/websocket/server/async/websocket_server_async.cpp
Everything works great: I can connect, and it echoes messages. We will only ever have one active session, and I'm struggling to understand how I can interface with this session from outside its class, for example in my int main() or in another class that may be responsible for issuing reads/writes. We will be using a simple Command design pattern: commands arrive asynchronously into a buffer, get processed against hardware, and the results are written back out with async_write. The reading and queuing are straightforward and will be done in the WebSocketSession, but every example I see for writing just reads/writes directly inside the session and never takes external input.
I've seen examples using things like boost::asio::async_write(socket, buffer, ...) but I'm struggling to understand how I get a reference to said socket when the session is created by the listener itself.
Instead of depending on the socket from outside the session, I'd make the session depend on your program logic.
That's because the session (connection) governs its own lifetime, arriving spontaneously and potentially disconnecting spontaneously. Your hardware, most likely, doesn't.
So, borrowing the concept of "Dependency Injection", tell your listener about your application logic and then call into that from the session. (The listener will "inject" the dependency into each newly created session.)
Let's start from a simplified/modernized version of your linked example.
Now, where we prepare a response, you want your own logic injected, so let's write it how we would imagine it:
void on_read(beast::error_code ec, std::size_t /*bytes_transferred*/) {
if (ec == websocket::error::closed) return;
if (ec.failed()) return fail(ec, "read");
// Process the message
response_ = logic_->Process(beast::buffers_to_string(buffer_));
ws_.async_write(
net::buffer(response_),
beast::bind_front_handler(&session::on_write, shared_from_this()));
}
Here we declare the members and initialize them from the constructor:
std::string response_;
std::shared_ptr<AppDomain::Logic> logic_;
public:
explicit session(tcp::socket&& socket,
std::shared_ptr<AppDomain::Logic> logic)
: ws_(std::move(socket))
, logic_(logic) {}
Now, we need to inject the listener with the logic so we can pass it along:
class listener : public std::enable_shared_from_this<listener> {
net::any_io_executor ex_;
tcp::acceptor acceptor_;
std::shared_ptr<AppDomain::Logic> logic_;
public:
listener(net::any_io_executor ex, tcp::endpoint endpoint,
std::shared_ptr<AppDomain::Logic> logic)
: ex_(ex)
, acceptor_(ex)
, logic_(logic) {
So that we can pass it along:
void on_accept(beast::error_code ec, tcp::socket socket) {
if (ec) {
fail(ec, "accept");
} else {
std::make_shared<session>(std::move(socket), logic_)->run();
}
// Accept another connection
do_accept();
}
Now making the real logic in main:
auto logic = std::make_shared<AppDomain::Logic>("StackOverflow Demo/");
try {
// The io_context is required for all I/O
net::thread_pool ioc(threads);
std::make_shared<listener>(ioc.get_executor(),
tcp::endpoint{address, port}, logic)
->run();
ioc.join();
} catch (beast::system_error const& se) {
fail(se.code(), "listener");
}
Demo Logic
Just for fun, let's implement some random logic, that might be implemented in hardware in the future:
namespace AppDomain {
struct Logic {
std::string banner;
Logic(std::string msg) : banner(std::move(msg)) {}
std::string Process(std::string request) {
std::cout << "Processing: " << std::quoted(request) << std::endl;
std::string result;
auto fold = [&result](auto op, double initial) {
return [=, &result](auto& ctx) {
auto& args = _attr(ctx);
auto v = accumulate(args.begin(), args.end(), initial, op);
result = "Fold:" + std::to_string(v);
};
};
auto invalid = [&result](auto& ctx) {
result = "Invalid Command: " + _attr(ctx);
};
using namespace boost::spirit::x3;
auto args = rule<void, std::vector<double>>{} = '(' >> double_ % ',' >> ')';
auto add = "adding" >> args[fold(std::plus<>{}, 0)];
auto mul = "multiplying" >> args[fold(std::multiplies<>{}, 1)];
auto err = lexeme[+char_][invalid];
phrase_parse(begin(request), end(request), add | mul | err, blank);
return banner + result;
}
};
} // namespace AppDomain
Now you can see it in action: Full Listing
Where To Go From Here
What if you need multiple responses for one request?
You need a queue. I usually call those an outbox, so searching for outbox_, _outbox etc. will give lots of examples.
Those examples will also show how to deal with other situations where writes can be "externally initiated", and how to safely enqueue those. Perhaps a very engaging example is here: How to batch send unsent messages in asio.
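A minimal sketch of that outbox pattern, in terms of the session class below (the outbox_ member, say a std::deque<std::string>, and the send() entry point are assumptions here, not part of the listing, and not necessarily how the UPDATE below spells it):

// All access to outbox_ is funnelled through the session's executor (its strand),
// and only one async_write is in flight at any time.
void session::send(std::string msg) {
    net::post(ws_.get_executor(),
              [self = shared_from_this(), m = std::move(msg)]() mutable {
                  self->outbox_.push_back(std::move(m));
                  if (self->outbox_.size() == 1) // no write currently in flight
                      self->do_write();
              });
}

void session::do_write() {
    ws_.async_write(
        net::buffer(outbox_.front()),
        [self = shared_from_this()](beast::error_code ec, std::size_t /*bytes*/) {
            if (ec) return fail(ec, "write");
            self->outbox_.pop_front();
            if (!self->outbox_.empty())
                self->do_write(); // keep draining the queue
        });
}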
Listing For Reference
In case the links go dead in the future:
#include <boost/algorithm/string/trim.hpp>
#include <boost/asio.hpp>
#include <boost/beast.hpp>
#include <filesystem>
#include <functional>
#include <iostream>
static std::string g_app_name = "app-logic-service";
#include <boost/core/demangle.hpp> // just for our demo logic
#include <boost/spirit/home/x3.hpp> // idem
#include <numeric> // idem
namespace AppDomain {
struct Logic {
std::string banner;
Logic(std::string msg) : banner(std::move(msg)) {}
std::string Process(std::string request) {
std::string result;
auto fold = [&result](auto op, double initial) {
return [=, &result](auto& ctx) {
auto& args = _attr(ctx);
auto v = accumulate(args.begin(), args.end(), initial, op);
result = "Fold:" + std::to_string(v);
};
};
auto invalid = [&result](auto& ctx) {
result = "Invalid Command: " + _attr(ctx);
};
using namespace boost::spirit::x3;
auto args = rule<void, std::vector<double>>{} = '(' >> double_ % ',' >> ')';
auto add = "adding" >> args[fold(std::plus<>{}, 0)];
auto mul = "multiplying" >> args[fold(std::multiplies<>{}, 1)];
auto err = lexeme[+char_][invalid];
phrase_parse(begin(request), end(request), add | mul | err, blank);
return banner + result;
}
};
} // namespace AppDomain
namespace beast = boost::beast; // from <boost/beast.hpp>
namespace http = beast::http; // from <boost/beast/http.hpp>
namespace websocket = beast::websocket; // from <boost/beast/websocket.hpp>
namespace net = boost::asio; // from <boost/asio.hpp>
using tcp = boost::asio::ip::tcp; // from <boost/asio/ip/tcp.hpp>
// Report a failure
void fail(beast::error_code ec, char const* what) {
std::cerr << what << ": " << ec.message() << "\n";
}
class session : public std::enable_shared_from_this<session> {
websocket::stream<beast::tcp_stream> ws_;
beast::flat_buffer buffer_;
std::string response_;
std::shared_ptr<AppDomain::Logic> logic_;
public:
explicit session(tcp::socket&& socket,
std::shared_ptr<AppDomain::Logic> logic)
: ws_(std::move(socket))
, logic_(logic) {}
void run() {
// Get on the correct executor
// strand for thread safety
dispatch(
ws_.get_executor(),
beast::bind_front_handler(&session::on_run, shared_from_this()));
}
private:
void on_run() {
// Set suggested timeout settings for the websocket
ws_.set_option(websocket::stream_base::timeout::suggested(
beast::role_type::server));
// Set a decorator to change the Server of the handshake
ws_.set_option(websocket::stream_base::decorator(
[](websocket::response_type& res) {
res.set(http::field::server,
std::string(BOOST_BEAST_VERSION_STRING) + " " +
g_app_name);
}));
// Accept the websocket handshake
ws_.async_accept(
beast::bind_front_handler(&session::on_accept, shared_from_this()));
}
void on_accept(beast::error_code ec) {
if (ec)
return fail(ec, "accept");
do_read();
}
void do_read() {
ws_.async_read(
buffer_,
beast::bind_front_handler(&session::on_read, shared_from_this()));
}
void on_read(beast::error_code ec, std::size_t /*bytes_transferred*/) {
if (ec == websocket::error::closed) return;
if (ec.failed()) return fail(ec, "read");
// Process the message
auto request = boost::algorithm::trim_copy(
beast::buffers_to_string(buffer_.data()));
std::cout << "Processing: " << std::quoted(request) << " from "
<< beast::get_lowest_layer(ws_).socket().remote_endpoint()
<< std::endl;
response_ = logic_->Process(request);
ws_.async_write(
net::buffer(response_),
beast::bind_front_handler(&session::on_write, shared_from_this()));
}
void on_write(beast::error_code ec, std::size_t bytes_transferred) {
boost::ignore_unused(bytes_transferred);
if (ec)
return fail(ec, "write");
// Clear the buffer
buffer_.consume(buffer_.size());
// Do another read
do_read();
}
};
// Accepts incoming connections and launches the sessions
class listener : public std::enable_shared_from_this<listener> {
net::any_io_executor ex_;
tcp::acceptor acceptor_;
std::shared_ptr<AppDomain::Logic> logic_;
public:
listener(net::any_io_executor ex, tcp::endpoint endpoint,
std::shared_ptr<AppDomain::Logic> logic)
: ex_(ex)
, acceptor_(ex)
, logic_(logic) {
acceptor_.open(endpoint.protocol());
acceptor_.set_option(tcp::acceptor::reuse_address(true));
acceptor_.bind(endpoint);
acceptor_.listen(tcp::acceptor::max_listen_connections);
}
// Start accepting incoming connections
void run() { do_accept(); }
private:
void do_accept() {
// The new connection gets its own strand
acceptor_.async_accept(make_strand(ex_),
beast::bind_front_handler(&listener::on_accept,
shared_from_this()));
}
void on_accept(beast::error_code ec, tcp::socket socket) {
if (ec) {
fail(ec, "accept");
} else {
std::make_shared<session>(std::move(socket), logic_)->run();
}
// Accept another connection
do_accept();
}
};
int main(int argc, char* argv[]) {
g_app_name = std::filesystem::path(argv[0]).filename();
if (argc != 4) {
std::cerr << "Usage: " << g_app_name << " <address> <port> <threads>\n"
<< "Example:\n"
<< " " << g_app_name << " 0.0.0.0 8080 1\n";
return 1;
}
auto const address = net::ip::make_address(argv[1]);
auto const port = static_cast<uint16_t>(std::atoi(argv[2]));
auto const threads = std::max<int>(1, std::atoi(argv[3]));
auto logic = std::make_shared<AppDomain::Logic>("StackOverflow Demo/");
try {
// The io_context is required for all I/O
net::thread_pool ioc(threads);
std::make_shared<listener>(ioc.get_executor(),
tcp::endpoint{address, port}, logic)
->run();
ioc.join();
} catch (beast::system_error const& se) {
fail(se.code(), "listener");
}
}
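For reference, the demo AppDomain::Logic above parses commands like adding (1, 2, 3) or multiplying (2, 3, 4). A standalone sketch (hypothetical, in a separate translation unit reusing the Logic definition and includes from the listing above) of what Process returns:
int main() {
    AppDomain::Logic logic("StackOverflow Demo/");
    std::cout << logic.Process("adding (1, 2, 3)") << "\n";      // expected: StackOverflow Demo/Fold:6.000000
    std::cout << logic.Process("multiplying (2, 3, 4)") << "\n"; // expected: StackOverflow Demo/Fold:24.000000
}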
UPDATE
In response to the comments I reified the outbox pattern again. Note some of the comments in the code.
Compiler Explorer
#include <boost/algorithm/string/trim.hpp>
#include <boost/asio.hpp>
#include <boost/beast.hpp>
#include <deque>
#include <filesystem>
#include <functional>
#include <iostream>
#include <list>
static std::string g_app_name = "app-logic-service";
#include <boost/core/demangle.hpp> // just for our demo logic
#include <boost/spirit/home/x3.hpp> // idem
#include <numeric> // idem
namespace AppDomain {
struct Logic {
std::string banner;
Logic(std::string msg) : banner(std::move(msg)) {}
std::string Process(std::string request) {
std::string result;
auto fold = [&result](auto op, double initial) {
return [=, &result](auto& ctx) {
auto& args = _attr(ctx);
auto v = accumulate(args.begin(), args.end(), initial, op);
result = "Fold:" + std::to_string(v);
};
};
auto invalid = [&result](auto& ctx) {
result = "Invalid Command: " + _attr(ctx);
};
using namespace boost::spirit::x3;
auto args = rule<void, std::vector<double>>{} = '(' >> double_ % ',' >> ')';
auto add = "adding" >> args[fold(std::plus<>{}, 0)];
auto mul = "multiplying" >> args[fold(std::multiplies<>{}, 1)];
auto err = lexeme[+char_][invalid];
phrase_parse(begin(request), end(request), add | mul | err, blank);
return banner + result;
}
};
} // namespace AppDomain
namespace beast = boost::beast; // from <boost/beast.hpp>
namespace http = beast::http; // from <boost/beast/http.hpp>
namespace websocket = beast::websocket; // from <boost/beast/websocket.hpp>
namespace net = boost::asio; // from <boost/asio.hpp>
using tcp = boost::asio::ip::tcp; // from <boost/asio/ip/tcp.hpp>
// Report a failure
void fail(beast::error_code ec, char const* what) {
std::cerr << what << ": " << ec.message() << "\n";
}
class session : public std::enable_shared_from_this<session> {
websocket::stream<beast::tcp_stream> ws_;
beast::flat_buffer buffer_;
std::shared_ptr<AppDomain::Logic> logic_;
public:
explicit session(tcp::socket&& socket,
std::shared_ptr<AppDomain::Logic> logic)
: ws_(std::move(socket))
, logic_(logic) {}
void run() {
// Get on the correct executor
// strand for thread safety
dispatch(
ws_.get_executor(),
beast::bind_front_handler(&session::on_run, shared_from_this()));
}
void post_message(std::string msg) {
post(ws_.get_executor(),
[self = shared_from_this(), this, msg = std::move(msg)] {
do_post_message(std::move(msg));
});
}
private:
void on_run() {
// on the strand
// Set suggested timeout settings for the websocket
ws_.set_option(websocket::stream_base::timeout::suggested(
beast::role_type::server));
// Set a decorator to change the Server of the handshake
ws_.set_option(websocket::stream_base::decorator(
[](websocket::response_type& res) {
res.set(http::field::server,
std::string(BOOST_BEAST_VERSION_STRING) + " " +
g_app_name);
}));
// Accept the websocket handshake
ws_.async_accept(
beast::bind_front_handler(&session::on_accept, shared_from_this()));
}
void on_accept(beast::error_code ec) {
// on the strand
if (ec)
return fail(ec, "accept");
do_read();
}
void do_read() {
// on the strand
buffer_.clear();
ws_.async_read(
buffer_,
beast::bind_front_handler(&session::on_read, shared_from_this()));
}
void on_read(beast::error_code ec, std::size_t /*bytes_transferred*/) {
// on the strand
if (ec == websocket::error::closed) return;
if (ec.failed()) return fail(ec, "read");
// Process the message
auto request = boost::algorithm::trim_copy(
beast::buffers_to_string(buffer_.data()));
std::cout << "Processing: " << std::quoted(request) << " from "
<< beast::get_lowest_layer(ws_).socket().remote_endpoint()
<< std::endl;
do_post_message(logic_->Process(request)); // already on the strand
do_read();
}
std::deque<std::string> _outbox;
void do_post_message(std::string msg) {
// on the strand
_outbox.push_back(std::move(msg));
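        // A write is in flight whenever the outbox is non-empty, so only kick
        // off the write loop when the queue transitions from empty to
        // non-empty; otherwise the already-running loop picks this message up.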
if (_outbox.size() == 1)
do_write_loop();
}
void do_write_loop() {
// on the strand
if (_outbox.empty())
return;
ws_.async_write( //
net::buffer(_outbox.front()),
[self = shared_from_this(), this] //
(beast::error_code ec, size_t bytes_transferred) {
// on the strand
boost::ignore_unused(bytes_transferred);
if (ec)
return fail(ec, "write");
_outbox.pop_front();
do_write_loop();
});
}
};
// Accepts incoming connections and launches the sessions
class listener : public std::enable_shared_from_this<listener> {
net::any_io_executor ex_;
tcp::acceptor acceptor_;
std::shared_ptr<AppDomain::Logic> logic_;
public:
listener(net::any_io_executor ex, tcp::endpoint endpoint,
std::shared_ptr<AppDomain::Logic> logic)
: ex_(ex)
, acceptor_(make_strand(ex)) // NOTE to guard sessions_
, logic_(logic) {
acceptor_.open(endpoint.protocol());
acceptor_.set_option(tcp::acceptor::reuse_address(true));
acceptor_.bind(endpoint);
acceptor_.listen(tcp::acceptor::max_listen_connections);
}
// Start accepting incoming connections
void run() { do_accept(); }
void broadcast(std::string msg) {
post(acceptor_.get_executor(),
beast::bind_front_handler(&listener::do_broadcast,
shared_from_this(), std::move(msg)));
}
private:
using handle_t = std::weak_ptr<session>;
std::list<handle_t> sessions_;
void do_broadcast(std::string const& msg) {
for (auto handle : sessions_)
if (auto sess = handle.lock())
sess->post_message(msg);
}
void do_accept() {
// The new connection gets its own strand
acceptor_.async_accept(make_strand(ex_),
beast::bind_front_handler(&listener::on_accept,
shared_from_this()));
}
void on_accept(beast::error_code ec, tcp::socket socket) {
// on the strand
if (ec) {
fail(ec, "accept");
} else {
auto sess = std::make_shared<session>(std::move(socket), logic_);
sessions_.emplace_back(sess);
// optionally:
sessions_.remove_if(std::mem_fn(&handle_t::expired));
sess->run();
}
// Accept another connection
do_accept();
}
};
static void emulate_hardware_stuff(std::shared_ptr<listener> srv) {
using std::this_thread::sleep_for;
using namespace std::chrono_literals;
// Extremely simplistic. Instead I'd recommend a `steady_timer` with
// `async_wait` here, but since I'm just making a sketch...
unsigned i = 0;
while (true) {
sleep_for(1s);
srv->broadcast("Hardware thing #" + std::to_string(++i));
}
}
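// Aside (sketch, not in the original answer): the comment above recommends a
// steady_timer instead of a sleep loop; a minimal version could look like this.
// It is unused below and shown for illustration only.
static void schedule_broadcast(std::shared_ptr<net::steady_timer> timer,
                               std::shared_ptr<listener> srv, unsigned i = 0) {
    timer->expires_after(std::chrono::seconds(1));
    timer->async_wait([=](beast::error_code ec) {
        if (ec) return; // e.g. operation_aborted on shutdown
        srv->broadcast("Hardware thing #" + std::to_string(i + 1));
        schedule_broadcast(timer, srv, i + 1); // re-arm for the next tick
    });
}
// usage sketch: auto timer = std::make_shared<net::steady_timer>(ioc.get_executor());
//               schedule_broadcast(timer, srv);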
int main(int argc, char* argv[]) {
g_app_name = std::filesystem::path(argv[0]).filename();
if (argc != 4) {
std::cerr << "Usage: " << g_app_name << " <address> <port> <threads>\n"
<< "Example:\n"
<< " " << g_app_name << " 0.0.0.0 8080 1\n";
return 1;
}
auto const address = net::ip::make_address(argv[1]);
auto const port = static_cast<uint16_t>(std::atoi(argv[2]));
auto const threads = std::max<int>(1, std::atoi(argv[3]));
auto logic = std::make_shared<AppDomain::Logic>("StackOverflow Demo/");
try {
// The io_context is required for all I/O
net::thread_pool ioc(threads);
auto srv = std::make_shared<listener>( //
ioc.get_executor(), //
tcp::endpoint{address, port}, //
logic);
srv->run();
std::thread something_hardware(emulate_hardware_stuff, srv);
ioc.join();
something_hardware.join();
} catch (beast::system_error const& se) {
fail(se.code(), "listener");
}
}
With Live Demo:
I'm trying to hack a SOCKS4 client into an existing application. The program uses asynchronous boost::asio.
So far I've worked out that I need to negotiate with the SOCKS4 server first:
boost::asio::ip::tcp::endpoint socks_proxy{boost::asio::ip::make_address("127.0.0.1"),1080};
if( socks_proxy.protocol() != boost::asio::ip::tcp::v4() )
{
throw boost::system::system_error(
boost::asio::error::address_family_not_supported);
}
....
boost::asio::ip::tcp::socket* m_socket;
// negotiate with the socks server
// m_endpoint is an item in std::queue<boost::asio::ip::basic_endpoint<boost::asio::ip::tcp>> m_endpoints
boost::asio::ip::address_v4::bytes_type address_ = m_endpoint.address().to_v4().to_bytes();
unsigned short port = m_endpoint.port();
unsigned char port_high_byte_ = (port >> 8) & 0xff;
unsigned char port_low_byte_ = port & 0xff;
boost::array<boost::asio::const_buffer, 7> send_buffer =
{
{
boost::asio::buffer(&SOCKS_VERSION, 1), // const unsigned char SOCKS_VERSION = 0x04;
boost::asio::buffer(&SOCKS_CONNECT, 1), // const unsigned char SOCKS_CONNECT = 0x01;
boost::asio::buffer(&port_high_byte_, 1),
boost::asio::buffer(&port_low_byte_, 1),
boost::asio::buffer(address_),
boost::asio::buffer("userid"),
boost::asio::buffer(&null_byte_, 1) // unsigned char null_byte_ = 0;
}
};
// initiate socks
boost::asio::write( m_socket, send_buffer );
// check it worked
unsigned char status_;
boost::array<boost::asio::mutable_buffer, 5> reply_buffer =
{
{
boost::asio::buffer(&null_byte_, 1),
boost::asio::buffer(&status_, 1),
boost::asio::buffer(&port_high_byte_, 1),
boost::asio::buffer(&port_low_byte_, 1),
boost::asio::buffer(address_)
}
};
boost::asio::read( m_socket, reply_buffer );
if( ! ( null_byte_ == 0 && status_ == 0x5a ) )
{
std::cout << "Proxy connection failed.\n";
}
However, the existing application code basically does:
boost::asio::ip::tcp::socket* m_socket;
m_nonsecuresocket = std::make_shared<boost::asio::ip::tcp::socket>(m_io_service);
m_socket = m_nonsecuresocket.get();
m_socket->async_connect(m_endpoint,
m_io_strand.wrap(boost::bind(&CLASS::connect_handler, this, _1)));
so that even if I could get it to compile, the async_connect would disconnect the socket anyway.
How can I integrate the SOCKS4 client code into the async_connect()?
As I commented, I think your question requires a lot more focus. However, since this is actually a useful question and it might be good to have an example, I went ahead and implemented a socks4::async_proxy_connect operation:
tcp::socket sock{io};
tcp::endpoint
target({}, 80), // connect to localhost:http
proxy{{}, 1080}; // via SOCKS4 proxy on localhost:1080
socks4::async_proxy_connect(sock, target, proxy, handler);
// continue using sock
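Dropping that into the question's existing code path, the call site could look roughly like this (a sketch: m_socket, m_endpoint, m_io_strand and CLASS::connect_handler are the names from the question; the proxy endpoint is an assumption):
boost::asio::ip::tcp::endpoint socks_proxy{boost::asio::ip::make_address("127.0.0.1"), 1080};
socks4::async_proxy_connect(
    *m_socket, m_endpoint, socks_proxy,
    m_io_strand.wrap(boost::bind(&CLASS::connect_handler, this, _1)));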
Loose ends:
a synchronous version was initially not implemented (it is a lot simpler); it has since been added to the listing below
it does not include address resolution (same as your question). Integrating that would require quite a bit of the groundwork in boost::asio::async_connect that takes a resolver query. Sadly, that doesn't seem well factored for reuse.
Listing
File socks4.hpp
#include <boost/asio.hpp>
#include <boost/endian/arithmetic.hpp>
namespace socks4 { // threw in the kitchen sink for error codes
#ifdef STANDALONE_ASIO
using std::error_category;
using std::error_code;
using std::error_condition;
using std::system_error;
#else
namespace asio = boost::asio;
using boost::system::error_category;
using boost::system::error_code;
using boost::system::error_condition;
using boost::system::system_error;
#endif
enum class result_code {
ok = 0,
invalid_version = 1,
rejected_or_failed = 3,
need_identd = 4,
unconfirmed_userid = 5,
//
failed = 99,
};
auto const& get_result_category() {
struct impl : error_category {
const char* name() const noexcept override { return "result_code"; }
std::string message(int ev) const override {
switch (static_cast<result_code>(ev)) {
case result_code::ok: return "Success";
case result_code::invalid_version: return "SOCKS4 invalid reply version";
case result_code::rejected_or_failed: return "SOCKS4 rejected or failed";
case result_code::need_identd: return "SOCKS4 unreachable (client not running identd)";
case result_code::unconfirmed_userid: return "SOCKS4 identd could not confirm user ID";
case result_code::failed: return "SOCKS4 general unexpected failure";
default: return "unknown error";
}
}
error_condition
default_error_condition(int ev) const noexcept override {
return error_condition{ev, *this};
}
bool equivalent(int ev, error_condition const& condition)
const noexcept override {
return condition.value() == ev && &condition.category() == this;
}
bool equivalent(error_code const& error,
int ev) const noexcept override {
return error.value() == ev && &error.category() == this;
}
} const static instance;
return instance;
}
error_code make_error_code(result_code se) {
return error_code{
static_cast<std::underlying_type<result_code>::type>(se),
get_result_category()};
}
} // namespace socks4
template <>
struct boost::system::is_error_code_enum<socks4::result_code>
: std::true_type {};
namespace socks4 {
using namespace std::placeholders;
template <typename Endpoint> struct core_t {
Endpoint _target;
Endpoint _proxy;
core_t(Endpoint target, Endpoint proxy)
: _target(target)
, _proxy(proxy) {}
#pragma pack(push)
#pragma pack(1)
using ipv4_octets = boost::asio::ip::address_v4::bytes_type;
using net_short = boost::endian::big_uint16_t;
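// SOCKS4 CONNECT request on the wire: VN(1) CD(1) DSTPORT(2, big-endian) DSTIP(4),
// followed by a NUL-terminated USERID (appended separately in request_buffers below).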
struct alignas(void*) Req {
uint8_t version = 0x04;
uint8_t cmd = 0x01;
net_short port;
ipv4_octets address;
} _request{0x04, 0x01, _target.port(),
_target.address().to_v4().to_bytes()};
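// SOCKS4 reply on the wire: VN(1, always 0) STATUS(1, 0x5a..0x5d) DSTPORT(2) DSTIP(4).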
struct alignas(void*) Res {
uint8_t reply_version;
uint8_t status;
net_short port;
ipv4_octets address;
} _response;
#pragma pack(pop)
using const_buffer = boost::asio::const_buffer;
using mutable_buffer = boost::asio::mutable_buffer;
auto request_buffers(char const* szUserId) const {
return std::array<const_buffer, 2>{
boost::asio::buffer(&_request, sizeof(_request)),
boost::asio::buffer(szUserId, strlen(szUserId) + 1)};
}
auto response_buffers() {
return boost::asio::buffer(&_response, sizeof(_response));
}
error_code get_result(error_code ec = {}) const {
if (ec)
return ec;
if (_response.reply_version != 0)
return result_code::invalid_version;
switch (_response.status) {
case 0x5a: return result_code::ok; // Request granted
case 0x5B: return result_code::rejected_or_failed;
case 0x5C: return result_code::need_identd;
case 0x5D: return result_code::unconfirmed_userid;
}
return result_code::failed;
}
};
template <typename Socket, typename Completion>
struct async_proxy_connect_op {
using Endpoint = typename Socket::protocol_type::endpoint;
using executor_type = typename Socket::executor_type;
auto get_executor() { return _socket.get_executor(); }
private:
core_t<Endpoint> _core;
Socket& _socket;
std::string _userId;
Completion _handler;
public:
async_proxy_connect_op(Completion handler, Socket& s, Endpoint target,
Endpoint proxy, std::string user_id = {})
: _core(target, proxy)
, _socket(s)
, _userId(std::move(user_id))
, _handler(std::move(handler)) {}
using Self = std::unique_ptr<async_proxy_connect_op>;
void init(Self&& self) { operator()(self, INIT{}); }
private:
// states
struct INIT{};
struct CONNECT{};
struct SENT{};
struct ONRESPONSE{};
struct Binder {
Self _self;
template <typename... Args>
decltype(auto) operator()(Args&&... args) {
return (*_self)(_self, std::forward<Args>(args)...);
}
};
void operator()(Self& self, INIT) {
_socket.async_connect(_core._proxy,
std::bind(Binder{std::move(self)}, CONNECT{}, _1));
}
void operator()(Self& self, CONNECT, error_code ec) {
if (ec) return _handler(ec);
boost::asio::async_write(
_socket,
_core.request_buffers(_userId.c_str()),
std::bind(Binder{std::move(self)}, SENT{}, _1, _2));
}
void operator()(Self& self, SENT, error_code ec, size_t xfer) {
if (ec) return _handler(ec);
auto buf = _core.response_buffers();
boost::asio::async_read(
_socket, buf, boost::asio::transfer_exactly(buffer_size(buf)),
std::bind(Binder{std::move(self)}, ONRESPONSE{}, _1, _2));
}
void operator()(Self& self, ONRESPONSE, error_code ec, size_t xfer) {
_handler(_core.get_result(ec));
}
};
template <typename Socket,
typename Endpoint = typename Socket::protocol_type::endpoint>
error_code proxy_connect(Socket& s, Endpoint ep, Endpoint proxy,
std::string const& user_id, error_code& ec) {
core_t<Endpoint> core(ep, proxy);
ec.clear();
s.connect(core._proxy, ec);
if (!ec)
boost::asio::write(s, core.request_buffers(user_id.c_str()),
ec);
auto buf = core.response_buffers();
if (!ec)
boost::asio::read(s, core.response_buffers(),
boost::asio::transfer_exactly(buffer_size(buf)), ec);
return ec = core.get_result(ec);
}
template <typename Socket,
typename Endpoint = typename Socket::protocol_type::endpoint>
void proxy_connect(Socket& s, Endpoint ep, Endpoint proxy,
std::string const& user_id = "") {
error_code ec;
if (proxy_connect(s, ep, proxy, user_id, ec))
throw system_error(ec);
}
template <typename Socket, typename Token,
typename Endpoint = typename Socket::protocol_type::endpoint>
auto async_proxy_connect(Socket& s, Endpoint ep, Endpoint proxy,
std::string user_id, Token&& token) {
using Result = asio::async_result<std::decay_t<Token>, void(error_code)>;
using Completion = typename Result::completion_handler_type;
Completion completion(std::forward<Token>(token));
Result result(completion);
using Op = async_proxy_connect_op<Socket, Completion>;
// make an owning self ptr, to serve a unique async chain
auto self =
std::make_unique<Op>(completion, s, ep, proxy, std::move(user_id));
self->init(std::move(self));
return result.get();
}
template <typename Socket, typename Token,
typename Endpoint = typename Socket::protocol_type::endpoint>
auto async_proxy_connect(Socket& s, Endpoint ep, Endpoint proxy, Token&& token) {
return async_proxy_connect<Socket, Token, Endpoint>(
s, ep, proxy, "", std::forward<Token>(token));
}
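// Aside (sketch, not part of the original answer): on Boost 1.70+ the same
// initiation can be expressed with asio::async_initiate instead of spelling
// out async_result/completion_handler_type by hand, reusing the Op type above:
template <typename Socket, typename Token,
          typename Endpoint = typename Socket::protocol_type::endpoint>
auto async_proxy_connect_v2(Socket& s, Endpoint ep, Endpoint proxy,
                            std::string user_id, Token&& token) {
    return asio::async_initiate<Token, void(error_code)>(
        [&s, ep, proxy, uid = std::move(user_id)](auto handler) mutable {
            using Op = async_proxy_connect_op<Socket, std::decay_t<decltype(handler)>>;
            auto self = std::make_unique<Op>(std::move(handler), s, ep, proxy,
                                             std::move(uid));
            self->init(std::move(self));
        },
        token);
}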
} // namespace socks4
Demo
File test.cpp
#include "socks4.hpp"
#include <boost/beast.hpp>
#include <boost/beast/http.hpp>
#include <iostream>
int main(int argc, char**) {
bool synchronous = argc > 1;
using boost::asio::ip::tcp;
boost::asio::thread_pool ctx(1); // just one thread will do
tcp::socket sock{ctx};
tcp::endpoint target(
boost::asio::ip::address_v4::from_string("173.203.57.63"), 80),
proxy{{}, 1080};
try {
if (synchronous) {
std::cerr << "Using synchronous interface" << std::endl;
socks4::proxy_connect(sock, target,
proxy); // throws system_error if failed
} else {
std::cerr << "Using asynchronous interface" << std::endl;
// using the async interface (still emulating synchronous by using
// future for brevity of this demo)
auto fut = socks4::async_proxy_connect(sock, target, proxy,
boost::asio::use_future);
fut.get(); // throws system_error if failed
}
// Now do a request using beast
namespace beast = boost::beast;
namespace http = beast::http;
{
http::request<http::empty_body> req(http::verb::get, "/", 11);
req.set(http::field::host, "coliru.stacked-crooked.com");
req.set(http::field::connection, "close");
std::cout << "-------\nRequest: " << req << "\n-------\n";
http::write(sock, req);
}
{
http::response<http::string_body> res;
beast::flat_buffer buf;
http::read(sock, buf, res);
std::cout << "\n-------\nResponse: " << res << "\n";
}
} catch(socks4::system_error const& se) {
std::cerr << "Error: " << se.code().message() << std::endl;
}
ctx.join();
}
Output
Using asynchronous interface
-------
Request: GET / HTTP/1.1
Host: coliru.stacked-crooked.com
Connection: close
-------
-------
Response: HTTP/1.1 200 OK
Content-Type: text/html;charset=utf-8
Content-Length: 8616
Server: WEBrick/1.4.2 (Ruby/2.5.1/2018-03-29) OpenSSL/1.0.2g
Date: Thu, 29 Apr 2021 19:05:03 GMT
Connection: close
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Frameset//EN">
<html>
<head>
<title>Coliru</title>
(rest of response omitted)
Using boost::asio, I'm coding network stuff.
I tried to build a simple send-and-receive-string protocol.
The sender first sends the string size to the receiver. Then the sender sends the actual string to the receiver.
In particular, I designed the following two protocols.
1. A sender holding a string sends it to a receiver. Upon receiving it, the receiver shows the string.
2. Execute the above protocol sequentially (two times).
I built the above protocols as shown below:
If I execute this protocol once, it works fine.
However, if I execute this protocol more than once (e.g. two times), the
string size that the receiver receives comes out wrong:
First time: 1365 bytes.
Second time: 779073 bytes (and it actually only read "7790", not "779073").
I found that os << data_size does not write the value in binary: "779073" is sent as a 6-byte string, but the receiver only reads 4 bytes of it.
How do I send and receive binary data using boost::asio and boost::asio::streambuf?
Receiver
// socket is already defined
// ** first step: recv data size
boost::asio::streambuf buf;
boost::asio::read(
socket,
buf,
boost::asio::transfer_exactly(sizeof(uint32_t))
);
std::istream iss(&buf);
uint32_t read_len;
iss >> read_len;
// ** second step: recv payload based on the data size
boost::asio::streambuf buf2;
read_len = boost::asio::read(socket, buf2,
boost::asio::transfer_exactly(read_len), error);
cout << " read "<< read_len << " bytes payload" << endl;
std::istream is_payload(&buf2);
std::string str;
is_payload >> str;
cout << str << endl;
Sender
// socket is already defined
string str=...; // some string to be sent
// ** first step: tell the string size to the receiver
uint32_t data_size = str.size();
boost::asio::streambuf send_buf;
std::ostream os(&send_buf);
os << data_size;
size_t sent_byte = boost::asio::write(socket, send_buf.data());
cout << sent_byte << endl; // debug purpose
// ** second step: send the actual string (payload)
sent_byte = boost::asio::write(socket, boost::asio::buffer(reinterpret_cast<const char*>(&str[0]), data_size));
cout << sent_byte << endl; // debug purpose
You can send the size in binary, but that requires you to take architectural differences between devices and operating systems into account¹.
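For instance, a minimal sketch of the direct fix (assuming the same socket and str variables as in the question, and Boost.Endian for byte order): send the length as a fixed 4-byte field in network byte order, then read exactly that many payload bytes back:
// sender -- needs <boost/endian/arithmetic.hpp>
boost::endian::big_uint32_t data_size = str.size(); // 4 bytes, big-endian on the wire
std::vector<boost::asio::const_buffer> out{
    boost::asio::buffer(&data_size, sizeof(data_size)), // length prefix
    boost::asio::buffer(str)};                          // payload
boost::asio::write(socket, out);
// receiver
boost::endian::big_uint32_t read_len;
boost::asio::read(socket, boost::asio::buffer(&read_len, sizeof(read_len)));
std::string payload(static_cast<uint32_t>(read_len), '\0');
boost::asio::read(socket, boost::asio::buffer(payload)); // exactly read_len bytes
// note: extracting with `istream >> str` would stop at whitespace; read into the string directly instead
std::cout << payload << std::endl;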
Here's my take on actually coding the protocol reusably:
//#define BOOST_ASIO_ENABLE_HANDLER_TRACKING
#include <boost/asio.hpp>
#include <boost/endian/arithmetic.hpp>
namespace ba = boost::asio;
using ba::ip::tcp;
using error_code = boost::system::error_code;
namespace Protocol { // your library
using net_size_t = boost::endian::big_int32_t; // This protocol uses Big-endian network byte order
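// Wire format: each message is a 4-byte length prefix (net_size_t) followed by that many payload bytes.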
template <typename Derived, typename Token, typename Sig = void(error_code, size_t)>
struct base_async_op : std::enable_shared_from_this<Derived> {
using base_type = base_async_op<Derived, Token, Sig>;
template <typename DeducedToken>
base_async_op(DeducedToken &&token) : _token(std::forward<DeducedToken>(token)) {}
using _Token = std::decay_t<Token>;
using _Init = ba::async_completion<_Token, Sig>;
using _Handler = typename _Init::completion_handler_type;
_Token _token;
_Init _init {_token};
auto get_allocator() const noexcept {
return (boost::asio::get_associated_allocator)(_init.completion_handler);
}
using executor_type = ba::associated_executor_t<_Handler>;
executor_type get_executor() const noexcept {
return (boost::asio::get_associated_executor)(_init.completion_handler);
}
Derived& derived() { return static_cast<Derived&>(*this); }
Derived const& derived() const { return static_cast<Derived const&>(*this); }
template <typename F>
auto wrap(F&& f) const {
//std::cout << "WRAP: " << typeid(derived().get_executor()).name() << "\n";
return ba::bind_executor(derived().get_executor(), std::forward<F>(f));
}
};
template <typename Derived, typename Stream, typename Token, typename Sig = void(error_code, size_t)>
struct stream_async_op : base_async_op<Derived, Token, Sig> {
using base_type = stream_async_op<Derived, Stream, Token, Sig>;
template <typename DeducedToken>
stream_async_op(Stream& s, DeducedToken &&token) : base_async_op<Derived, Token, Sig>(std::forward<DeducedToken>(token)), _stream(s) {}
Stream& _stream;
using executor_type = ba::associated_executor_t<typename stream_async_op::_Handler, decltype(std::declval<Stream>().get_executor())>;
executor_type get_executor() const noexcept {
return (boost::asio::get_associated_executor)(this->_init.completion_handler, _stream.get_executor());
}
};
template <typename AsyncStream, typename Buffer, typename Token>
auto async_transmit(AsyncStream& s, Buffer message_buffer, Token&& token) {
struct op : stream_async_op<op, AsyncStream, Token> {
using op::base_type::base_type;
using op::base_type::_init;
using op::base_type::_stream;
net_size_t _length[1];
auto run(Buffer buffer) {
auto self = this->shared_from_this();
_length[0] = ba::buffer_size(buffer);
ba::async_write(_stream, std::vector<ba::const_buffer> { ba::buffer(_length), buffer },
this->wrap([self,this](error_code ec, size_t transferred) { _init.completion_handler(ec, transferred); }));
return _init.result.get();
}
};
return std::make_shared<op>(s, std::forward<Token>(token))->run(message_buffer);
}
template <typename AsyncStream, typename Buffer, typename Token>
auto async_receive(AsyncStream& s, Buffer& output, Token&& token) {
struct op : stream_async_op<op, AsyncStream, Token> {
using op::base_type::base_type;
using op::base_type::_init;
using op::base_type::_stream;
net_size_t _length[1] = {0};
auto run(Buffer& output) {
auto self = this->shared_from_this();
ba::async_read(_stream, ba::buffer(_length), this->wrap([self, this, &output](error_code ec, size_t transferred) {
if (ec)
_init.completion_handler(ec, transferred);
else
ba::async_read(_stream, ba::dynamic_buffer(output), ba::transfer_exactly(_length[0]),
this->wrap([self, this](error_code ec, size_t transferred) {
_init.completion_handler(ec, transferred);
}));
}));
return _init.result.get();
}
};
return std::make_shared<op>(s, std::forward<Token>(token))->run(output);
}
template <typename Output = std::string, typename AsyncStream, typename Token>
auto async_receive(AsyncStream& s, Token&& token) {
struct op : stream_async_op<op, AsyncStream, Token, void(error_code, Output)> {
using op::base_type::base_type;
using op::base_type::_init;
using op::base_type::_stream;
Output _output;
net_size_t _length[1] = {0};
auto run() {
auto self = this->shared_from_this();
ba::async_read(_stream, ba::buffer(_length), [self,this](error_code ec, size_t) {
if (ec)
_init.completion_handler(ec, std::move(_output));
else
ba::async_read(_stream, ba::dynamic_buffer(_output), ba::transfer_exactly(_length[0]),
[self,this](error_code ec, size_t) { _init.completion_handler(ec, std::move(_output)); });
});
return _init.result.get();
}
};
return std::make_shared<op>(s, std::forward<Token>(token))->run();
}
} // Protocol
#include <iostream>
#include <iomanip>
int main() {
ba::io_context io;
tcp::socket sock(io);
sock.connect({tcp::v4(), 6767});
auto cont = [](auto name, auto continuation = []{}) { return [=](error_code ec, size_t transferred) {
std::cout << name << " completed (" << transferred << ", " << ec.message() << ")\n";
if (!ec) continuation();
}; };
auto report = [=](auto name) { return cont(name, []{}); };
// send chain
std::string hello = "Hello", world = "World";
Protocol::async_transmit(sock, ba::buffer(hello),
cont("Send hello", [&] { Protocol::async_transmit(sock, ba::buffer(world), report("Send world")); }
));
#ifndef DEMO_USE_FUTURE
// receive chain
std::string object1, object2;
Protocol::async_receive(sock, object1,
cont("Read object 1", [&] { Protocol::async_receive(sock, object2, report("Read object 2")); }));
io.run();
std::cout << "Response object 1: " << std::quoted(object1) << "\n";
std::cout << "Response object 2: " << std::quoted(object2) << "\n";
#else
// also possible, alternative completion mechanisms:
std::future<std::string> fut = Protocol::async_receive(sock, ba::use_future);
io.run();
std::cout << "Response object: " << std::quoted(fut.get()) << "\n";
#endif
}
When talking to a test server like:
xxd -p -r <<< '0000 0006 4e6f 2077 6179 0000 0005 4a6f 73c3 a90a' | netcat -l -p 6767 | xxd
The program prints
Send hello completed (9, Success)
Send world completed (9, Success)
Read object 1 completed (6, Success)
Read object 2 completed (5, Success)
Response object 1: "No way"
Response object 2: "José"
And the netcat side prints:
00000000: 0000 0005 4865 6c6c 6f00 0000 0557 6f72 ....Hello....Wor
00000010: 6c64 ld
Enabling handler tracking allows you to use handlerviz.pl to visualize the call chains:
Note: You can change big_int32_t to little_int32_t without any further change. Of course, you should change the payload on the server side to match:
xxd -p -r <<< '0600 0000 4e6f 2077 6179 0500 0000 4a6f 73c3 a90a' | netcat -l -p 6767 | xxd
¹ Endianness, e.g. using Boost Endian or ::ntohs, ::ntohl, ::htons and ::htonl
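For example, a minimal sketch of the same length prefix using the classic BSD-socket helpers instead of Boost.Endian (assuming a POSIX platform):
uint32_t len_net = htonl(static_cast<uint32_t>(str.size())); // host → network byte order, from <arpa/inet.h>
// send the 4 bytes of len_net followed by the payload; the receiver reads 4 bytes,
// applies ntohl(), and then reads exactly that many payload bytes.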