socks4 with asynchronous boost::asio - c++

I'm trying to hack a SOCKS4 client into an existing application. The program uses asynchronous boost::asio.
So far I've worked out that I need to negotiate with the SOCKS4 server first:
boost::asio::ip::tcp::endpoint socks_proxy{boost::asio::ip::make_address("127.0.0.1"),1080};
if( socks_proxy.protocol() != boost::asio::ip::tcp::v4() )
{
throw boost::system::system_error(
boost::asio::error::address_family_not_supported);
}
....
boost::asio::ip::tcp::socket* m_socket;
// negotiate with the socks server
// m_endpoint is an item in std::queue<boost::asio::ip::basic_endpoint<boost::asio::ip::tcp>> m_endpoints
boost::asio::ip::address_v4::bytes_type address_ = m_endpoint.address().to_v4().to_bytes();
unsigned short port = m_endpoint.port();
unsigned char port_high_byte_ = (port >> 8) & 0xff;
unsigned char port_low_byte_ = port & 0xff;
boost::array<boost::asio::const_buffer, 7> send_buffer =
{
{
boost::asio::buffer(&SOCKS_VERSION, 1), // const unsigned char SOCKS_VERSION = 0x04;
boost::asio::buffer(&SOCKS_CONNECT, 1), // const unsigned char SOCKS_VERSION = 0x04;
boost::asio::buffer(&port_high_byte_, 1),
boost::asio::buffer(&port_low_byte_, 1),
boost::asio::buffer(address_),
boost::asio::buffer("userid"),
boost::asio::buffer(&null_byte_, 1). // unsigned char null_byte_ = 0;
}
};
// initiate socks
boost::asio::write( m_socket, send_buffer );
// check it worked
unsigned char status_;
boost::array<boost::asio::mutable_buffer, 5> reply_buffer =
{
{
boost::asio::buffer(&null_byte_, 1),
boost::asio::buffer(&status_, 1),
boost::asio::buffer(&port_high_byte_, 1),
boost::asio::buffer(&port_low_byte_, 1),
boost::asio::buffer(address_)
}
};
boost::asio::read( m_socket, reply_buffer );
if( ! ( null_byte_ == 0 && status_ == 0x5a ) )
{
std::cout << "Proxy connection failed.\n";
}
However, the existing application code basically does:
boost::asio::ip::tcp::socket* m_socket;
m_nonsecuresocket = std::make_shared<boost::asio::ip::tcp::socket>(m_io_service);
m_socket = m_nonsecuresocket.get();
m_socket->async_connect(m_endpoint,
m_io_strand.wrap(boost::bind(&CLASS::connect_handler, this, _1)));
so that even if I could get it to compile, the async_connect would disconnect the socket anyway.
How can I integrate the SOCKS4 client code into the async_connect()?

As I commented, I think your question requires a lot more focus. However, since this is actually a useful question and it might be good to have an example, I went ahead and implemented a socks4::async_proxy_connect operation:
tcp::socket sock{io};
tcp::endpoint
target({}, 80), // connect to localhost:http
proxy{{}, 1080}; // via SOCKS4 proxy on localhost:1080
socks4::async_proxy_connect(sock, target, proxy, handler);
// continue using sock
Loose ends:
a synchronous version was initially not implemented (it is a lot simpler), but has since been added
does not include address resolution (just as your question). Integrating that would require quite a bit of the groundwork in boost::asio::async_connect that takes a resolver query. Sadly, that doesn't seem well factored for reuse.
Listing
File socks4.hpp
#include <boost/asio.hpp>
#include <boost/endian/arithmetic.hpp>
namespace socks4 { // threw in the kitchen sink for error codes
#ifdef STANDALONE_ASIO
using std::error_category;
using std::error_code;
using std::error_condition;
using std::system_error;
#else
namespace asio = boost::asio;
using boost::system::error_category;
using boost::system::error_code;
using boost::system::error_condition;
using boost::system::system_error;
#endif
// Library-level result codes for the SOCKS4 handshake. The values 3..5
// correspond in meaning to the server reply bytes 0x5B..0x5D (mapped in
// core_t::get_result); they are NOT the raw wire values themselves.
enum class result_code {
ok = 0,
invalid_version = 1, // reply version byte was not 0
rejected_or_failed = 3, // server replied 0x5B
need_identd = 4, // server replied 0x5C (server cannot reach client's identd)
unconirmed_userid = 5, // server replied 0x5D (identd denied user id); enumerator name is a typo, kept because it is referenced elsewhere
// catch-all for any reply byte outside the documented 0x5A..0x5D range
failed = 99,
};
// Returns the singleton error_category used to render result_code values
// as error_codes. Defined as a function-local static so the category has
// a unique address (required: categories are compared by identity).
auto const& get_result_category() {
struct impl : error_category {
const char* name() const noexcept override { return "result_code"; }
// Human-readable text for each result_code value.
std::string message(int ev) const override {
switch (static_cast<result_code>(ev)) {
case result_code::ok: return "Success";
case result_code::invalid_version: return "SOCKS4 invalid reply version";
case result_code::rejected_or_failed: return "SOCKS4 rejected or failed";
case result_code::need_identd: return "SOCKS4 unreachable (client not running identd)";
case result_code::unconirmed_userid: return "SOCKS4 identd could not confirm user ID";
case result_code::failed: return "SOCKS4 general unexpected failure";
default: return "unknown error";
}
}
// Map a value to a condition in this same category (no generic mapping).
error_condition
default_error_condition(int ev) const noexcept override {
return error_condition{ev, *this};
}
// Equivalence is strict: same value AND same category identity.
bool equivalent(int ev, error_condition const& condition)
const noexcept override {
return condition.value() == ev && &condition.category() == this;
}
bool equivalent(error_code const& error,
int ev) const noexcept override {
return error.value() == ev && &error.category() == this;
}
} const static instance;
return instance;
}
// ADL factory that converts a result_code into an error_code. Together
// with the is_error_code_enum specialization (present further down in
// this file), this lets functions returning error_code simply
// `return result_code::...;`.
error_code make_error_code(result_code se) {
    auto const raw = static_cast<std::underlying_type<result_code>::type>(se);
    return error_code{raw, get_result_category()};
}
} // namespace socks4
// Opt result_code into boost::system's error-code machinery so it
// converts implicitly to error_code via ADL make_error_code above.
template <>
struct boost::system::is_error_code_enum<socks4::result_code>
: std::true_type {};
namespace socks4 {
using namespace std::placeholders;
// Shared state for one SOCKS4 CONNECT negotiation: the target/proxy
// endpoints plus wire-format request/response PODs and helpers to turn
// them into asio buffers.
template <typename Endpoint> struct core_t {
Endpoint _target;
Endpoint _proxy;
core_t(Endpoint target, Endpoint proxy)
: _target(target)
, _proxy(proxy) {}
// Pack to 1 so Req/Res match the SOCKS4 wire layout exactly
// (1 version byte, 1 command/status byte, 2 port bytes, 4 address bytes).
#pragma pack(push)
#pragma pack(1)
using ipv4_octets = boost::asio::ip::address_v4::bytes_type;
// big-endian 16-bit integer: network byte order handled by Boost.Endian
using net_short = boost::endian::big_uint16_t;
struct alignas(void*) Req {
uint8_t version = 0x04; // SOCKS protocol version 4
uint8_t cmd = 0x01; // 0x01 = CONNECT
net_short port;
ipv4_octets address;
} _request{0x04, 0x01, _target.port(),
_target.address().to_v4().to_bytes()};
struct alignas(void*) Res {
uint8_t reply_version; // expected to be 0 in SOCKS4 replies
uint8_t status; // 0x5A..0x5D, see get_result below
net_short port;
ipv4_octets address;
} _response;
#pragma pack(pop)
using const_buffer = boost::asio::const_buffer;
using mutable_buffer = boost::asio::mutable_buffer;
// Buffers for the outgoing request: the fixed 8-byte header followed by
// the NUL-terminated user id (strlen+1 includes the terminator the
// protocol requires).
auto request_buffers(char const* szUserId) const {
return std::array<const_buffer, 2>{
boost::asio::buffer(&_request, sizeof(_request)),
boost::asio::buffer(szUserId, strlen(szUserId) + 1)};
}
// Buffer for the fixed-size 8-byte reply.
auto response_buffers() {
return boost::asio::buffer(&_response, sizeof(_response));
}
// Translate transport error + parsed reply into a single error_code.
// A transport error takes precedence over reply interpretation.
error_code get_result(error_code ec = {}) const {
if (ec)
return ec;
if (_response.reply_version != 0)
return result_code::invalid_version;
switch (_response.status) {
case 0x5a: return result_code::ok; // Request granted
case 0x5B: return result_code::rejected_or_failed;
case 0x5C: return result_code::need_identd;
case 0x5D: return result_code::unconirmed_userid;
}
return result_code::failed;
}
};
// Composed asynchronous operation implementing the SOCKS4 handshake:
// connect to proxy -> write CONNECT request -> read 8-byte reply ->
// invoke the completion handler with the translated error_code.
// The op lives in a unique_ptr ("Self") that is moved through each
// intermediate handler, so exactly one owner exists at all times.
template <typename Socket, typename Completion>
struct async_proxy_connect_op {
using Endpoint = typename Socket::protocol_type::endpoint;
using executor_type = typename Socket::executor_type;
// Associated executor: completion handlers run on the socket's executor.
auto get_executor() { return _socket.get_executor(); }
private:
core_t<Endpoint> _core;
Socket& _socket;
std::string _userId;
Completion _handler;
public:
async_proxy_connect_op(Completion handler, Socket& s, Endpoint target,
Endpoint proxy, std::string user_id = {})
: _core(target, proxy)
, _socket(s)
, _userId(std::move(user_id))
, _handler(std::move(handler)) {}
using Self = std::unique_ptr<async_proxy_connect_op>;
// Kick off the state machine; takes ownership of self.
void init(Self&& self) { operator()(self, INIT{}); }
private:
// states (empty tag types select the operator() overload per step)
struct INIT{};
struct CONNECT{};
struct SENT{};
struct ONRESPONSE{};
// Move-only callable that owns the op while an async step is pending
// and forwards the intermediate completion back into operator().
struct Binder {
Self _self;
template <typename... Args>
decltype(auto) operator()(Args&&... args) {
return (*_self)(_self, std::forward<Args>(args)...);
}
};
// Step 1: TCP-connect to the proxy endpoint.
void operator()(Self& self, INIT) {
_socket.async_connect(_core._proxy,
std::bind(Binder{std::move(self)}, CONNECT{}, _1));
}
// Step 2: connected — send the SOCKS4 CONNECT request.
void operator()(Self& self, CONNECT, error_code ec) {
if (ec) return _handler(ec);
boost::asio::async_write(
_socket,
_core.request_buffers(_userId.c_str()),
std::bind(Binder{std::move(self)}, SENT{}, _1, _2));
}
// Step 3: request sent — read exactly the fixed-size reply.
void operator()(Self& self, SENT, error_code ec, size_t xfer) {
if (ec) return _handler(ec);
auto buf = _core.response_buffers();
boost::asio::async_read(
_socket, buf, boost::asio::transfer_exactly(buffer_size(buf)),
std::bind(Binder{std::move(self)}, ONRESPONSE{}, _1, _2));
}
// Step 4: reply received — report the combined result to the caller.
void operator()(Self& self, ONRESPONSE, error_code ec, size_t xfer) {
_handler(_core.get_result(ec));
}
};
// Synchronous SOCKS4 handshake, non-throwing flavor.
// Connects to the proxy, writes the CONNECT request, then reads the
// fixed-size reply; each step is skipped once `ec` is set. Returns (and
// stores into `ec`) the combined transport/protocol result.
template <typename Socket,
          typename Endpoint = typename Socket::protocol_type::endpoint>
error_code proxy_connect(Socket& s, Endpoint ep, Endpoint proxy,
                         std::string const& user_id, error_code& ec) {
    core_t<Endpoint> core(ep, proxy);
    ec.clear();
    s.connect(core._proxy, ec);
    if (!ec)
        boost::asio::write(s, core.request_buffers(user_id.c_str()), ec);
    if (!ec) {
        auto reply = core.response_buffers();
        boost::asio::read(s, reply,
                          boost::asio::transfer_exactly(buffer_size(reply)),
                          ec);
    }
    return ec = core.get_result(ec);
}
// Synchronous SOCKS4 handshake, throwing flavor: delegates to the
// error_code overload and raises system_error on any failure.
template <typename Socket,
          typename Endpoint = typename Socket::protocol_type::endpoint>
void proxy_connect(Socket& s, Endpoint ep, Endpoint proxy,
                   std::string const& user_id = "") {
    error_code ec;
    proxy_connect(s, ep, proxy, user_id, ec);
    if (ec)
        throw system_error(ec);
}
// Asynchronous SOCKS4 handshake. Accepts any completion token
// (callback, use_future, yield_context, ...) with signature
// void(error_code), using the pre-C++20 async_result protocol.
template <typename Socket, typename Token,
typename Endpoint = typename Socket::protocol_type::endpoint>
auto async_proxy_connect(Socket& s, Endpoint ep, Endpoint proxy,
std::string user_id, Token&& token) {
// NOTE: completion must be constructed before result, and result.get()
// must be the last statement — this ordering is mandated by the
// async_result protocol (e.g. futures are bound at construction).
using Result = asio::async_result<std::decay_t<Token>, void(error_code)>;
using Completion = typename Result::completion_handler_type;
Completion completion(std::forward<Token>(token));
Result result(completion);
using Op = async_proxy_connect_op<Socket, Completion>;
// make an owning self ptr, to serve a unique async chain
auto self =
std::make_unique<Op>(completion, s, ep, proxy, std::move(user_id));
self->init(std::move(self));
return result.get();
}
// Convenience overload without a user id: forwards to the main overload
// with an empty user-id string.
template <typename Socket, typename Token,
          typename Endpoint = typename Socket::protocol_type::endpoint>
auto async_proxy_connect(Socket& s, Endpoint ep, Endpoint proxy,
                         Token&& token) {
    return async_proxy_connect<Socket, Token, Endpoint>(
        s, ep, proxy, std::string{}, std::forward<Token>(token));
}
} // namespace socks4
Demo
File test.cpp
#include "socks4.hpp"
#include <boost/beast.hpp>
#include <boost/beast/http.hpp>
#include <iostream>
// Demo: establish a connection through a local SOCKS4 proxy (port 1080),
// then run one plain-HTTP request/response over the proxied socket with
// Beast. Pass any command-line argument to use the synchronous interface;
// default is the asynchronous one (driven via a future for brevity).
int main(int argc, char**) {
bool synchronous = argc > 1;
using boost::asio::ip::tcp;
boost::asio::thread_pool ctx(1); // just one thread will do
tcp::socket sock{ctx};
// make_address_v4 replaces the deprecated address_v4::from_string
tcp::endpoint target(
boost::asio::ip::make_address_v4("173.203.57.63"), 80),
proxy{{}, 1080}; // SOCKS4 proxy assumed on localhost:1080
try {
if (synchronous) {
std::cerr << "Using synchronous interface" << std::endl;
socks4::proxy_connect(sock, target,
proxy); // throws system_error if failed
} else {
std::cerr << "Using asynchronous interface" << std::endl;
// using the async interface (still emulating synchronous by using
// future for brevity of this demo)
auto fut = socks4::async_proxy_connect(sock, target, proxy,
boost::asio::use_future);
fut.get(); // throws system_error if failed
}
// Now do a request using beast over the already-connected socket
namespace beast = boost::beast;
namespace http = beast::http;
{
http::request<http::empty_body> req(http::verb::get, "/", 11);
req.set(http::field::host, "coliru.stacked-crooked.com");
req.set(http::field::connection, "close");
std::cout << "-------\nRequest: " << req << "\n-------\n";
http::write(sock, req);
}
{
http::response<http::string_body> res;
beast::flat_buffer buf;
http::read(sock, buf, res);
std::cout << "\n-------\nResponse: " << res << "\n";
}
} catch(socks4::system_error const& se) {
// socks4::system_error aliases boost::system::system_error, so this
// also catches Beast/Asio transport failures thrown above
std::cerr << "Error: " << se.code().message() << std::endl;
}
ctx.join();
}
Output
Using asynchronous interface
-------
Request: GET / HTTP/1.1
Host: coliru.stacked-crooked.com
Connection: close
-------
-------
Response: HTTP/1.1 200 OK
Content-Type: text/html;charset=utf-8
Content-Length: 8616
Server: WEBrick/1.4.2 (Ruby/2.5.1/2018-03-29) OpenSSL/1.0.2g
Date: Thu, 29 Apr 2021 19:05:03 GMT
Connection: close
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Frameset//EN">
<html>
<head>
<title>Coliru</title>
(rest of response omitted)

Related

Sending async https request using boost/beast

I had a synchronous method that sends an HTTPS request using http::write and then expects to read its response using http::read.
However, in order to add timeout I had to move to async calls in my method. So I've tried to use http::async_read and http::async_write, but keep this overall flow synchronous so the method will return only once it has the https response.
here's my attempt :
class httpsClass {
std::optional<boost::beast::ssl_stream<boost::beast::tcp_stream>> ssl_stream_;
httpsClass(..) {
// notice that ssl_stream_ is initialized according to io_context_/ctx_
// that are class members that get set by c'tor args
ssl_stream_.emplace(io_context_, ctx_);
}
}
std::optional<boost::beast::http::response<boost::beast::http::dynamic_body>>
httpsClass::sendHttpsRequestAndGetResponse (
const boost::beast::http::request<boost::beast::http::string_body>
&request) {
try{
boost::asio::io_context ioc;
beast::flat_buffer buffer;
http::response<http::dynamic_body> res;
beast::get_lowest_layer(*ssl_stream_).expires_after(kTimeout);
boost::asio::spawn(ioc, [&, this](boost::asio::yield_context yield) {
auto sent = http::async_write(this->ssl_stream_.value(), request, yield);
auto received = http::async_read(this->ssl_stream_.value(), buffer, res, yield);
});
ioc.run();// this will finish only once the task above will be fully executed.
return res;
} catch (const std::exception &e) {
log("Error sending/receiving:{}", e.what());
return std::nullopt;
}
}
During trial, this method above reaches the task I assign for the internal io contexts (ioc). However, it gets stuck inside this task on the method async_write.
Anybody can help me figure out why it gets stuck? could it be related to the fact that ssl_stream_ is initialize with another io context object (io_context_) ?
Yes. The default executor for completion handlers on the ssl_stream_ is the outer io_context, which cannot make progress, because you're likely not running it.
My hint would be to:
avoid making the second io_context
also use the more typical future<Response> rather than optional<Response> (which loses the error information)
avoid passing the io_context&. Instead pass executors, which you can more easily change to be a strand executor if so required.
Adding some code to make it self-contained:
class httpsClass {
ssl::context& ctx_;
std::string host_;
std::optional<beast::ssl_stream<beast::tcp_stream>> ssl_stream_;
beast::flat_buffer buffer_;
static constexpr auto kTimeout = 3s;
public:
httpsClass(net::any_io_executor ex, ssl::context& ctx, std::string host)
: ctx_(ctx)
, host_(host)
, ssl_stream_(std::in_place, ex, ctx_) {
auto ep = tcp::resolver(ex).resolve(host, "https");
ssl_stream_->next_layer().connect(ep);
ssl_stream_->handshake(ssl::stream_base::handshake_type::client);
log("Successfully connected to {} for {}",
ssl_stream_->next_layer().socket().remote_endpoint(), ep->host_name());
}
using Request = http::request<http::string_body>;
using Response = http::response<http::dynamic_body>;
std::future<Response> performRequest(Request const&);
};
Your implementation was pretty close, except for the unnecessary service:
std::future<httpsClass::Response>
httpsClass::performRequest(Request const& request) {
std::promise<Response> promise;
auto fut = promise.get_future();
auto coro = [this, r = request, p = std::move(promise)] //
(net::yield_context yield) mutable {
try {
auto& s = *ssl_stream_;
get_lowest_layer(s).expires_after(kTimeout);
r.prepare_payload();
r.set(http::field::host, host_);
auto sent = http::async_write(s, r, yield);
log("Sent: {}", sent);
http::response<http::dynamic_body> res;
auto received = http::async_read(s, buffer_, res, yield);
log("Received: {}", received);
p.set_value(std::move(res));
} catch (...) {
p.set_exception(std::current_exception());
}
};
spawn(ssl_stream_->get_executor(), std::move(coro));
return fut;
}
Now, it is important to have the io_service run()-ning for any asynchronous operations. With completely asynchronous code you wouldn't need threads, but as you are blocking on the response you will. The easiest way is to replace io_service with a thread_pool which does the run()-ning for you.
int main() {
net::thread_pool ioc;
ssl::context ctx(ssl::context::sslv23_client);
ctx.set_default_verify_paths();
for (auto query : {"/delay/2", "/delay/5"}) {
try {
httpsClass client(make_strand(ioc), ctx, "httpbin.org");
auto res = client.performRequest({http::verb::get, query, 11});
log("Request submitted... waiting for response");
log("Response: {}", res.get());
} catch (boost::system::system_error const& se) {
auto const& ec = se.code();
log("Error sending/receiving:{} at {}", ec.message(), ec.location());
} catch (std::exception const& e) {
log("Error sending/receiving:{}", e.what());
}
}
ioc.join();
}
As you can see this test will run two requests against https://httpbin.org/#/Dynamic_data/get_delay__delay_. The second will timeout because 5s exceeds the 3s expiration on the ssl_stream_.
Full Demo
Live On Coliru
#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/beast.hpp>
#include <boost/beast/ssl.hpp>
#include <fmt/ostream.h>
#include <fmt/ranges.h>
#include <optional>
using namespace std::chrono_literals;
namespace net = boost::asio;
namespace beast = boost::beast;
namespace http = beast::http;
namespace ssl = net::ssl;
using net::ip::tcp;
////// LOG STUBS
// fmt cannot format these types natively; route them through their
// ostream operator<< via ostream_formatter.
template <> struct fmt::formatter<boost::source_location> : fmt::ostream_formatter {};
template <> struct fmt::formatter<tcp::endpoint> : fmt::ostream_formatter {};
template <bool isRequest, typename... Args>
struct fmt::formatter<http::message<isRequest, Args...>> : fmt::ostream_formatter {};
// Minimal logging helper: format, append newline, flush immediately so
// output interleaves predictably across threads.
static inline void log(auto const& fmt, auto const&... args) {
fmt::print(fmt::runtime(fmt), args...);
fmt::print("\n");
std::fflush(stdout);
}
////// END LOG STUBS
// HTTPS client bound to one host. The constructor performs blocking
// resolve/connect/TLS-handshake; requests are then issued asynchronously
// on the stream's executor via performRequest.
class httpsClass {
ssl::context& ctx_;
std::string host_;
// optional<> allows deferred construction of the TLS stream
std::optional<beast::ssl_stream<beast::tcp_stream>> ssl_stream_;
beast::flat_buffer buffer_; // reused read buffer across requests
static constexpr auto kTimeout = 3s; // per-request expiry (see performRequest)
public:
// NOTE: constructor does synchronous network I/O and throws
// system_error on resolve/connect/handshake failure.
httpsClass(net::any_io_executor ex, ssl::context& ctx, std::string host)
: ctx_(ctx)
, host_(host)
, ssl_stream_(std::in_place, ex, ctx_) {
auto ep = tcp::resolver(ex).resolve(host, "https");
ssl_stream_->next_layer().connect(ep);
ssl_stream_->handshake(ssl::stream_base::handshake_type::client);
log("Successfully connected to {} for {}",
ssl_stream_->next_layer().socket().remote_endpoint(), ep->host_name());
}
using Request = http::request<http::string_body>;
using Response = http::response<http::dynamic_body>;
// Submit one request; the returned future yields the response or
// rethrows the failure. Implemented below.
std::future<Response> performRequest(Request const&);
};
// Runs write+read as a stackful coroutine on the stream's own executor,
// bridging the result back to the caller through a promise/future pair.
// Any exception inside the coroutine (timeout, TLS, HTTP) is captured
// into the future rather than escaping the executor thread.
std::future<httpsClass::Response>
httpsClass::performRequest(Request const& request) {
std::promise<Response> promise;
auto fut = promise.get_future();
// The coroutine owns a copy of the request and the promise; `this`
// must outlive the coroutine (caller's responsibility).
auto coro = [this, r = request, p = std::move(promise)] //
(net::yield_context yield) mutable {
try {
auto& s = *ssl_stream_;
// arm the per-request deadline on the underlying tcp_stream
get_lowest_layer(s).expires_after(kTimeout);
r.prepare_payload();
r.set(http::field::host, host_);
auto sent = http::async_write(s, r, yield);
log("Sent: {}", sent);
http::response<http::dynamic_body> res;
auto received = http::async_read(s, buffer_, res, yield);
log("Received: {}", received);
p.set_value(std::move(res));
} catch (...) {
// deliver the failure to the waiting caller via the future
p.set_exception(std::current_exception());
}
};
// run on the stream's executor so all stream access stays serialized
spawn(ssl_stream_->get_executor(), std::move(coro));
return fut;
}
// Demo driver: two requests against httpbin.org's delay endpoint.
// /delay/2 completes within the 3s stream timeout; /delay/5 is expected
// to fail with a timeout, exercising the error path.
int main() {
// thread_pool both owns the threads and run()s them for us
net::thread_pool ioc;
ssl::context ctx(ssl::context::sslv23_client);
ctx.set_default_verify_paths();
for (auto query : {"/delay/2", "/delay/5"}) {
try {
// a strand executor keeps each client's handlers serialized
httpsClass client(make_strand(ioc), ctx, "httpbin.org");
auto res = client.performRequest({http::verb::get, query, 11});
log("Request submitted... waiting for response");
log("Response: {}", res.get()); // blocks; rethrows on failure
} catch (boost::system::system_error const& se) {
auto const& ec = se.code();
log("Error sending/receiving:{} at {}", ec.message(), ec.location());
} catch (std::exception const& e) {
log("Error sending/receiving:{}", e.what());
}
}
ioc.join();
}
Live on my system:

Boost Beast Async Websocket Server How to interface with session?

So I don't know why but I can't wrap my head around the boost Beast websocket server and how you can (or should) interact with it.
The basic program I made looks like this, across 2 classes (WebSocketListener and WebSocketSession)
https://www.boost.org/doc/libs/develop/libs/beast/example/websocket/server/async/websocket_server_async.cpp
Everything works great, I can connect, and it echos messages. We will only ever have 1 active session, and I'm struggling to understand how I can interface with this session from outside its class, in my int main() for example or another class that may be responsible for issuing read/writes. We will be using a simple Command design pattern of commands async coming into a buffer that get processed against hardware and then async_write back out the results. The reading and queuing is straight forward and will be done in the WebsocketSession, but everything I see for write is just reading/writing directly inside the session and not getting external input.
I've seen examples using things like boost::asio::async_write(socket, buffer, ...) but I'm struggling to understand how I get a reference to said socket when the session is created by the listener itself.
Instead of depending on a socket from outside of the session, I'd depend on your program logic to implement the session.
That's because the session (connection) will govern its own lifetime, arriving spontaneously and potentially disconnecting spontaneously. Your hardware, most likely, doesn't.
So, borrowing the concept of "Dependency Injection" tell your listener about your application logic, and then call into that from the session. (The listener will "inject" the dependency into each newly created session).
Let's start from a simplified/modernized version of your linked example.
Now, where we prepare a response, you want your own logic injected, so let's write it how we would imagine it:
void on_read(beast::error_code ec, std::size_t /*bytes_transferred*/) {
if (ec == websocket::error::closed) return;
if (ec.failed()) return fail(ec, "read");
// Process the message
response_ = logic_->Process(beast::buffers_to_string(buffer_));
ws_.async_write(
net::buffer(response_),
beast::bind_front_handler(&session::on_write, shared_from_this()));
}
Here we declare the members and initialize them from the constructor:
std::string response_;
std::shared_ptr<AppDomain::Logic> logic_;
public:
explicit session(tcp::socket&& socket,
std::shared_ptr<AppDomain::Logic> logic)
: ws_(std::move(socket))
, logic_(logic) {}
Now, we need to inject the listener with the logic so we can pass it along:
class listener : public std::enable_shared_from_this<listener> {
net::any_io_executor ex_;
tcp::acceptor acceptor_;
std::shared_ptr<AppDomain::Logic> logic_;
public:
listener(net::any_io_executor ex, tcp::endpoint endpoint,
std::shared_ptr<AppDomain::Logic> logic)
: ex_(ex)
, acceptor_(ex)
, logic_(logic) {
So that we can pass it along:
void on_accept(beast::error_code ec, tcp::socket socket) {
if (ec) {
fail(ec, "accept");
} else {
std::make_shared<session>(std::move(socket), logic_)->run();
}
// Accept another connection
do_accept();
}
Now making the real logic in main:
auto logic = std::make_shared<AppDomain::Logic>("StackOverflow Demo/");
try {
// The io_context is required for all I/O
net::thread_pool ioc(threads);
std::make_shared<listener>(ioc.get_executor(),
tcp::endpoint{address, port}, logic)
->run();
ioc.join();
} catch (beast::system_error const& se) {
fail(se.code(), "listener");
}
Demo Logic
Just for fun, let's implement some random logic, that might be implemented in hardware in the future:
namespace AppDomain {
struct Logic {
std::string banner;
Logic(std::string msg) : banner(std::move(msg)) {}
std::string Process(std::string request) {
std::cout << "Processing: " << std::quoted(request) << std::endl;
std::string result;
auto fold = [&result](auto op, double initial) {
return [=, &result](auto& ctx) {
auto& args = _attr(ctx);
auto v = accumulate(args.begin(), args.end(), initial, op);
result = "Fold:" + std::to_string(v);
};
};
auto invalid = [&result](auto& ctx) {
result = "Invalid Command: " + _attr(ctx);
};
using namespace boost::spirit::x3;
auto args = rule<void, std::vector<double>>{} = '(' >> double_ % ',' >> ')';
auto add = "adding" >> args[fold(std::plus<>{}, 0)];
auto mul = "multiplying" >> args[fold(std::multiplies<>{}, 1)];
auto err = lexeme[+char_][invalid];
phrase_parse(begin(request), end(request), add | mul | err, blank);
return banner + result;
}
};
} // namespace AppDomain
Now you can see it in action: Full Listing
Where To Go From Here
What if you need multiple responses for one request?
You need a queue. I usually call those outbox so searching for outbox_, _outbox etc will give lots of examples.
Those examples will also show how to deal with other situations where writes can be "externally initiated", and how to safely enqueue those. Perhaps a very engaging example is here How to batch send unsent messages in asio
Listing For Reference
In case the links go dead in the future:
#include <boost/algorithm/string/trim.hpp>
#include <boost/asio.hpp>
#include <boost/beast.hpp>
#include <filesystem>
#include <functional>
#include <iostream>
static std::string g_app_name = "app-logic-service";
#include <boost/core/demangle.hpp> // just for our demo logic
#include <boost/spirit/home/x3.hpp> // idem
#include <numeric> // idem
// Stand-in for the application's real (possibly hardware-backed) logic.
// Parses commands of the form "adding(1,2,3)" / "multiplying(2,3)" with
// Boost.Spirit X3 and returns the banner plus the folded result.
namespace AppDomain {
struct Logic {
std::string banner; // prefixed to every response
Logic(std::string msg) : banner(std::move(msg)) {}
// Parse `request` and produce a response string. Unrecognized input
// yields "Invalid Command: <input>" via the err alternative below.
std::string Process(std::string request) {
std::string result;
// builds a semantic action that folds the parsed number list with
// `op` starting from `initial` and stores the text into `result`
auto fold = [&result](auto op, double initial) {
return [=, &result](auto& ctx) {
auto& args = _attr(ctx);
auto v = accumulate(args.begin(), args.end(), initial, op);
result = "Fold:" + std::to_string(v);
};
};
auto invalid = [&result](auto& ctx) {
result = "Invalid Command: " + _attr(ctx);
};
using namespace boost::spirit::x3;
// grammar: '(' double (',' double)* ')'
auto args = rule<void, std::vector<double>>{} = '(' >> double_ % ',' >> ')';
auto add = "adding" >> args[fold(std::plus<>{}, 0)];
auto mul = "multiplying" >> args[fold(std::multiplies<>{}, 1)];
auto err = lexeme[+char_][invalid]; // catch-all: swallow rest of input
phrase_parse(begin(request), end(request), add | mul | err, blank);
return banner + result;
}
};
} // namespace AppDomain
namespace beast = boost::beast; // from <boost/beast.hpp>
namespace http = beast::http; // from <boost/beast/http.hpp>
namespace websocket = beast::websocket; // from <boost/beast/websocket.hpp>
namespace net = boost::asio; // from <boost/asio.hpp>
using tcp = boost::asio::ip::tcp; // from <boost/asio/ip/tcp.hpp>
// Report a failed operation on stderr as "<operation>: <error message>".
void fail(beast::error_code ec, char const* what) {
    std::string line = what;
    line += ": ";
    line += ec.message();
    line += "\n";
    std::cerr << line;
}
// One websocket connection. Lifetime is self-managed: every pending
// async operation holds shared_from_this(), so the session dies when the
// last handler completes. Application behavior is injected as `logic_`.
class session : public std::enable_shared_from_this<session> {
websocket::stream<beast::tcp_stream> ws_;
beast::flat_buffer buffer_; // incoming message storage
std::string response_; // must outlive async_write, hence a member
std::shared_ptr<AppDomain::Logic> logic_; // injected application logic
public:
explicit session(tcp::socket&& socket,
std::shared_ptr<AppDomain::Logic> logic)
: ws_(std::move(socket))
, logic_(logic) {}
void run() {
// Get on the correct executor
// strand for thread safety
dispatch(
ws_.get_executor(),
beast::bind_front_handler(&session::on_run, shared_from_this()));
}
private:
void on_run() {
// Set suggested timeout settings for the websocket
ws_.set_option(websocket::stream_base::timeout::suggested(
beast::role_type::server));
// Set a decorator to change the Server of the handshake
ws_.set_option(websocket::stream_base::decorator(
[](websocket::response_type& res) {
res.set(http::field::server,
std::string(BOOST_BEAST_VERSION_STRING) + " " +
g_app_name);
}));
// Accept the websocket handshake
ws_.async_accept(
beast::bind_front_handler(&session::on_accept, shared_from_this()));
}
void on_accept(beast::error_code ec) {
if (ec)
return fail(ec, "accept");
// handshake done: enter the read loop
do_read();
}
void do_read() {
ws_.async_read(
buffer_,
beast::bind_front_handler(&session::on_read, shared_from_this()));
}
void on_read(beast::error_code ec, std::size_t /*bytes_transferred*/) {
// a clean close simply ends the read loop (session destructs)
if (ec == websocket::error::closed) return;
if (ec.failed()) return fail(ec, "read");
// Process the message
auto request = boost::algorithm::trim_copy(
beast::buffers_to_string(buffer_.data()));
std::cout << "Processing: " << std::quoted(request) << " from "
<< beast::get_lowest_layer(ws_).socket().remote_endpoint()
<< std::endl;
// delegate to injected application logic, then echo the result back
response_ = logic_->Process(request);
ws_.async_write(
net::buffer(response_),
beast::bind_front_handler(&session::on_write, shared_from_this()));
}
void on_write(beast::error_code ec, std::size_t bytes_transferred) {
boost::ignore_unused(bytes_transferred);
if (ec)
return fail(ec, "write");
// Clear the buffer
buffer_.consume(buffer_.size());
// Do another read
do_read();
}
};
// Accepts incoming connections and launches the sessions, injecting the
// shared application logic into each newly created session.
class listener : public std::enable_shared_from_this<listener> {
net::any_io_executor ex_;
tcp::acceptor acceptor_;
std::shared_ptr<AppDomain::Logic> logic_; // passed along to each session
public:
// Opens, configures and binds the acceptor; throws on failure since
// the acceptor calls below are the throwing overloads.
listener(net::any_io_executor ex, tcp::endpoint endpoint,
std::shared_ptr<AppDomain::Logic> logic)
: ex_(ex)
, acceptor_(ex)
, logic_(logic) {
acceptor_.open(endpoint.protocol());
acceptor_.set_option(tcp::acceptor::reuse_address(true));
acceptor_.bind(endpoint);
acceptor_.listen(tcp::acceptor::max_listen_connections);
}
// Start accepting incoming connections
void run() { do_accept(); }
private:
void do_accept() {
// The new connection gets its own strand
acceptor_.async_accept(make_strand(ex_),
beast::bind_front_handler(&listener::on_accept,
shared_from_this()));
}
void on_accept(beast::error_code ec, tcp::socket socket) {
if (ec) {
fail(ec, "accept");
} else {
// hand the socket and the injected logic to a new session
std::make_shared<session>(std::move(socket), logic_)->run();
}
// Accept another connection
do_accept();
}
};
// Entry point: parse <address> <port> <threads>, build the application
// logic, and run the websocket listener on a thread pool.
int main(int argc, char* argv[]) {
// use the executable's basename in the Server header and usage text
g_app_name = std::filesystem::path(argv[0]).filename();
if (argc != 4) {
std::cerr << "Usage: " << g_app_name << " <address> <port> <threads>\n"
<< "Example:\n"
<< " " << g_app_name << " 0.0.0.0 8080 1\n";
return 1;
}
auto const address = net::ip::make_address(argv[1]);
auto const port = static_cast<uint16_t>(std::atoi(argv[2]));
auto const threads = std::max<int>(1, std::atoi(argv[3]));
// the "dependency" injected into every session (see listener)
auto logic = std::make_shared<AppDomain::Logic>("StackOverflow Demo/");
try {
// The io_context is required for all I/O
net::thread_pool ioc(threads);
std::make_shared<listener>(ioc.get_executor(),
tcp::endpoint{address, port}, logic)
->run();
ioc.join(); // block until the pool drains (i.e. forever for a server)
} catch (beast::system_error const& se) {
fail(se.code(), "listener");
}
}
UPDATE
In response to the comments I reified the outbox pattern again. Note some of the comments in the code.
Compiler Explorer
#include <boost/algorithm/string/trim.hpp>
#include <boost/asio.hpp>
#include <boost/beast.hpp>
#include <deque>
#include <filesystem>
#include <functional>
#include <iostream>
#include <list>
static std::string g_app_name = "app-logic-service";
#include <boost/core/demangle.hpp> // just for our demo logic
#include <boost/spirit/home/x3.hpp> // idem
#include <numeric> // idem
// Demo application logic (same as the first listing): parses
// "adding(...)"/"multiplying(...)" commands with Boost.Spirit X3 and
// returns the banner plus the folded numeric result.
namespace AppDomain {
struct Logic {
std::string banner; // prefixed to every response
Logic(std::string msg) : banner(std::move(msg)) {}
std::string Process(std::string request) {
std::string result;
// semantic-action factory: fold parsed numbers with op/initial
auto fold = [&result](auto op, double initial) {
return [=, &result](auto& ctx) {
auto& args = _attr(ctx);
auto v = accumulate(args.begin(), args.end(), initial, op);
result = "Fold:" + std::to_string(v);
};
};
auto invalid = [&result](auto& ctx) {
result = "Invalid Command: " + _attr(ctx);
};
using namespace boost::spirit::x3;
auto args = rule<void, std::vector<double>>{} = '(' >> double_ % ',' >> ')';
auto add = "adding" >> args[fold(std::plus<>{}, 0)];
auto mul = "multiplying" >> args[fold(std::multiplies<>{}, 1)];
auto err = lexeme[+char_][invalid]; // fallback consumes all input
phrase_parse(begin(request), end(request), add | mul | err, blank);
return banner + result;
}
};
} // namespace AppDomain
namespace beast = boost::beast; // from <boost/beast.hpp>
namespace http = beast::http; // from <boost/beast/http.hpp>
namespace websocket = beast::websocket; // from <boost/beast/websocket.hpp>
namespace net = boost::asio; // from <boost/asio.hpp>
using tcp = boost::asio::ip::tcp; // from <boost/asio/ip/tcp.hpp>
// Report a failure
// Report a failure: logs the context string and the error description to stderr.
void fail(beast::error_code ec, char const* what) {
    std::cerr << what << ": " << ec.message() << '\n';
}
// One websocket connection. Every handler runs on the connection's strand
// (the socket is created via make_strand in the listener), so members —
// including the _outbox below — need no additional locking.
class session : public std::enable_shared_from_this<session> {
websocket::stream<beast::tcp_stream> ws_;
beast::flat_buffer buffer_;
std::shared_ptr<AppDomain::Logic> logic_;
public:
explicit session(tcp::socket&& socket,
std::shared_ptr<AppDomain::Logic> logic)
: ws_(std::move(socket))
, logic_(logic) {}
void run() {
// Get on the correct executor
// strand for thread safety
dispatch(
ws_.get_executor(),
beast::bind_front_handler(&session::on_run, shared_from_this()));
}
// Thread-safe entry point for外部 callers is NOT needed here: post() hops
// onto the session's strand before touching the outbox.
void post_message(std::string msg) {
post(ws_.get_executor(),
[self = shared_from_this(), this, msg = std::move(msg)] {
do_post_message(std::move(msg));
});
}
private:
void on_run() {
// on the strand
// Set suggested timeout settings for the websocket
ws_.set_option(websocket::stream_base::timeout::suggested(
beast::role_type::server));
// Set a decorator to change the Server of the handshake
ws_.set_option(websocket::stream_base::decorator(
[](websocket::response_type& res) {
res.set(http::field::server,
std::string(BOOST_BEAST_VERSION_STRING) + " " +
g_app_name);
}));
// Accept the websocket handshake
ws_.async_accept(
beast::bind_front_handler(&session::on_accept, shared_from_this()));
}
void on_accept(beast::error_code ec) {
// on the strand
if (ec)
return fail(ec, "accept");
do_read();
}
void do_read() {
// on the strand
buffer_.clear();
ws_.async_read(
buffer_,
beast::bind_front_handler(&session::on_read, shared_from_this()));
}
void on_read(beast::error_code ec, std::size_t /*bytes_transferred*/) {
// on the strand
if (ec == websocket::error::closed) return;
if (ec.failed()) return fail(ec, "read");
// Process the message
auto request = boost::algorithm::trim_copy(
beast::buffers_to_string(buffer_.data()));
std::cout << "Processing: " << std::quoted(request) << " from "
<< beast::get_lowest_layer(ws_).socket().remote_endpoint()
<< std::endl;
do_post_message(logic_->Process(request)); // already on the strand
do_read();
}
// Outbox pattern: queued outgoing messages. Invariant: a write loop is in
// flight exactly when the deque is non-empty.
std::deque<std::string> _outbox;
void do_post_message(std::string msg) {
// on the strand
_outbox.push_back(std::move(msg));
// Only start the loop when this is the first entry; otherwise the
// running loop will pick the new message up.
if (_outbox.size() == 1)
do_write_loop();
}
void do_write_loop() {
// on the strand
if (_outbox.empty())
return;
// front() stays alive during the async_write because it is only popped
// in the completion handler below.
ws_.async_write( //
net::buffer(_outbox.front()),
[self = shared_from_this(), this] //
(beast::error_code ec, size_t bytes_transferred) {
// on the strand
boost::ignore_unused(bytes_transferred);
if (ec)
return fail(ec, "write");
_outbox.pop_front();
do_write_loop();
});
}
};
// Accepts incoming connections and launches the sessions
// Accepts incoming connections and launches the sessions. The acceptor lives
// on its own strand, which also serializes access to sessions_.
class listener : public std::enable_shared_from_this<listener> {
net::any_io_executor ex_;
tcp::acceptor acceptor_;
std::shared_ptr<AppDomain::Logic> logic_;
public:
listener(net::any_io_executor ex, tcp::endpoint endpoint,
std::shared_ptr<AppDomain::Logic> logic)
: ex_(ex)
, acceptor_(make_strand(ex)) // NOTE to guard sessions_
, logic_(logic) {
acceptor_.open(endpoint.protocol());
acceptor_.set_option(tcp::acceptor::reuse_address(true));
acceptor_.bind(endpoint);
acceptor_.listen(tcp::acceptor::max_listen_connections);
}
// Start accepting incoming connections
void run() { do_accept(); }
// Thread-safe: hops onto the acceptor strand before touching sessions_.
void broadcast(std::string msg) {
post(acceptor_.get_executor(),
beast::bind_front_handler(&listener::do_broadcast,
shared_from_this(), std::move(msg)));
}
private:
// weak_ptr so the listener does not keep dead sessions alive.
using handle_t = std::weak_ptr<session>;
std::list<handle_t> sessions_;
void do_broadcast(std::string const& msg) {
for (auto handle : sessions_)
if (auto sess = handle.lock())
sess->post_message(msg);
}
void do_accept() {
// The new connection gets its own strand
acceptor_.async_accept(make_strand(ex_),
beast::bind_front_handler(&listener::on_accept,
shared_from_this()));
}
void on_accept(beast::error_code ec, tcp::socket socket) {
// on the strand
if (ec) {
fail(ec, "accept");
} else {
auto sess = std::make_shared<session>(std::move(socket), logic_);
sessions_.emplace_back(sess);
// optionally: garbage-collect handles whose sessions have ended
sessions_.remove_if(std::mem_fn(&handle_t::expired));
sess->run();
}
// Accept another connection
do_accept();
}
};
// Simulates a hardware event source: broadcasts a numbered message to all
// sessions once per second, forever. (A steady_timer with async_wait would be
// the production approach; this blocking-thread sketch keeps the demo short.)
static void emulate_hardware_stuff(std::shared_ptr<listener> srv) {
    using namespace std::chrono_literals;
    for (unsigned count = 0;;) {
        std::this_thread::sleep_for(1s);
        srv->broadcast("Hardware thing #" + std::to_string(++count));
    }
}
// Entry point: <address> <port> <threads>. Runs the websocket server on a
// thread pool and a separate thread that emulates hardware events.
int main(int argc, char* argv[]) {
g_app_name = std::filesystem::path(argv[0]).filename();
if (argc != 4) {
std::cerr << "Usage: " << g_app_name << " <address> <port> <threads>\n"
<< "Example:\n"
<< " " << g_app_name << " 0.0.0.0 8080 1\n";
return 1;
}
auto const address = net::ip::make_address(argv[1]);
auto const port = static_cast<uint16_t>(std::atoi(argv[2]));
auto const threads = std::max<int>(1, std::atoi(argv[3]));
auto logic = std::make_shared<AppDomain::Logic>("StackOverflow Demo/");
try {
// The io_context is required for all I/O
net::thread_pool ioc(threads);
auto srv = std::make_shared<listener>( //
ioc.get_executor(), //
tcp::endpoint{address, port}, //
logic);
srv->run();
// NOTE(review): emulate_hardware_stuff never returns, so this thread —
// and therefore main — runs until the process is killed.
std::thread something_hardware(emulate_hardware_stuff, srv);
ioc.join();
something_hardware.join();
} catch (beast::system_error const& se) {
fail(se.code(), "listener");
}
}
With Live Demo:

How do I make this HTTPS connection persistent in Beast?

I'm making about 30,000 queries to a GraphQL server; because I have a high-latency connection, I'm doing many queries in parallel, using threads. Currently each query makes a new connection; I'd like to reuse the connections, which should reduce the time the whole download takes. Here's my code:
#include <boost/asio.hpp>
#include <boost/asio/ssl.hpp>
#include <boost/beast.hpp>
#include <boost/beast/ssl.hpp>
#include <boost/asio/ssl/error.hpp>
#include <boost/asio/ssl/stream.hpp>
#include <chrono>
#include <vector>
#include <array>
#include <iostream>
#include "http.h"
namespace beast=boost::beast;
namespace http=beast::http;
namespace net=boost::asio;
namespace ssl=net::ssl;
using tcp=net::ip::tcp;
using namespace std;
namespace cr=chrono;
/* One request's measurements: elapsed wall time and bytes transferred.
 * Used to compute the latency and data rate, which will be used
 * to compute the number of I/O threads for the next run.
 */
struct TimeBytes
{
float ms;   // round-trip time in milliseconds
int bytes;  // request + response body sizes plus estimated header overhead
};
cr::steady_clock clk;  // monotonic clock used for all timings
vector<TimeBytes> timeBytes;  // per-request stats; guarded by timeBytesMutex
mutex timeBytesMutex;
// Last target per thread — presumably intended for connection reuse; currently
// only written, never acted on (the reuse check below is commented out).
thread_local string lastProto,lastHost,lastPort;
// Splits a URL into {protocol, hostname, port, path}; all four are strings,
// including the port. When the URL carries no explicit ":port", the port is
// derived from the protocol: "https" -> "443", "http" -> "80", else "0".
// NOTE(review): assumes a well-formed absolute URL containing "://" and a
// path; no validation is performed.
std::array<std::string,4> parseUrl(std::string url)
{
std::size_t pos0=url.find("://");
std::size_t pos1;
std::array<std::string,4> ret;
ret[0]=url.substr(0,pos0);
if (pos0<url.length())
pos0+=3;
pos1=url.find("/",pos0);
ret[1]=url.substr(pos0,pos1-pos0);
ret[3]=url.substr(pos1);
pos0=ret[1].find(":");
if (pos0<ret[1].length())
{
ret[2]=ret[1].substr(pos0+1);
ret[1]=ret[1].substr(0,pos0);
}
else
if (ret[0]=="https")
ret[2]="443";
else if (ret[0]=="http") // BUG FIX: was "https" twice, so http URLs got port "0"
ret[2]="80";
else
ret[2]="0";
return ret;
}
// Performs one synchronous HTTPS POST of `data` to `url` and returns the
// response body (empty on failure). Records latency/bytes into the global
// timeBytes vector. Opens and closes a fresh connection on every call.
string httpPost(string url,string data)
{
net::io_context context;
ssl::context ctx(ssl::context::tlsv12_client);
tcp::resolver res(context);
tcp::resolver::results_type endpoints;
beast::ssl_stream<beast::tcp_stream> stream(context,ctx);
array<string,4> parsed=parseUrl(url);
http::request<http::string_body> req;
http::response<http::string_body> resp;
beast::flat_buffer buffer;
TimeBytes tb;
cr::nanoseconds elapsed;
cr::time_point<cr::steady_clock> timeStart=clk.now();
//if (parsed[0]==lastProto && parsed[1]==lastHost && parsed[2]==lastPort)
//cout<<"same host\n";
//load_root_certificates(ctx);
try
{
ctx.set_verify_mode(ssl::verify_peer);
endpoints=res.resolve(parsed[1],parsed[2]);
beast::get_lowest_layer(stream).connect(endpoints);
// SNI hostname (required by many servers for the TLS handshake).
SSL_set_tlsext_host_name(stream.native_handle(),parsed[1].c_str());
if (parsed[0]=="https")
stream.handshake(net::ssl::stream_base::client);
req.method(http::verb::post);
req.target(parsed[3]);
req.set(http::field::host,parsed[1]);
req.set(http::field::connection,"keep-alive");
req.set(http::field::user_agent,BOOST_BEAST_VERSION_STRING);
req.set(http::field::content_type,"application/json");
req.set(http::field::accept,"application/json");
req.body()=data;
req.prepare_payload();
http::write(stream,req);
http::read(stream,buffer,resp);
elapsed=clk.now()-timeStart;
tb.ms=elapsed.count()/1e6;
tb.bytes=req.body().size()+resp.body().size()+7626;
// 7626 accounts for HTTP, TCP, IP, and Ethernet headers.
timeBytesMutex.lock();
timeBytes.push_back(tb);
timeBytesMutex.unlock();
// Connection is torn down on every call despite "keep-alive" above.
beast::close_socket(beast::get_lowest_layer(stream));
if (DEBUG_QUERY) // presumably defined in "http.h" — TODO confirm
{
cout<<parsed[0]<<"|\n"<<parsed[1]<<"|\n"<<parsed[2]<<"|\n"<<parsed[3]<<"|\n";
cout<<data<<"|\n";
}
}
catch (...)
{
// NOTE(review): all errors are silently swallowed; the caller receives an
// empty body and cannot distinguish failure from an empty response.
}
lastProto=parsed[0];
lastHost=parsed[1];
lastPort=parsed[2];
return resp.body();
}
Most of the requests are to one server. A few GET requests are made to another server (using an httpGet function which is pretty similar to httpPost). After I download the data, I crunch them, so I'd like to close the connections before starting to crunch.
I tried making context, ctx, and stream thread-local, and stream.shutdown() and context.restart() before close_socket(), but the program crashed the second time the main thread called httpPost, from http::read throwing an error. (A worker thread made one query between the main thread's two queries.) At that point I was not trying to keep the connection open, but trying to make thread-local work so that I could keep the connection open.
I'd strongly suggest using async interfaces. Since the majority of time is obviously spent waiting for the IO, you likely can get all the throughput from just a single thread.
Here's an example that does answer your question (how to keep a client open for more than one request) while making the processing asynchronous. Right now, the downside is that all requests on a single client need to be sequenced (that's what I used the _tasks queue for). However this should probably serve as inspiration.
Note that the initiation functions work with all completion handler result types: net::use_future, net::spawn (coroutines) etc.
Live On Coliru
#include <boost/asio.hpp>
#include <boost/asio/ssl.hpp>
#include <boost/beast.hpp>
#include <boost/beast/ssl.hpp>
#include <chrono>
#include <deque>
#include <iomanip>
#include <iostream>
namespace net = boost::asio;
namespace ssl = net::ssl;
namespace beast = boost::beast;
namespace http = beast::http;
using clk = std::chrono::steady_clock;
using net::ip::tcp;
using beast::error_code;
using namespace std::chrono_literals;
/* Used to compute the latency and data rate, which will be used to compute the
* number of I/O threads for the next run. */
struct TimeBytes {
long double ms;  // elapsed wall-clock time in milliseconds
size_t bytes;    // actual bytes transferred, including HTTP headers
};
static std::vector<TimeBytes> timeBytes; // per-request stats; guarded by timeBytesMutex
static std::mutex timeBytesMutex;
// Parsed URL pieces. Spec is the connection identity (hostname + port) used
// as the key for pooling one Client per upstream endpoint.
struct Url {
    struct Spec {
        std::string hostname, port;
        // Lexicographic order: hostname first, then port (for map keys).
        bool operator<(Spec const& rhs) const {
            if (hostname != rhs.hostname)
                return hostname < rhs.hostname;
            return port < rhs.port;
        }
    };
    std::string protocol, hostname, port, path;
    // The connection-identity subset of this URL.
    Spec specification() const { return Spec{hostname, port}; }
};
#include <boost/spirit/home/x3.hpp>
#include <boost/fusion/adapted/std_tuple.hpp>
namespace x3 = boost::spirit::x3;
// Parses "protocol://host[:port]/path" into a Url using Spirit X3. A missing
// port is derived from the protocol ("https" -> "443", "http" -> "80",
// otherwise "0"). Throws x3::expectation_failure on malformed input.
Url parseUrl(std::string const& url)
{
Url ret;
std::string hostport;
{
static const auto url_ = *(x3::char_ - "://") >> "://" // protocol
>> +~x3::char_('/') // hostname
>> *x3::char_; // path
auto into = std::tie(ret.protocol, hostport, ret.path);
parse(begin(url), end(url), x3::expect[url_], into);
}
{
// Optional ":port" anchored at end-of-input so colons earlier in the
// host part (e.g. IPv6-ish input) do not trigger a port match.
static const auto portspec_ = -(':' >> x3::uint_) >> x3::eoi;
static const auto hostport_ =
x3::raw[+(+~x3::char_(':') | !portspec_ >> x3::char_)] //
>> -portspec_;
boost::optional<uint16_t> port;
auto into = std::tie(ret.hostname, port);
parse(begin(hostport), end(hostport), x3::expect[hostport_], into);
if (port.has_value()) { ret.port = std::to_string(*port); }
else if (ret.protocol == "https") { ret.port = "443"; }
else if (ret.protocol == "http") { ret.port = "80"; }
else { ret.port = "0"; }
}
return ret;
}
// Pooled HTTPS client for one host:port. All state is touched only through
// the client's (strand) executor; requests queue up in _tasks and run one at
// a time. On any I/O error the stream is dropped and the next request lazily
// reconnects.
struct Client : std::enable_shared_from_this<Client> {
public:
Client(net::any_io_executor ex, Url::Spec spec, ssl::context& ctx)
: _executor(ex)
, _spec(spec)
, _sslcontext(ctx)
{
}
// Initiation function: queues a request and completes the token with
// (error_code, response body). Works with any completion token type
// (callback, net::use_future, net::spawn, ...).
template <typename Token>
auto async_request(http::verb verb, std::string const& path,
std::string const& data, Token&& token)
{
using R = typename net::async_result<std::decay_t<Token>,
void(error_code, std::string)>;
using H = typename R::completion_handler_type;
H handler(std::forward<Token>(token));
R result(handler);
// Wraps the user handler: after a request completes, pop it from the
// queue (on the executor) and kick off the next queued task.
auto chain_tasks = [this, h = std::move(handler),
self = shared_from_this()](auto&&... args) mutable {
if (!self->_tasks.empty()) {
dispatch(self->_executor, [this, self] {
if (not _tasks.empty()) _tasks.pop_front();
if (not _tasks.empty()) _tasks.front()->initiate();
});
}
std::move(h)(std::forward<decltype(args)>(args)...);
};
auto task = std::make_shared<RequestOp<decltype(chain_tasks)>>(
this, verb, path, data, chain_tasks);
enqueue(std::move(task));
return result.get();
}
// Convenience wrapper for POST requests.
template <typename Token>
auto async_post(std::string const& path, std::string const& data,
Token&& token)
{
return async_request(http::verb::post,path, data, std::forward<Token>(token));
}
// Convenience wrapper for GET requests (empty body).
template <typename Token>
auto async_get(std::string const& path, Token&& token)
{
return async_request(http::verb::get,path, "", std::forward<Token>(token));
}
private:
// Re-establishes _stream (resolve + connect + TLS handshake) and completes
// the token with an error_code.
template <typename Token> auto async_reconnect(Token&& token)
{
using R = typename net::async_result<std::decay_t<Token>, void(error_code)>;
using H = typename R::completion_handler_type;
H handler(std::forward<Token>(token));
R result(handler);
assert(!_stream.has_value()); // probably a program flow bug
_stream.emplace(_executor, _sslcontext);
std::make_shared<ReconnectOp<H>>(this, std::move(handler))->start();
return result.get();
}
// Composed operation: resolve -> connect -> TLS handshake.
template <typename Handler>
struct ReconnectOp : std::enable_shared_from_this<ReconnectOp<Handler>> {
ReconnectOp(Client* client, Handler h)
: _client{client}
, _handler(std::move(h))
, _resolver(client->_stream->get_executor())
{
}
Client* _client;
Handler _handler;
tcp::resolver _resolver;
// Invokes the handler on error or completion; drops the stream on error.
// Returns true when it is safe to continue the chain.
bool checked(error_code ec, bool complete = false) {
if (complete || ec)
std::move(_handler)(ec);
if (ec && _client->_stream.has_value())
{
std::cerr << "Socket " << _client->_stream->native_handle()
<< " closed due to " << ec.message() << std::endl;
_client->_stream.reset();
}
return !ec.failed();
}
void start()
{
_resolver.async_resolve(
_client->_spec.hostname, _client->_spec.port,
beast::bind_front_handler(&ReconnectOp::on_resolved,
this->shared_from_this()));
}
void on_resolved(error_code ec, tcp::resolver::results_type ep)
{
if (checked(ec)) {
beast::get_lowest_layer(*_client->_stream)
.async_connect(
ep,
beast::bind_front_handler(&ReconnectOp::on_connected,
this->shared_from_this()));
}
}
void on_connected(error_code ec, tcp::endpoint ep) {
if (checked(ec)) {
std::cerr << "Socket " << _client->_stream->native_handle()
<< " (re)connected to " << ep << std::endl;
// SNI hostname for the TLS handshake.
auto& hostname = _client->_spec.hostname;
SSL_set_tlsext_host_name(_client->_stream->native_handle(),
hostname.c_str());
_client->_stream->async_handshake(
Stream::client,
beast::bind_front_handler(&ReconnectOp::on_ready,
this->shared_from_this()));
}
}
void on_ready(error_code ec) {
checked(ec, true);
}
};
// Type-erased queued task interface so different handler types can share
// one queue.
struct IAsyncTask {
virtual void initiate() = 0;
};
// Composed operation: (optional reconnect) -> write request -> read response.
template <typename Handler>
struct RequestOp : IAsyncTask, std::enable_shared_from_this<RequestOp<Handler>> {
RequestOp(Client* client, http::verb verb, std::string const& path,
std::string data, Handler h)
: _client(client)
, _handler(std::move(h))
, _request(verb, path, 11, std::move(data))
{
_request.set(http::field::host, _client->_spec.hostname);
_request.set(http::field::connection, "keep-alive");
_request.set(http::field::user_agent, BOOST_BEAST_VERSION_STRING);
_request.set(http::field::content_type, "application/json");
_request.set(http::field::accept, "application/json");
_request.prepare_payload();
}
Client* _client;
Handler _handler;
http::request<http::string_body> _request;
http::response<http::string_body> _response;
beast::flat_buffer _buffer;
size_t _bandwidth = 0;
clk::time_point _start = clk::now();
// Invokes the handler on error or completion; an I/O error invalidates the
// pooled stream so the next request reconnects.
bool checked(error_code ec, bool complete = false) {
if (complete || ec)
std::move(_handler)(ec, std::move(_response.body()));
if (ec)
_client->_stream.reset();
return !ec.failed();
}
void initiate() override
{
if (!_client->_stream.has_value()) {
_client->async_reconnect(beast::bind_front_handler(
&RequestOp::on_connected, this->shared_from_this()));
} else {
on_connected(error_code{});
}
}
void on_connected(error_code ec) {
_start = clk::now(); // This matches the start of measurements in
// the original, synchronous code
http::async_write(*_client->_stream, _request,
beast::bind_front_handler(
&RequestOp::on_sent, this->shared_from_this()));
}
void on_sent(error_code ec, size_t transferred) {
_bandwidth += transferred; // measuring actual bytes including HTTP headers
if (checked(ec)) {
http::async_read(
*_client->_stream, _buffer, _response,
beast::bind_front_handler(&RequestOp::on_response,
this->shared_from_this()));
}
}
void on_response(error_code ec, size_t transferred) {
_bandwidth += transferred; // measuring actual bytes including HTTP headers
std::lock_guard lk(timeBytesMutex);
timeBytes.push_back({(clk::now() - _start) / 1.0ms, _bandwidth});
checked(ec, true);
}
};
private:
net::any_io_executor _executor;
Url::Spec _spec;
ssl::context& _sslcontext;
using Stream = beast::ssl_stream<beast::tcp_stream>;
std::optional<Stream> _stream; // nullopt when disconnected
// task queueing
using AsyncTask = std::shared_ptr<IAsyncTask>;
std::deque<AsyncTask> _tasks;
// Pushes the task on the executor; starts it immediately only when it is
// the sole entry (otherwise chain_tasks will start it later).
void enqueue(AsyncTask task) {
post(_executor,
[=, t = std::move(task), this, self = shared_from_this()] {
_tasks.push_back(std::move(t));
if (_tasks.size() == 1) {
_tasks.front()->initiate();
}
});
}
};
// Demo driver: issues several requests against httpbin.org, pooling one
// Client per host:port, then prints the collected timing statistics.
int main()
{
ssl::context ctx(ssl::context::tlsv12_client);
ctx.set_verify_mode(ssl::verify_peer);
ctx.set_default_verify_paths();
// load_root_certificates(ctx);
// Single I/O thread is enough: all waiting happens asynchronously.
net::thread_pool io(1);
std::map<Url::Spec, std::shared_ptr<Client> > pool;
using V = http::verb;
for (auto [url, verb, data] : {
std::tuple //
{"https://httpbin.org/post", V::post, "post data"},
{"https://httpbin.org/delay/5", V::delete_, ""},
{"https://httpbin.org/base64/ZGVjb2RlZCBiYXM2NA==", V::get, ""},
{"https://httpbin.org/delay/7", V::patch, ""},
{"https://httpbin.org/stream/3", V::get, ""},
{"https://httpbin.org/uuid", V::get, ""},
}) //
{
auto parsed = parseUrl(url);
std::cout << std::quoted(parsed.protocol) << " "
<< std::quoted(parsed.hostname) << " "
<< std::quoted(parsed.port) << " "
<< std::quoted(parsed.path) << "\n";
auto spec = parsed.specification();
// One Client (with its own strand) per distinct host:port.
if (!pool.contains(spec)) {
pool.emplace(spec,
std::make_shared<Client>(
make_strand(io.get_executor()), spec, ctx));
}
pool.at(spec)->async_request(
verb, parsed.path, data,
[=, v = verb, u = url](error_code ec, std::string const& body) {
std::cout << v << " to " << u << ": " << std::quoted(body)
<< std::endl;
});
}
io.join(); // wait for all queued requests to finish
for (auto& [time, bytes] : timeBytes) {
std::cout << bytes << " bytes in " << time << "ms\n";
}
}
On my system this prints
"https" "httpbin.org" "443" "/post"
"https" "httpbin.org" "443" "/delay/5"
"https" "httpbin.org" "443" "/base64/ZGVjb2RlZCBiYXM2NA=="
"https" "httpbin.org" "443" "/delay/7"
"https" "httpbin.org" "443" "/stream/3"
"https" "httpbin.org" "443" "/uuid"
Socket 0x7f4ad4001060 (re)connected to 18.232.227.86:443
POST to https://httpbin.org/post: "{
\"args\": {},
\"data\": \"post data\",
\"files\": {},
\"form\": {},
\"headers\": {
\"Accept\": \"application/json\",
\"Content-Length\": \"9\",
\"Content-Type\": \"application/json\",
\"Host\": \"httpbin.org\",
\"User-Agent\": \"Boost.Beast/318\",
\"X-Amzn-Trace-Id\": \"Root=1-618b513c-2c51c112061b10456a5e3d4e\"
},
\"json\": null,
\"origin\": \"163.158.244.77\",
\"url\": \"https://httpbin.org/post\"
}
"
DELETE to https://httpbin.org/delay/5: "{
\"args\": {},
\"data\": \"\",
\"files\": {},
\"form\": {},
\"headers\": {
\"Accept\": \"application/json\",
\"Content-Type\": \"application/json\",
\"Host\": \"httpbin.org\",
\"User-Agent\": \"Boost.Beast/318\",
\"X-Amzn-Trace-Id\": \"Root=1-618b513c-324c97504eb79d8b743c6c5d\"
},
\"origin\": \"163.158.244.77\",
\"url\": \"https://httpbin.org/delay/5\"
}
"
GET to https://httpbin.org/base64/ZGVjb2RlZCBiYXM2NA==: "decoded bas64"
PATCH to https://httpbin.org/delay/7: "{
\"args\": {},
\"data\": \"\",
\"files\": {},
\"form\": {},
\"headers\": {
\"Accept\": \"application/json\",
\"Content-Type\": \"application/json\",
\"Host\": \"httpbin.org\",
\"User-Agent\": \"Boost.Beast/318\",
\"X-Amzn-Trace-Id\": \"Root=1-618b5141-3a8c30e60562df583061fc5a\"
},
\"origin\": \"163.158.244.77\",
\"url\": \"https://httpbin.org/delay/7\"
}
"
GET to https://httpbin.org/stream/3: "{\"url\": \"https://httpbin.org/stream/3\", \"args\": {}, \"headers\": {\"Host\": \"httpbin.org\", \"X-Amzn-Trace-Id\": \"Root=1-618b5148-45fce8a8432930a006c0a574\", \"User-Agent\": \"Boost.Beast/318\", \"Content-Type\": \"application/json\", \"Accept\": \"application/json\"}, \"origin\": \"163.158.244.77\", \"id\": 0}
{\"url\": \"https://httpbin.org/stream/3\", \"args\": {}, \"headers\": {\"Host\": \"httpbin.org\", \"X-Amzn-Trace-Id\": \"Root=1-618b5148-45fce8a8432930a006c0a574\", \"User-Agent\": \"Boost.Beast/318\", \"Content-Type\": \"application/json\", \"Accept\": \"application/json\"}, \"origin\": \"163.158.244.77\", \"id\": 1}
{\"url\": \"https://httpbin.org/stream/3\", \"args\": {}, \"headers\": {\"Host\": \"httpbin.org\", \"X-Amzn-Trace-Id\": \"Root=1-618b5148-45fce8a8432930a006c0a574\", \"User-Agent\": \"Boost.Beast/318\", \"Content-Type\": \"application/json\", \"Accept\": \"application/json\"}, \"origin\": \"163.158.244.77\", \"id\": 2}
"
GET to https://httpbin.org/uuid: "{
\"uuid\": \"4557c909-880e-456c-8ef9-049a72f5fda1\"
}
"
826 bytes in 84.9807ms
752 bytes in 5267.26ms
425 bytes in 84.6031ms
751 bytes in 7085.28ms
1280 bytes in 86.6554ms
434 bytes in 85.0086ms
Note:
httpbin.org has all manner of test urls - some of which generate long delays, hence the timings
there's only 1 connection. In case of an IO error, we disconnect (and things should reconnect on the next request)
HTTP errors are not "errors" in that the connection stays valid
The DNS resolve, connect and handshake are also asynchronous

Boost::Asio : Why does async_write truncate the buffer when sending it through the given socket?

I'm currently attempting to design a fairly simple boost::asio server. My first unit test is fairly simple: send a JSON request {"COMMAND": "ADD_1", "VALUE" : 1} and receive the following response:
{
"SUCCESS" : true,
"VALUE" : 2
}
However, instead, the reply is truncated by one character after being read from the socket:
Reply is: {
"SUCCESS" : true,
"VALUE" : 2
Process finished with exit code 0
The code to write to the socket is fairly simple, a member function of a class RequestContext:
// Parses the JSON request received into data_, dispatches it, and writes the
// JSON response back asynchronously.
//
// BUG FIX 1: the response string must outlive async_write. It previously
// lived in a local, so the buffer dangled by the time the write ran (this is
// what truncated/corrupted replies). Holding it in a shared_ptr captured by
// the completion handler guarantees its lifetime.
// BUG FIX 2: data_ is not NUL-terminated; build the request string from the
// byte count actually read instead of treating data_ as a C string.
void RequestContext::DoWrite(std::size_t length)
{
    JSONCPP_STRING parse_err;
    Json::Value json_req, json_resp;
    auto self(this->shared_from_this());
    std::string client_req_str(data_, length);
    if (reader_->parse(client_req_str.c_str(),
                       client_req_str.c_str() +
                       client_req_str.length(),
                       &json_req, &parse_err))
    {
        try {
            // Get JSON response.
            json_resp = ProcessRequest(json_req);
            json_resp["SUCCESS"] = true;
        } catch (const std::exception &ex) {
            // Request handling failed.
            json_resp["SUCCESS"] = false;
            json_resp["ERRORS"] = std::string(ex.what());
        }
    } else {
        // If json parsing failed.
        json_resp["SUCCESS"] = false;
        json_resp["ERRORS"] = std::string(parse_err);
    }
    auto resp = std::make_shared<std::string>(
        Json::writeString(writer_, json_resp));
    boost::asio::async_write(socket_,
                             boost::asio::buffer(*resp),
                             [this, self, resp]
                             (boost::system::error_code ec,
                              std::size_t bytes_xfered) {
                                 if (!ec) DoRead();
                             });
}
I have verified that ProcessRequest returns the correct value, so the issue is evidently with async_write. I have tried increasing the value of the second argument to async_write, but it seems to have no effect. What am I doing wrong?
A minimum reproducible example can be found below:
#include <cstdlib>
#include <iostream>
#include <memory>
#include <utility>
#include <boost/asio.hpp>
#include <boost/system/error_code.hpp>
#include <json/json.h>
using boost::asio::ip::tcp;
using boost::system::error_code;
/// NOTE: This class exists exclusively for unit testing.
class RequestClass {
public:
    /**
     * Initialize class with value n to add/sub from input values.
     *
     * @param n Value to add/sub from input values.
     */
    explicit RequestClass(int n) : n_(n) {}
    /// Value to add/sub from
    int n_;
    /**
     * Add n to value in JSON request.
     *
     * @param request JSON request with field "VALUE".
     * @return JSON response containing modified field "VALUE" = [original_value] + n.
     */
    [[nodiscard]] Json::Value add_n(const Json::Value &request) const
    {
        Json::Value resp;
        resp["SUCCESS"] = true;
        // BUG FIX: test for presence with isMember(). The previous
        // `request.get("VALUE", NULL) != NULL` compared the value against a
        // default of 0, so a present "VALUE" of 0 was treated as missing.
        if (request.isMember("VALUE")) {
            resp["VALUE"] = request["VALUE"].asInt() + this->n_;
        } else {
            resp["SUCCESS"] = false;
            resp["ERRORS"] = "Invalid value.";
        }
        return resp;
    }
    /**
     * Sub n from value in JSON request.
     *
     * @param request JSON request with field "VALUE".
     * @return JSON response containing modified field "VALUE" = [original_value] - n.
     */
    [[nodiscard]] Json::Value sub_n(const Json::Value &request) const
    {
        Json::Value resp;
        resp["SUCCESS"] = true;
        // Same presence check as add_n (see BUG FIX note there).
        if (request.isMember("VALUE")) {
            resp["VALUE"] = request["VALUE"].asInt() - this->n_;
        } else {
            resp["SUCCESS"] = false;
            resp["ERRORS"] = "Invalid value.";
        }
        return resp;
    }
};
typedef std::function<Json::Value(RequestClass, const Json::Value &)> RequestClassMethod;
// One client connection: reads a JSON request, dispatches it through the
// command map, and writes a JSON response, then loops.
template<class RequestHandler, class RequestClass>
class RequestContext :
    public std::enable_shared_from_this<RequestContext<RequestHandler,
                                        RequestClass>>
{
public:
    typedef std::map<std::string, RequestHandler> CommandMap;
    /**
     * @param socket Connected client socket (ownership transferred).
     * @param commands Map of command name -> handler.
     * @param request_class_inst Instance handlers are invoked on (not owned).
     */
    RequestContext(tcp::socket socket, CommandMap commands,
                   RequestClass *request_class_inst)
        : socket_(std::move(socket))
        , commands_(std::move(commands))
        , request_class_inst_(request_class_inst)
        // BUG FIX: build the reader from a temporary builder; the previous
        // `(new Json::CharReaderBuilder)->newCharReader()` leaked the builder.
        , reader_(Json::CharReaderBuilder().newCharReader())
    {}
    void Run()
    {
        DoRead();
    }
    void Kill()
    {
        continue_ = false;
    }
private:
    tcp::socket socket_;
    RequestClass *request_class_inst_;
    CommandMap commands_;
    /// Reads JSON.
    const std::unique_ptr<Json::CharReader> reader_;
    /// Writes JSON.
    Json::StreamWriterBuilder writer_;
    bool continue_ = true;
    char data_[2048];
    /// Outgoing response. BUG FIX: must be a member (not a local in DoWrite)
    /// so the buffer handed to async_write stays alive until the write
    /// completes — a dangling local buffer was truncating the reply.
    std::string resp_;
    void DoRead()
    {
        auto self(this->shared_from_this());
        socket_.async_read_some(boost::asio::buffer(data_, 2048),
            [this, self](error_code ec, std::size_t length)
            {
                if (!ec)
                {
                    DoWrite(length);
                }
            });
    }
    void DoWrite(std::size_t length)
    {
        JSONCPP_STRING parse_err;
        Json::Value json_req, json_resp;
        auto self(this->shared_from_this());
        // BUG FIX: data_ is not NUL-terminated; use the received length.
        std::string client_req_str(data_, length);
        if (reader_->parse(client_req_str.c_str(),
                           client_req_str.c_str() +
                           client_req_str.length(),
                           &json_req, &parse_err))
        {
            try {
                // Get JSON response.
                json_resp = ProcessRequest(json_req);
                json_resp["SUCCESS"] = true;
            } catch (const std::exception &ex) {
                // Request handling failed.
                json_resp["SUCCESS"] = false;
                json_resp["ERRORS"] = std::string(ex.what());
            }
        } else {
            // If json parsing failed.
            json_resp["SUCCESS"] = false;
            json_resp["ERRORS"] = std::string(parse_err);
        }
        resp_ = Json::writeString(writer_, json_resp);
        boost::asio::async_write(socket_,
                                 boost::asio::buffer(resp_),
                                 [this, self]
                                 (boost::system::error_code ec,
                                  std::size_t bytes_xfered) {
                                     if (!ec) DoRead();
                                 });
    }
    // Looks up request["COMMAND"] in the command map and runs the handler;
    // unknown commands yield an error response.
    Json::Value ProcessRequest(Json::Value request)
    {
        Json::Value response;
        std::string command = request["COMMAND"].asString();
        // If command is not valid, give a response with an error.
        if(commands_.find(command) == commands_.end()) {
            response["SUCCESS"] = false;
            response["ERRORS"] = "Invalid command.";
        }
        // Otherwise, run the relevant handler.
        else {
            RequestHandler handler = commands_.at(command);
            response = handler(*request_class_inst_, request);
        }
        return response;
    }
};
// Accepts connections on `port` and spins up one RequestContext per client.
template<class RequestHandler, class RequestClass>
class Server {
public:
typedef std::map<std::string, RequestHandler> CommandMap;
// NOTE(review): request_class_inst is borrowed, not owned; the caller must
// keep it alive for the server's lifetime.
Server(boost::asio::io_context &io_context, short port,
const CommandMap &commands,
RequestClass *request_class_inst)
: acceptor_(io_context, tcp::endpoint(tcp::v4(), port))
, commands_(commands)
, request_class_inst_(request_class_inst)
{
DoAccept();
}
~Server()
{
Kill();
}
void Kill()
{
continue_ = false;
}
private:
tcp::acceptor acceptor_;
// NOTE(review): continue_ is written by Kill() but never read, and is
// accessed without synchronization — the accept loop ignores it.
bool continue_ = true;
CommandMap commands_;
RequestClass *request_class_inst_;
void DoAccept()
{
acceptor_.async_accept(
[this](boost::system::error_code ec, tcp::socket socket) {
if (!ec)
std::make_shared<RequestContext<RequestHandler, RequestClass>>
(std::move(socket), commands_, request_class_inst_)->Run();
DoAccept();
});
}
};
void RunServer(short port)
{
boost::asio::io_context io_context;
auto *request_inst = new RequestClass(1);
std::map<std::string, RequestClassMethod> commands {
{"ADD_1", std::mem_fn(&RequestClass::add_n)},
{"SUB_1", std::mem_fn(&RequestClass::sub_n)}
};
Server<RequestClassMethod, RequestClass> s(io_context, port, commands,
request_inst);
io_context.run();
}
// Launches RunServer on a detached background thread (fire-and-forget; the
// server thread lives until process exit).
void RunServerInBackground(short port)
{
    std::thread([port] { RunServer(port); }).detach();
}
// Smoke test: start the server in the background, send one ADD_1 request,
// print the reply.
int main()
{
    try
    {
        RunServerInBackground(5000);
        boost::asio::io_context io_context;
        tcp::socket s(io_context);
        tcp::resolver resolver(io_context);
        boost::asio::connect(s, resolver.resolve("127.0.0.1", "5000"));
        char request[2048] = R"({"COMMAND": "ADD_1", "VALUE" : 1})";
        size_t request_length = std::strlen(request);
        boost::asio::write(s, boost::asio::buffer(request, request_length));
        char reply[2048];
        // BUG FIX: the original read exactly `request_length` bytes — a size
        // unrelated to the reply, which truncated the response — and then
        // printed `reply` as a C string although it is not NUL-terminated.
        // Read whatever is available and print exactly that many bytes.
        // (The protocol still has no framing, so in principle a reply could
        // arrive split across several reads.)
        size_t reply_length = s.read_some(boost::asio::buffer(reply));
        std::cout << "Reply is: ";
        std::cout.write(reply, reply_length);
        std::cout << std::endl;
    }
    catch (std::exception& e)
    {
        std::cerr << "Exception: " << e.what() << "\n";
    }
    return 0;
}
The outgoing buffer needs to be a class member, just like data_, so that the lifetime is guaranteed until async_write is completed.
You can also spot issues like this with linter/runtime checks like ASAN/UBSAN or Valgrind.
UPDATE
Also
size_t reply_length =
boost::asio::read(s, boost::asio::buffer(reply, request_length));
wrongly uses request_length. As a rule, avoid manually specifying buffer sizes, at any time.
Besides, your protocol doesn't provide framing, so you cannot practically keep the same connection open for newer requests (you don't know how many bytes to expect for a response to be complete). I'll "fix" it here by closing the connection after the first request, so we have a working demo.
There's also a race condition with the continue_ flags, but I'll leave that as an exercise for the reader.
Of course, consider not leaking the request class instance.
Oh, I also switched to Boost JSON as it seemed an easier fit:
Live On Coliru
#include <boost/asio.hpp>
#include <boost/json.hpp>
#include <boost/json/src.hpp>
#include <iostream>
using boost::asio::ip::tcp;
using boost::system::error_code;
namespace json = boost::json;
using Value = json::object;
/// NOTE: This class exists exclusively for unit testing.
struct Sample {
    int n_;
    // Each public operation applies op(VALUE, n_) and reports the outcome.
    Value add_n(Value const& request) const { return impl(std::plus<>{}, request); }
    Value sub_n(Value const& request) const { return impl(std::minus<>{}, request); }
    Value mul_n(Value const& request) const { return impl(std::multiplies<>{}, request); }
    Value div_n(Value const& request) const { return impl(std::divides<>{}, request); }
private:
    // Shared worker: a missing "VALUE" yields an error object; otherwise the
    // computed value with a success flag.
    template <typename Op> Value impl(Op op, Value const& req) const {
        if (!req.contains("VALUE"))
            return Value{{"ERRORS", "Invalid value."}, {"SUCCESS", false}};
        return Value{{"VALUE", op(req.at("VALUE").as_int64(), n_)},
                     {"SUCCESS", true}};
    }
};
using RequestClassMethod =
std::function<Value(Sample, Value const&)>;
// Per-connection session: reads one JSON request, dispatches it to a command
// handler, writes the JSON reply, then loops back for the next request.
// Lifetime is managed by shared_from_this captured in each handler.
template <class RequestHandler, class RequestClass>
class RequestContext
    : public std::enable_shared_from_this<
          RequestContext<RequestHandler, RequestClass>> {
  public:
    using CommandMap = std::map<std::string, RequestHandler>;

    // `request_class_inst` is non-owning; the caller keeps it alive for the
    // duration of the session.
    RequestContext(tcp::socket socket, CommandMap commands,
                   RequestClass* request_class_inst)
        : socket_(std::move(socket))
        , commands_(std::move(commands))
        , request_class_inst_(request_class_inst)
    {}

    void Run() { DoRead(); }
    void Kill() { continue_ = false; }

  private:
    tcp::socket socket_;
    CommandMap commands_;
    RequestClass* request_class_inst_; // non-owning
    bool continue_ = true;             // cleared by Kill() to stop the loop
    char data_[2048];
    std::string resp_; // member: must outlive async_write

    void DoRead()
    {
        if (!continue_) // BUGFIX: Kill() previously had no effect at all
            return;
        socket_.async_read_some(
            boost::asio::buffer(data_),
            [this, self = this->shared_from_this()](error_code ec, std::size_t length) {
                if (!ec) {
                    DoWrite(length);
                }
            });
    }

    void DoWrite(std::size_t length)
    {
        Value json_resp;
        try {
            auto json_req = json::parse({data_, length}).as_object();
            json_resp = ProcessRequest(json_req);
            // BUGFIX: only default SUCCESS when the handler did not set it.
            // The old unconditional `json_resp["SUCCESS"] = true;` rewrote
            // error replies (e.g. "Invalid command.") to claim success.
            if (!json_resp.contains("SUCCESS"))
                json_resp["SUCCESS"] = true;
        } catch (std::exception const& ex) {
            json_resp = {{"SUCCESS", false}, {"ERRORS", ex.what()}};
        }
        resp_ = json::serialize(json_resp);
        boost::asio::async_write(socket_, boost::asio::buffer(resp_),
            [this, self = this->shared_from_this()](
                error_code ec, size_t bytes_xfered) {
                if (!ec)
                    DoRead();
            });
    }

    // Looks up request["COMMAND"] in the command map and invokes the matching
    // handler on the request-class instance; returns an error reply for an
    // unknown command or a null instance. Takes the request by const
    // reference (the old by-value parameter copied the whole object).
    Value ProcessRequest(Value const& request)
    {
        auto command = request.contains("COMMAND")
            ? request.at("COMMAND").as_string()
            : "";
        std::string cmdstr(command.data(), command.size());
        // If command is not valid, give a response with an error.
        return commands_.contains(cmdstr) && request_class_inst_
            ? commands_.at(cmdstr)(*request_class_inst_, request)
            : Value{{"SUCCESS", false}, {"ERRORS", "Invalid command."}};
    }
};
// Accepting server: listens on `port` and spawns a self-owning
// RequestContext per incoming connection.
template<class RequestHandler, class RequestClass>
class Server {
  public:
    using CommandMap = std::map<std::string, RequestHandler>;

    // `request_class_inst` is non-owning; the caller keeps it alive while
    // the server (and any spawned sessions) may still use it.
    Server(boost::asio::io_context& io_context, uint16_t port,
           const CommandMap& commands, RequestClass* request_class_inst)
        : acceptor_(io_context, {{}, port})
        , commands_(commands)
        , request_class_inst_(request_class_inst)
    {
        DoAccept();
    }

    ~Server() { Kill(); }
    void Kill() { continue_ = false; }

  private:
    tcp::acceptor acceptor_;
    bool continue_ = true; // cleared by Kill() to stop accepting
    CommandMap commands_;
    RequestClass* request_class_inst_; // non-owning

    void DoAccept()
    {
        if (!continue_) // BUGFIX: the flag was set by Kill() but never read,
            return;     // so the accept loop could never be stopped
        acceptor_.async_accept(
            [this](error_code ec, tcp::socket socket) {
                if (!ec)
                    std::make_shared<
                        RequestContext<RequestHandler, RequestClass>>(
                        std::move(socket), commands_, request_class_inst_)
                        ->Run();
                DoAccept();
            });
    }
};
void RunServer(uint16_t port)
{
boost::asio::io_context io_context;
Server<RequestClassMethod, Sample> s(
io_context, port,
{{"ADD_2", std::mem_fn(&Sample::add_n)},
{"SUB_2", std::mem_fn(&Sample::sub_n)},
{"MUL_2", std::mem_fn(&Sample::mul_n)},
{"DIV_2", std::mem_fn(&Sample::div_n)}},
new Sample{2});
io_context.run();
}
// Fire-and-forget: runs RunServer(port) on a detached background thread.
// The thread (and its io_context) lives until the process exits.
void RunServerInBackground(uint16_t port)
{
    std::thread{[port] { RunServer(port); }}.detach();
}
int main() try {
RunServerInBackground(5000);
::sleep(1); // avoid startup race
boost::asio::io_context io;
tcp::socket s(io);
s.connect({{}, 5000});
std::string const request = R"({"COMMAND": "MUL_2", "VALUE" : 21})";
std::cout << "Request: " << std::quoted(request, '\'') << std::endl;
boost::asio::write(s, boost::asio::buffer(request));
s.shutdown(tcp::socket::shutdown_send); // avoid framing problems
error_code ec;
char reply[2048];
size_t reply_length = boost::asio::read(s, boost::asio::buffer(reply), ec);
std::cout << "Reply is: "
<< std::quoted(std::string_view(reply, reply_length), '\'')
<< " (" << ec.message() << ")" << std::endl;
} catch (std::exception const& e) {
std::cerr << "Exception: " << e.what() << "\n";
}
Prints
Request: '{"COMMAND": "MUL_2", "VALUE" : 21}'
Reply is: '{"VALUE":42,"SUCCESS":true}' (End of file)

connection attempt with timeout as a composed operation using ASIO

I have written a class that attempts to establish a connection with a TCP server provided with a custom timeout and a number of attempts. It is a Callable object that returns an std::future for a result.
The problems with my initial implementation are:
the object has to be persistent until either a connection has been established, or it has run out of attempts or a stop case error has occurred. So I have to store it inside my class which I hope to avoid.
asio composed operations provide means for customization for the control flow on return: a CompletionToken might be a simple callback, a future, or a coroutine could be used. In my case I have bound the user to a future.
This is my initial implementation for a connection attempt with a custom timeout and number of attempts:
// OP's initial implementation: retries async_connect with a per-attempt
// timeout, reporting the overall outcome through a std::promise/std::future.
// NOTE(review): this object must stay alive until the future is satisfied —
// exactly the lifetime problem the question is about.
template<typename Connection>
class connection_attempt
{
public:
using connection_type = Connection;
using endpoint_type = typename Connection::endpoint_type;
// SFINAE guard: overload participates only when Endpoint is exactly the
// connection's endpoint type.
template<typename Endpoint>
using require_endpoint = typename std::enable_if<std::is_same<Endpoint, endpoint_type>::value>::type;
constexpr static auto default_timeout()
{
return std::chrono::milliseconds(3000);
}
// Sentinel: maximum size_t means "retry forever".
constexpr static size_t infinite_attempts()
{
return size_t() - 1;
}
explicit connection_attempt(Connection &connection)
: connection_(connection)
{}
template<typename Callable>
explicit connection_attempt(Connection &connection,
Callable &&stopOnError)
: connection_(connection),
stopOnError_(std::forward<Callable>(stopOnError))
{}
// Initiation: starts the retry series and returns a future for the result.
// NOTE(review): a default argument on a deduced template parameter cannot
// be used without an explicit call-site argument — verify callers.
template<typename Endpoint,
typename Duration,
typename = require_endpoint<Endpoint>>
std::future<bool> operator()(Endpoint &&endpoint,
size_t attempts,
Duration &&timeout = default_timeout())
{
// Fresh promise so the object can be reused for a new series.
connectionResult_ = {};
asyncConnect(std::forward<Endpoint>(endpoint),
attempts,
std::forward<Duration>(timeout));
return connectionResult_.get_future();
}
// default attempts = infinite_attempts
// NOTE(review): endpoint is taken by value here yet std::forward'ed below.
template<typename Endpoint,
typename Duration,
typename = require_endpoint<Endpoint>>
std::future<bool> operator()(Endpoint endpoint,
Duration &&timeout = default_timeout())
{
connectionResult_ = {};
asyncConnect(std::forward<Endpoint>(endpoint),
infinite_attempts(),
std::forward<Duration>(timeout));
return connectionResult_.get_future();
}
private:
// Non-owning; the connection must outlive this object.
connection_type &connection_;
asio::steady_timer timer_
{connection_.get_executor()}; // this does not compile -> {asio::get_associated_executor(connection_)};
// Optional predicate: return true to abort the retry series on this error.
std::function<bool(const asio::error_code &)> stopOnError_;
// Satisfied exactly once per series (true = connected).
std::promise<bool> connectionResult_;
// cancels the connection on timeout!
template<typename Duration>
void startTimer(const Duration &timeout)
{
timer_.expires_after(timeout); // it will automatically cancel a pending timer
timer_.async_wait(
[this, timeout](const asio::error_code &errorCode)
{
// will occur on connection error before timeout
if (errorCode == asio::error::operation_aborted)
return;
// TODO: handle timer errors? What are the possible errors?
assert(!errorCode && "unexpected timer error!");
// stop current connection attempt
connection_.cancel();
});
}
void stopTimer()
{
timer_.cancel();
}
/**
* Will be trying to connect until:<br>
* - has run out of attempts
* - has been required to stop by stopOnError callback (if it was set)
* #param endpoint
* #param attempts
*/
template<typename Duration>
void asyncConnect(endpoint_type endpoint,
size_t attempts,
Duration &&timeout)
{
startTimer(timeout);
connection_.async_connect(endpoint, [this,
endpoint,
attempts,
timeout = std::forward<Duration>(timeout)](const asio::error_code &errorCode)
{
if (!errorCode)
{
stopTimer();
connectionResult_.set_value(true);
return;
}
// Count down remaining attempts unless retrying forever.
const auto attemptsLeft = attempts == infinite_attempts() ?
infinite_attempts() :
attempts - 1;
if ((stopOnError_ &&
stopOnError_(errorCode == asio::error::operation_aborted ?
// special case for operation_aborted on timer expiration - need to send timed_out explicitly
// this should only be resulted from the timer calling cancel()
asio::error::timed_out :
errorCode)) ||
!attemptsLeft)
{
stopTimer();
connectionResult_.set_value(false);
return;
}
// Retry: re-arms the timer and issues another async_connect.
asyncConnect(endpoint,
attemptsLeft,
timeout);
});
}
};
// this should be an asynchronous function with a custom CompletionToken
// Factory helper: deduces Connection so callers need not spell out the
// connection_attempt template argument.
template<typename Connection, typename Callable>
auto make_connection_attempt(Connection &connection, Callable &&stopOnError)
    -> connection_attempt<Connection>
{
    using attempt_type = connection_attempt<Connection>;
    return attempt_type(connection, std::forward<Callable>(stopOnError));
}
However, I want to be consistent using ASIO and the Universal Model for Asynchronous Operations: control flow on return should be customizable.
I have followed through the example for sending several messages with intervals using a composed operation with a stateful intermediate handler. The handler recursively passes itself as a handler for each next asynchronous operation: async_wait and async_write. These calls are always made in turns: one is always invoked when the other has returned. In my case, however, async_wait and async_connect are invoked simultaneously:
// initiation method, called first
void operator()(args...)
{
// not valid!
timer.async_wait(std::move(*this)); // from now on this is invalid
connection.async_connect(endpoint, std::move(*this)); can't move this twice
}
This is a code for a class I am trying to implement as an initiation and an intermediate handler:
// OP's sketch of the operation as both initiation function and intermediate
// completion handler; the TODOs mark the unresolved design questions posed
// below (ownership sharing, and telling timer vs. connect completions apart).
template<typename Connection, typename CompletionToken>
class composed_connection_attempt
{
public:
using connection_type = Connection;
using endpoint_type = typename Connection::endpoint_type;
// Intended to record which event (connect vs. timer) happened first.
enum class state
{
pending,
connected,
timeout
};
constexpr static auto default_timeout()
{
return std::chrono::milliseconds(3000);
}
// Sentinel: maximum size_t means "retry forever".
constexpr static size_t infinite_attempts()
{
return size_t() - 1;
}
// TODO: executor type
using executor_type = asio::associated_executor_t<CompletionToken,
typename connection_type::executor_type>;
executor_type get_executor() const noexcept
{
// TODO: get completion handler executor
return connection_.get_executor();
}
// TODO: allocator type
using allocator_type = typename asio::associated_allocator_t<CompletionToken,
std::allocator<void>>;
allocator_type get_allocator() const noexcept
{
// TODO: get completion handler allocator
return allocator_type();
}
// TODO: constructor to initialize state, pass timeout value?
explicit composed_connection_attempt(connection_type &connection)
: connection_(connection)
{}
template<typename Callable>
composed_connection_attempt(connection_type &connection, Callable &&stopOnError)
: connection_(connection),
stopOnError_(std::forward<Callable>(stopOnError))
{}
// operator for initiation
// NOTE(review): body intentionally unimplemented — this is the question.
template<typename Endpoint, typename Duration>
void operator()(Endpoint &&endpoint,
size_t attempts,
Duration timeout = default_timeout())
{
// Start timer: how to pass this
// Attempt connection
}
// intermediate completion handler
// this may be invoked without an error both by the timer and a connection
void operator()(const asio::error_code &errorCode)
{
if (!errorCode)
{
}
}
private:
// Non-owning; the connection must outlive the operation.
Connection &connection_;
asio::steady_timer timer_{this->get_executor()};
std::atomic<state> state_{state::pending};
std::function<bool(const asio::error_code &)> stopOnError_;
std::function<void(const asio::error_code &)> completionHandler_;
};
So, the problems I am trying to resolve:
How to share ownership of a stateful intermediate handler with both a timer and a connection (socket)? Maybe I have to use nested classes (main class for initiation and nested for timer and socket events)?
How to determine which of the asynchronous calls resulted in a void operator()(const asio::error_code&) invocation? No error might be the result of a successful connection or a timeout. Both asynchronous operations also can return asio::error::operation_aborted on cancelation: the connection attempt is cancelled on timeout, timer is cancelled on success or on connection error.
Finally got around to this one:
Wow. I've just created the same without using spawn (using an operation type that uses the State struct arguments as I mentioned). I must say the complexity of this kind of library-implementor-stuff keeps surprising me. I managed to avoid the overhead of shared_from_this though, and of course all demos still pass, so I'm pretty content. If you want I can post as an alternative answer. – sehe yesterday
The Initiation Function
The initiation function is roughly the same, except it no longer uses spawn (meaning that the user doesn't have to opt-in to Boost Coroutine and Boost Context).
// Initiation function (async_result protocol): turns the caller's completion
// token into a concrete handler, allocates the operation state exactly once,
// and launches the call chain before returning the token-specific result.
template <typename F, typename Token>
auto async_connection_attempt(tcp::socket& object, tcp::endpoint ep, F&& stopOn,
Token&& token, int attempts = -1,
Timer::duration delay = 3s) {
using Result = asio::async_result<std::decay_t<Token>,
void(error_code, bool)>;
using Completion = typename Result::completion_handler_type;
// Protocol: the handler must be constructed from the token before the
// async_result is constructed from the handler.
Completion completion(std::forward<Token>(token));
Result result(completion);
using Op = mylib::connection_attempt_op<std::decay_t<F>, Completion>;
// make an owning self, to be passed along a single async call chain
auto self = std::make_unique<Op>(object, ep, std::forward<F>(stopOn), completion, attempts, delay);
(*self)(self);
return result.get();
}
Now, you'll immediately spot that I used a unique-ownership container (unique_ptr). I tried to avoid the dynamic allocation, by creating a value-semantics operation class that encapsulated the handler in move-only fashion.
However, the operation also owns the timer object which needs to be reference-stable across callbacks. So, moving is not an option. Of course, we could still have a movable-value-operation type that contained just a single unique_ptr for the _timer, but that's the same overhead and less generic.
if we add another IO object to the operation state we would require more dynamic allocations
moving a unique_ptr is strictly cheaper than moving a state object a multiple of its size
moving the object pointed to by this inside member functions is very error-prone. E.g., this would invoke undefined behaviour:
bind_executor(_ex, std::bind(std::move(*this), ...))
That's because _ex is actually this->_ex but the evaluation is not sequenced, so this->_ex might be evaluated after the move.
It's the kind of footgun we should not want.
if we implement other async operations we can use the same pattern.
The Operation Class
You will recognize this from your original code. I opted to use my own suggestion to select operator() overloads by dispatching on a marker "state" type:
struct Init {};
struct Attempt {};
struct Response {};
To aid in binding to ourself, we also pass the owning unique_ptr as the self argument:
using Self = std::unique_ptr<connection_attempt_op>;
struct Binder {
Self _self;
template <typename... Args>
decltype(auto) operator()(Args&&... args) {
return (*_self)(_self, std::forward<Args>(args)...);
}
};
Due to limitations in std::bind we can't pass actual rvalue parameters, but
if we're careful that the call chain is strictly sequential we can always move from self precisely once
due to the indirection of the unique_ptr it is still safe to use this from the method body after moving self
we can now capture the stable value of this in our completion handler for _timer.async_wait! As long as we guarantee that the completion handler doesn't outlive the lifetime of self, we don't have to share ownership here.
shared_ptr dependence averted!
With those in mind, I think the full implementation has few surprises:
namespace mylib { // implementation details
// Composed connect-with-retry operation. The operation owns its timer and is
// itself owned by a unique_ptr ("Self") that is threaded along the async
// call chain — no shared_from_this required. All completions run on the
// strand _ex to keep the timer/connect handler ordering safe.
template <typename F, typename Completion> struct connection_attempt_op {
// Non-owning; the socket must outlive the operation.
tcp::socket& _object;
tcp::endpoint _ep;
// Predicate: return true to stop retrying on the given error.
F _stopOn;
// Final completion handler derived from the caller's token.
Completion _handler;
// Remaining attempts; a negative start value is effectively unbounded.
int _attempts;
// Per-attempt timeout.
Timer::duration _delay;
using executor_type =
asio::strand<std::decay_t<decltype(_object.get_executor())>>;
executor_type _ex;
// unique_ptr keeps the timer reference-stable across callbacks.
std::unique_ptr<Timer> _timer;
executor_type const& get_executor() { return _ex; }
explicit connection_attempt_op(tcp::socket& object, tcp::endpoint ep,
F stopOn, Completion handler,
int attempts, Timer::duration delay)
: _object(object),
_ep(ep),
_stopOn(std::move(stopOn)),
_handler(std::move(handler)),
_attempts(attempts),
_delay(delay),
_ex(object.get_executor()) {}
// Marker types select the operator() overload for each phase.
struct Init {};
struct Attempt {};
struct Response {};
using Self = std::unique_ptr<connection_attempt_op>;
// Move-only callable forwarding invocations back into the operation while
// carrying ownership of it along the chain.
struct Binder {
Self _self;
template <typename... Args>
decltype(auto) operator()(Args&&... args) {
return (*_self)(_self, std::forward<Args>(args)...);
}
};
void operator()(Self& self, Init = {}) {
// This is the only invocation perhaps not yet on the strand, so
// dispatch
asio::dispatch(_ex, std::bind(Binder{std::move(self)}, Attempt{}));
}
// One attempt: arm the timeout timer, then start async_connect.
void operator()(Self& self, Attempt) {
if (_attempts--) {
_timer = std::make_unique<Timer>(_ex, _delay);
_timer->async_wait([this](error_code ec) {
if (!ec) _object.cancel();
});
_object.async_connect(
_ep,
asio::bind_executor(
_ex, // _object may not already have been on strand
std::bind(Binder{std::move(self)}, Response{},
std::placeholders::_1)));
} else {
_handler(mylib::result_code::attempts_exceeded, false);
}
}
// Connect outcome: success completes; timeout or a retryable error loops
// back into another Attempt.
void operator()(Self& self, Response, error_code ec) {
if (!ec) {
_timer.reset();
return _handler(result_code::ok, true);
}
if (ec == asio::error::operation_aborted) {
// aborted here means our own timer cancelled the connect
ec = result_code::timeout;
}
if (_stopOn && _stopOn(ec))
return _handler(ec, false);
_timer.reset();
_object.close();
operator()(self, Attempt{});
}
};
}
Do note the executor binding; the comment in the Init{} overload as well as with the bind_executor are relevant here.
The strand is essential to maintaining the lifetime guarantees that we needed w.r.t. the async_wait operation. In particular we need the handler ordering to follow this.
DEMO TIME
The rest of the code is 100% identical to the other answer, so let's present it without further comment:
Live On Wandbox
//#define BOOST_ASIO_ENABLE_HANDLER_TRACKING
//#define BOOST_ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS
#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
#include <iostream>
#include <iomanip>
#ifdef STANDALONE_ASIO
using std::error_category;
using std::error_code;
using std::error_condition;
using std::system_error;
#else
namespace asio = boost::asio;
using boost::system::error_category;
using boost::system::error_code;
using boost::system::error_condition;
using boost::system::system_error;
#endif
using namespace std::chrono_literals;
using asio::ip::tcp;
using Timer = asio::steady_timer;
namespace mylib { // error-code plumbing for the composed operation's results
// Outcome of a connection attempt; convertible to error_code via
// make_error_code below.
enum class result_code {
    ok = 0,
    timeout = 1,
    attempts_exceeded = 2,
    not_implemented = 3,
};

// Returns the singleton category that renders result_code values.
auto const& get_result_category() {
    struct category_impl : error_category {
        const char* name() const noexcept override { return "result_code"; }

        std::string message(int ev) const override {
            switch (static_cast<result_code>(ev)) {
            case result_code::ok:
                return "success";
            case result_code::attempts_exceeded:
                return "the maximum number of attempts was exceeded";
            case result_code::timeout:
                return "the operation did not complete in time";
            case result_code::not_implemented:
                return "feature not implemented";
            default:
                return "unknown error";
            }
        }

        error_condition default_error_condition(int ev) const noexcept override {
            return error_condition{ev, *this};
        }

        bool equivalent(int ev,
                        error_condition const& condition) const noexcept override {
            return condition.value() == ev && &condition.category() == this;
        }

        bool equivalent(error_code const& error, int ev) const noexcept override {
            return error.value() == ev && &error.category() == this;
        }
    };
    static const category_impl instance;
    return instance;
}

// Adapts a result_code to a generic error_code carrying our category.
error_code make_error_code(result_code se) {
    return error_code{static_cast<std::underlying_type<result_code>::type>(se),
                      get_result_category()};
}
} // namespace mylib
// Opt result_code into Boost.System's error-code machinery so it converts
// implicitly to error_code (through ADL-found mylib::make_error_code).
template <>
struct boost::system::is_error_code_enum<mylib::result_code>
: std::true_type {};
namespace mylib { // implementation details
// Connect-with-retry composed operation (full-demo copy). Ownership is a
// unique_ptr ("Self") passed along the single async call chain, avoiding
// shared_from_this; the strand _ex serializes timer and connect handlers.
template <typename F, typename Completion> struct connection_attempt_op {
// Non-owning socket reference; must outlive the operation.
tcp::socket& _object;
tcp::endpoint _ep;
// Stop predicate: true = abort the retry loop on this error.
F _stopOn;
// Final completion handler from the caller's token.
Completion _handler;
// Remaining attempts (negative start value: effectively unbounded).
int _attempts;
// Per-attempt timeout.
Timer::duration _delay;
using executor_type =
asio::strand<std::decay_t<decltype(_object.get_executor())>>;
executor_type _ex;
// Heap-held so the timer stays reference-stable across callbacks.
std::unique_ptr<Timer> _timer;
executor_type const& get_executor() { return _ex; }
explicit connection_attempt_op(tcp::socket& object, tcp::endpoint ep,
F stopOn, Completion handler,
int attempts, Timer::duration delay)
: _object(object),
_ep(ep),
_stopOn(std::move(stopOn)),
_handler(std::move(handler)),
_attempts(attempts),
_delay(delay),
_ex(object.get_executor()) {}
// Phase markers for operator() overload selection.
struct Init {};
struct Attempt {};
struct Response {};
using Self = std::unique_ptr<connection_attempt_op>;
// Forwards calls back into the operation while owning it.
struct Binder {
Self _self;
template <typename... Args>
decltype(auto) operator()(Args&&... args) {
return (*_self)(_self, std::forward<Args>(args)...);
}
};
void operator()(Self& self, Init = {}) {
// This is the only invocation perhaps not yet on the strand, so
// dispatch
asio::dispatch(_ex, std::bind(Binder{std::move(self)}, Attempt{}));
}
// One attempt: arm the timeout, then start the connect.
void operator()(Self& self, Attempt) {
if (_attempts--) {
_timer = std::make_unique<Timer>(_ex, _delay);
_timer->async_wait([this](error_code ec) {
if (!ec) _object.cancel();
});
_object.async_connect(
_ep,
asio::bind_executor(
_ex, // _object may not already have been on strand
std::bind(Binder{std::move(self)}, Response{},
std::placeholders::_1)));
} else {
_handler(mylib::result_code::attempts_exceeded, false);
}
}
// Connect outcome: complete on success; otherwise map timer-driven
// cancellation to `timeout` and retry unless the predicate stops us.
void operator()(Self& self, Response, error_code ec) {
if (!ec) {
_timer.reset();
return _handler(result_code::ok, true);
}
if (ec == asio::error::operation_aborted) {
ec = result_code::timeout;
}
if (_stopOn && _stopOn(ec))
return _handler(ec, false);
_timer.reset();
_object.close();
operator()(self, Attempt{});
}
};
}
// Initiation function following asio's async_result protocol: constructs the
// completion handler from the token, heap-allocates the operation state one
// time, kicks off the chain, and returns the token-specific result
// (a future for use_future, void for plain callbacks, etc.).
template <typename F, typename Token>
auto async_connection_attempt(tcp::socket& object, tcp::endpoint ep, F&& stopOn,
Token&& token, int attempts = -1,
Timer::duration delay = 3s) {
using Result = asio::async_result<std::decay_t<Token>,
void(error_code, bool)>;
using Completion = typename Result::completion_handler_type;
// Order matters: handler from token first, then result from handler.
Completion completion(std::forward<Token>(token));
Result result(completion);
using Op = mylib::connection_attempt_op<std::decay_t<F>, Completion>;
// make an owning self, to be passed along a single async call chain
auto self = std::make_unique<Op>(object, ep, std::forward<F>(stopOn), completion, attempts, delay);
(*self)(self);
return result.get();
}
// stopOn predicate for the demos: logs each error it inspects but never
// classifies anything as fatal, so the retry loop always continues.
static auto non_recoverable = [](error_code ec) {
    auto const description = ec.message();
    std::cerr << "Checking " << std::quoted(description) << "\n";
    // TODO Be specific about intermittent/recoverable conditions
    return false;
};
#include <set>
// Demo driver: argv[1] is the target IP; any of the remaining args may
// select a demo ("future", "coroutine", "callback") against port 8989.
int main(int argc, char** argv) {
assert(argc>1);
static const tcp::endpoint ep{asio::ip::make_address(argv[1]),
8989};
std::set<std::string_view> const options{argv+1, argv+argc};
std::cout << std::boolalpha;
// Demo 1: std::future completion token, run on a thread pool.
if (options.contains("future")) {
std::cout
<< "-----------------------\n"
<< " FUTURE DEMO\n"
<< "-----------------------" << std::endl;
asio::thread_pool ctx;
try {
tcp::socket s(ctx);
std::future<bool> ok = async_connection_attempt(
s, ep, non_recoverable, asio::use_future, 5, 800ms);
std::cout << "Future: " << ok.get() << ", " << s.is_open() << "\n";
} catch (system_error const& se) {
// use_future reports failure by throwing system_error from get()
std::cout << "Future: " << se.code().message() << "\n";
}
ctx.join();
}
// Demo 2: stackful coroutine (yield_context) completion token.
if (options.contains("coroutine")) {
std::cout
<< "-----------------------\n"
<< " COROUTINE DEMO\n"
<< "-----------------------" << std::endl;
asio::io_context ctx;
asio::spawn(ctx,
[work = make_work_guard(ctx)](asio::yield_context yc) {
auto ex = get_associated_executor(yc);
tcp::socket s(ex);
error_code ec;
if (async_connection_attempt(s, ep, non_recoverable,
yc[ec], 5, 800ms)) {
std::cout << "Connected in coro\n";
} else {
std::cout << "NOT Connected in coro: " << ec.message() << "\n";
}
});
ctx.run();
}
// Demo 3: plain callback completion token.
if (options.contains("callback")) {
std::cout
<< "-----------------------\n"
<< " CALLBACK DEMO\n"
<< "-----------------------" << std::endl;
asio::io_context ctx;
tcp::socket s(ctx);
async_connection_attempt(
s, ep, non_recoverable,
[](error_code ec, bool ok) {
std::cout << "Callback: " << ok << ", "
<< ec.message() << "\n";
},
5, 800ms);
ctx.run();
}
}
Here's another local demo with different scenarios:
So, for the second question I suggested a discriminating argument (sometimes I use an empty "state struct", like State::Init{} or State::Timeout{} to aid in overload resolution as well as self-documentation).
For the first question I'm sure you may have run into std::enable_shared_from_this since.
Here's my take on the "Universal Model". I used spawn for ease of exposition.
// "Universal model" initiation using asio::spawn for exposition: the whole
// retry loop reads as straight-line code inside a stackful coroutine.
template <typename F, typename Token>
auto async_connection_attempt(tcp::socket& object, tcp::endpoint ep,
F&& stopOn, Token&& token,
int attempts = -1,
Timer::duration delay = 3s)
{
using Result = asio::async_result<std::decay_t<Token>,
void(error_code, bool)>;
using Completion = typename Result::completion_handler_type;
// async_result protocol: build the handler from the token, then the
// result from the handler.
Completion completion(std::forward<Token>(token));
Result result(completion);
asio::spawn(
object.get_executor(),
[=, &object](asio::yield_context yc) mutable {
using mylib::result_code;
auto ex = get_associated_executor(yc);
error_code ec;
while (attempts--) {
// Timer lives on the coroutine frame; its lifetime covers the
// async_connect below, so reference captures are safe here.
Timer t(ex, delay);
t.async_wait([&](error_code ec) { if (!ec) object.cancel(); });
object.async_connect(ep, yc[ec]);
if(!ec)
return completion(result_code::ok, true);
if (ec == asio::error::operation_aborted) {
// our own timer cancelled the connect: surface as timeout
ec = result_code::timeout;
}
if (stopOn && stopOn(ec))
return completion(ec, false);
object.close();
}
return completion(result_code::attempts_exceeded, false);
});
return result.get();
}
The key things to note are:
the async_result<> protocol will give you a completion handler that "does the magic" required by the caller (use_future, yield_context etc.)
you should be able to get away without sharing a reference as the timer can "just" have a raw pointer: the timer's lifetime is completely owned by the containing composed operation.
Full Demo: callbacks, coroutines and futures
I threw in an mylib::result_code enum to be able to return full error information:
Live On Wandbox
//#define BOOST_ASIO_ENABLE_HANDLER_TRACKING
#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
#include <iostream>
#include <iomanip>
#ifdef STANDALONE_ASIO
using std::error_category;
using std::error_code;
using std::error_condition;
using std::system_error;
#else
namespace asio = boost::asio;
using boost::system::error_category;
using boost::system::error_code;
using boost::system::error_condition;
using boost::system::system_error;
#endif
using namespace std::chrono_literals;
using asio::ip::tcp;
using Timer = asio::steady_timer;
namespace mylib { // error-code support for reporting connection outcomes
// Result of a connection attempt; make_error_code adapts it to error_code.
enum class result_code {
    ok = 0,
    timeout = 1,
    attempts_exceeded = 2,
};

// Singleton category translating result_code values to messages.
auto const& get_result_category() {
    struct category_impl : error_category {
        const char* name() const noexcept override { return "result_code"; }

        std::string message(int ev) const override {
            auto const code = static_cast<result_code>(ev);
            if (code == result_code::ok)
                return "success";
            if (code == result_code::attempts_exceeded)
                return "the maximum number of attempts was exceeded";
            if (code == result_code::timeout)
                return "the operation did not complete in time";
            return "unknown error";
        }

        error_condition default_error_condition(int ev) const noexcept override {
            return error_condition{ev, *this};
        }

        bool equivalent(int ev,
                        error_condition const& condition) const noexcept override {
            return condition.value() == ev && &condition.category() == this;
        }

        bool equivalent(error_code const& error, int ev) const noexcept override {
            return error.value() == ev && &error.category() == this;
        }
    };
    static const category_impl instance;
    return instance;
}

// Builds an error_code carrying our category from a result_code.
error_code make_error_code(result_code se) {
    return error_code{static_cast<std::underlying_type<result_code>::type>(se),
                      get_result_category()};
}
} // namespace mylib
// Registers result_code with Boost.System so it implicitly converts to
// error_code via ADL-found mylib::make_error_code.
template <>
struct boost::system::is_error_code_enum<mylib::result_code>
: std::true_type {};
// spawn-based initiation (full-demo copy): the retry-with-timeout loop is
// written as sequential code in a stackful coroutine, while the caller's
// token (future / yield_context / callback) decides how completion is
// delivered via the async_result protocol.
template <typename F, typename Token>
auto async_connection_attempt(tcp::socket& object, tcp::endpoint ep,
F&& stopOn, Token&& token,
int attempts = -1,
Timer::duration delay = 3s)
{
using Result = asio::async_result<std::decay_t<Token>,
void(error_code, bool)>;
using Completion = typename Result::completion_handler_type;
// Handler from token first, then result from handler (protocol order).
Completion completion(std::forward<Token>(token));
Result result(completion);
asio::spawn(
object.get_executor(),
[=, &object](asio::yield_context yc) mutable {
using mylib::result_code;
auto ex = get_associated_executor(yc);
error_code ec;
while (attempts--) {
// Per-attempt timer on the coroutine frame; destroyed (and thus
// cancelled) at the end of each loop iteration.
Timer t(ex, delay);
t.async_wait([&](error_code ec) { if (!ec) object.cancel(); });
object.async_connect(ep, yc[ec]);
if(!ec)
return completion(result_code::ok, true);
if (ec == asio::error::operation_aborted) {
// the timer cancelled the connect: report it as a timeout
ec = result_code::timeout;
}
if (stopOn && stopOn(ec))
return completion(ec, false);
object.close();
}
return completion(result_code::attempts_exceeded, false);
});
return result.get();
}
// Demo stopOn predicate: prints the error being examined and always answers
// "keep retrying".
static auto non_recoverable = [](error_code ec) {
    std::string const what = ec.message();
    std::cerr << "Checking " << std::quoted(what) << "\n";
    // TODO Be specific about intermittent/recoverable conditions
    return false;
};
#include <set>
// Demo driver (spawn-answer copy): argv[1] is the target IP; remaining args
// choose among the "future", "coroutine" and "callback" demos on port 8989.
int main(int argc, char** argv) {
assert(argc>1);
static const tcp::endpoint ep{asio::ip::make_address(argv[1]),
8989};
std::set<std::string_view> const options{argv+1, argv+argc};
std::cout << std::boolalpha;
// std::future completion token on a thread pool.
if (options.contains("future")) {
std::cout
<< "-----------------------\n"
<< " FUTURE DEMO\n"
<< "-----------------------" << std::endl;
asio::thread_pool ctx;
try {
tcp::socket s(ctx);
std::future<bool> ok = async_connection_attempt(
s, ep, non_recoverable, asio::use_future, 5, 800ms);
std::cout << "Future: " << ok.get() << ", " << s.is_open() << "\n";
} catch (system_error const& se) {
// with use_future, failures surface as system_error from get()
std::cout << "Future: " << se.code().message() << "\n";
}
ctx.join();
}
// Stackful coroutine (yield_context) completion token.
if (options.contains("coroutine")) {
std::cout
<< "-----------------------\n"
<< " COROUTINE DEMO\n"
<< "-----------------------" << std::endl;
asio::io_context ctx;
asio::spawn(ctx,
[work = make_work_guard(ctx)](asio::yield_context yc) {
auto ex = get_associated_executor(yc);
tcp::socket s(ex);
error_code ec;
if (async_connection_attempt(s, ep, non_recoverable,
yc[ec], 5, 800ms)) {
std::cout << "Connected in coro\n";
} else {
std::cout << "NOT Connected in coro: " << ec.message() << "\n";
}
});
ctx.run();
}
// Plain callback completion token.
if (options.contains("callback")) {
std::cout
<< "-----------------------\n"
<< " CALLBACK DEMO\n"
<< "-----------------------" << std::endl;
asio::io_context ctx;
tcp::socket s(ctx);
async_connection_attempt(
s, ep, non_recoverable,
[](error_code ec, bool ok) {
std::cout << "Callback: " << ok << ", "
<< ec.message() << "\n";
},
5, 800ms);
ctx.run();
}
}
Sample output is on the online compiler, or compare some tests on my machine:
This is how I implemented it. Code with tests can be found here on github
/**
 * Composed asynchronous operation that repeatedly tries to connect a
 * `Connection` to an endpoint until one of the following happens:
 *  - a connect succeeds,
 *  - the attempt budget is exhausted,
 *  - the user-supplied stop-on-error predicate returns true.
 *
 * Each attempt is guarded by a steady_timer; when the timer fires it
 * cancels the pending connect, which surfaces as operation_aborted and is
 * reported to callers as timed_out.
 *
 * The object acts as its own intermediate completion handler:
 * asyncConnect() moves *this into async_connect, and all mutable state is
 * kept alive across re-submissions through the shared_ptr pImpl_.
 */
template<typename Connection, typename CompletionHandler>
class composed_connection_attempt
{
public:
    using connection_type = Connection;
    using endpoint_type = typename Connection::endpoint_type;
    // TODO: clarify the type!
    using completion_handler_t = CompletionHandler;

    // Per-attempt timeout used when the caller does not supply one.
    constexpr static auto default_timeout()
    {
        return std::chrono::milliseconds(3000);
    }

    // Sentinel for "retry forever": size_t() - 1 wraps to SIZE_MAX.
    constexpr static size_t infinite_attempts()
    {
        return size_t() - 1;
    }

    // Handler executor: prefer the completion handler's associated
    // executor, falling back to the connection's executor.
    using executor_type = asio::associated_executor_t<
        typename std::decay<CompletionHandler>::type,
        typename connection_type::executor_type>;

    executor_type get_executor() const noexcept
    {
        // TODO: get completion handler executor
        return pImpl_->get_executor();
    }

    // TODO: allocator type
    using allocator_type = typename asio::associated_allocator_t<CompletionHandler,
        std::allocator<void>>;

    allocator_type get_allocator() const noexcept
    {
        // TODO: get completion handler allocator
        return pImpl_->get_allocator();
    }

    // TODO: constructor to initialize state, pass timeout value?

    /// Construct without a stop-on-error predicate (retries on any error).
    template<typename CompletionHandlerT>
    explicit composed_connection_attempt(connection_type &connection,
                                         CompletionHandlerT &&completionHandler)
        : pImpl_(std::make_shared<impl>(connection,
                                        std::forward<CompletionHandlerT>(completionHandler)))
    {}

    /// Construct with a stop-on-error predicate; when it returns true for
    /// a connect error, retrying stops and the error is reported.
    template<typename CompletionHandlerT,
             typename Callable>
    explicit composed_connection_attempt(connection_type &connection,
                                         CompletionHandlerT &&completionHandler,
                                         Callable &&stopOnError)
        : pImpl_(std::make_shared<impl>(connection,
                                        std::forward<CompletionHandlerT>(completionHandler),
                                        std::forward<Callable>(stopOnError)))
    {}

    /**
     * Initiation operator. Initiates the composed connection procedure.
     * @tparam Endpoint type of endpoint
     * @tparam Duration type of timeout
     * @param endpoint endpoint to be used for connection
     * @param attempts number of attempts (infinite_attempts() = no limit)
     * @param timeout value to be used as a timeout between attempts
     *
     * NOTE(review): `Duration` is deduced from the argument, so the
     * default argument can never be used — omitting `timeout` makes
     * deduction fail. Confirm whether a default template argument
     * (e.g. Duration = decltype(default_timeout())) was intended.
     */
    // TODO: require endpoint type
    template<typename Endpoint, typename Duration>
    void operator()(Endpoint &&endpoint,
                    size_t attempts,
                    Duration &&timeout = default_timeout())
    {
        pImpl_->endpoint_ = std::forward<Endpoint>(endpoint);
        pImpl_->attempts_ = attempts;
        pImpl_->timeout_ = std::forward<Duration>(timeout);
        asyncConnect();
    }

    /**
     * Initiation operator. Initiates the composed connection procedure.
     * Connection attempts default to infinite (attempts_ keeps its
     * current value, which starts at infinite_attempts()).
     * @tparam Endpoint type of endpoint
     * @tparam Duration type of timeout
     * @param endpoint endpoint to be used for connection
     * @param timeout value to be used as a timeout between attempts
     *
     * NOTE(review): same deduced-default-argument caveat as above.
     */
    // TODO: require endpoint type
    template<typename Endpoint, typename Duration>
    void operator()(Endpoint &&endpoint,
                    Duration &&timeout = default_timeout())
    {
        pImpl_->endpoint_ = std::forward<Endpoint>(endpoint);
        pImpl_->timeout_ = std::forward<Duration>(timeout);
        asyncConnect();
    }

    /**
     * Intermediate completion handler. Will be trying to connect until:<br>
     * - has connected<br>
     * - has run out of attempts<br>
     * - user-provided callback impl::stopOnError_ interrupts execution when a specific connection error has occurred<br>
     * <br>Will be invoked only on connection events:<br>
     * - success<br>
     * - connection timeout or operation_cancelled in case if timer has expired<br>
     * - connection errors<br>
     * @param errorCode error code resulted from async_connect
     */
    void operator()(const asio::error_code &errorCode)
    {
        if (!errorCode)
        {
            // Connected: stop the guard timer and report success.
            stopTimer();
            pImpl_->completionHandler_(errorCode);
            return;
        }
        // Infinite budget never decrements; otherwise consume one attempt.
        const auto attemptsLeft = pImpl_->attempts_ == infinite_attempts() ?
                                  infinite_attempts() :
                                  pImpl_->attempts_ - 1;
        if ((pImpl_->stopOnError_ &&
             pImpl_->stopOnError_(errorCode == asio::error::operation_aborted ?
                                  // special case for operation_aborted on timer expiration - need to send timed_out explicitly
                                  // this should only be resulted from the timer calling cancel()
                                  asio::error::timed_out :
                                  errorCode)) ||
            !attemptsLeft)
        {
            // Give up: either the predicate said stop or the budget is spent.
            stopTimer();
            pImpl_->completionHandler_(errorCode == asio::error::operation_aborted ?
                                       asio::error::timed_out :
                                       errorCode);
            return;
        }
        pImpl_->attempts_ = attemptsLeft;
        asyncConnect();
    }

private:
    // Shared state: must outlive every re-submitted async operation, hence
    // held by shared_ptr and captured by copy in the timer's handler.
    struct impl
    {
        template<typename CompletionHandlerT>
        impl(connection_type &connection,
             CompletionHandlerT &&completionHandler)
            : connection_(connection),
              completionHandler_(std::forward<CompletionHandlerT>(completionHandler))
        {}

        template<typename CompletionHandlerT, typename Callable>
        impl(connection_type &connection,
             CompletionHandlerT &&completionHandler,
             Callable &&stopOnError)
            : connection_(connection),
              completionHandler_(std::forward<CompletionHandlerT>(completionHandler)),
              stopOnError_(std::forward<Callable>(stopOnError))
        {}

        executor_type get_executor() const noexcept
        {
            return asio::get_associated_executor(completionHandler_,
                                                 connection_.get_executor());
        }

        allocator_type get_allocator() const noexcept
        {
            // TODO: get completion handler allocator
            return allocator_type();
        }

        connection_type &connection_;
        completion_handler_t completionHandler_;
        // Optional predicate: return true to abort retrying on this error.
        std::function<bool(const asio::error_code &)> stopOnError_;
        // this should be default constructable or should I pass it in the constructor?
        endpoint_type endpoint_;
        // TODO: make timer initialization from get_executor()
        // (initializing from get_executor() does not compile here because
        // executor_type depends on completionHandler_, initialized later)
        asio::steady_timer timer_{connection_.get_executor()}; // this does not compile! -> {get_executor()};
        asio::steady_timer::duration timeout_ = default_timeout();
        size_t attempts_ = infinite_attempts();
    };

    // TODO: make unique?
    std::shared_ptr<impl> pImpl_;

    // Arms the per-attempt timer; on expiry it cancels the pending connect
    // (which completes with operation_aborted, later mapped to timed_out).
    void startTimer()
    {
        pImpl_->timer_.expires_after(pImpl_->timeout_); // it will automatically cancel a pending timer
        pImpl_->timer_.async_wait(
            [pImpl = pImpl_](const asio::error_code &errorCode)
            {
                // will occur on connection error before timeout
                if (errorCode == asio::error::operation_aborted)
                    return;
                // TODO: handle timer errors? What are the possible errors?
                assert(!errorCode && "unexpected timer error!");
                // stop attempts
                pImpl->connection_.cancel();
            });
    }

    void stopTimer()
    {
        pImpl_->timer_.cancel();
    }

    /**
     * Will be trying to connect until:<br>
     * - has run out of attempts
     * - has been required to stop by stopOnError callback (if it was set)
     * @param endpoint
     * @param attempts
     */
    void asyncConnect()
    {
        startTimer();
        // *this is moved in as the intermediate handler; state survives in pImpl_.
        pImpl_->connection_.async_connect(pImpl_->endpoint_, std::move(*this));
    }
};
/**
 * Factory helper: deduces the composed_connection_attempt type from the
 * connection and completion handler and constructs it with a
 * stop-on-error predicate.
 */
template<typename Connection,
         typename CompletionHandler,
         typename Callable>
auto make_composed_connection_attempt(Connection &connection,
                                      CompletionHandler &&completionHandler,
                                      Callable &&stopOnError) ->
    composed_connection_attempt<Connection, CompletionHandler>
{
    using attempt_type = composed_connection_attempt<Connection, CompletionHandler>;
    return attempt_type(connection,
                        std::forward<CompletionHandler>(completionHandler),
                        std::forward<Callable>(stopOnError));
}
/**
 * Initiating function for the composed connection attempt.
 *
 * @param connection       connection (e.g. a socket) to connect
 * @param endpoint         endpoint to connect to
 * @param attempts         number of attempts before giving up
 * @param timeout          per-attempt timeout
 * @param completionToken  asio completion token (callback, future,
 *                         yield_context, ...) invoked as void(error_code)
 * @param stopOnError      predicate; return true to abort retrying
 * @return whatever the token's async_result produces (void for plain
 *         callbacks, a future/awaitable for other tokens)
 */
template<typename Connection,
         typename Endpoint,
         typename Duration,
         typename CompletionToken,
         typename Callable>
auto async_connection_attempt(Connection &connection,
                              Endpoint &&endpoint,
                              size_t attempts,
                              Duration &&timeout,
                              CompletionToken &&completionToken,
                              Callable &&stopOnError)
{
    // Legacy async_result protocol: materialize the concrete completion
    // handler from the token, then obtain the result object *before*
    // initiating the operation.
    using result_t = asio::async_result<std::decay_t<CompletionToken>,
                                        void(asio::error_code)>;
    using completion_t = typename result_t::completion_handler_type;
    completion_t completion{std::forward<CompletionToken>(completionToken)};
    result_t result{completion};
    // `completion` is an lvalue of a concrete (non-deduced) type:
    // std::move, not std::forward, is the correct way to hand it off.
    auto composedConnectionAttempt = make_composed_connection_attempt(connection,
                                                                      std::move(completion),
                                                                      std::forward<Callable>(stopOnError));
    composedConnectionAttempt(std::forward<Endpoint>(endpoint),
                              attempts,
                              std::forward<Duration>(timeout));
    // May return immediately (callback tokens) or block/suspend
    // (futures, stackful coroutines), depending on the token.
    return result.get();
}