How to close async client connection in ASIO?

I'm trying to create a client for the C++20 server example, the one that uses coroutines.
I'm not quite sure how I'm supposed to close the client connection. As far as I'm aware, there are two ways:
#1
This one seems to close it once it's ready, i.e. when there are no outstanding read/write operations left to do.
asio::signal_set signals(io_context, SIGINT, SIGTERM);
signals.async_wait([&](auto, auto) { io_context.stop(); });
#2
Force close?
asio::post(io_context_, [this]() { socket_.close(); });
Which one should I use?
Client code (unfinished)
#include <cstdlib>
#include <deque>
#include <iostream>
#include <thread>
#include <string>
#include <asio.hpp>
using asio::ip::tcp;
using asio::awaitable;
using asio::co_spawn;
using asio::detached;
using asio::redirect_error;
using asio::use_awaitable;
awaitable<void> connect(tcp::socket socket, const tcp::endpoint& endpoint)
{
co_await socket.async_connect(endpoint, use_awaitable);
}
int main()
{
try
{
asio::io_context io_context;
tcp::endpoint endpoint(asio::ip::make_address("127.0.0.1"), 666);
tcp::socket socket(io_context);
co_spawn(io_context, connect(std::move(socket), endpoint), detached);
io_context.run();
}
catch (std::exception& e)
{
std::cerr << "Exception: " << e.what() << "\n";
}
return 0;
}
Server code
#include <cstdlib>
#include <deque>
#include <iostream>
#include <list>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <asio/awaitable.hpp>
#include <asio/detached.hpp>
#include <asio/co_spawn.hpp>
#include <asio/io_context.hpp>
#include <asio/ip/tcp.hpp>
#include <asio/read_until.hpp>
#include <asio/redirect_error.hpp>
#include <asio/signal_set.hpp>
#include <asio/steady_timer.hpp>
#include <asio/use_awaitable.hpp>
#include <asio/write.hpp>
using asio::ip::tcp;
using asio::awaitable;
using asio::co_spawn;
using asio::detached;
using asio::redirect_error;
using asio::use_awaitable;
//----------------------------------------------------------------------
class chat_participant
{
public:
virtual ~chat_participant() = default;
virtual void deliver(const std::string& msg) = 0;
};
typedef std::shared_ptr<chat_participant> chat_participant_ptr;
//----------------------------------------------------------------------
class chat_room
{
public:
void join(chat_participant_ptr participant)
{
participants_.insert(participant);
for (const auto &msg : recent_msgs_)
participant->deliver(msg);
}
void leave(chat_participant_ptr participant)
{
participants_.erase(participant);
}
void deliver(const std::string& msg)
{
recent_msgs_.push_back(msg);
while (recent_msgs_.size() > max_recent_msgs)
recent_msgs_.pop_front();
for (const auto &participant : participants_)
participant->deliver(msg);
}
private:
std::set<chat_participant_ptr> participants_;
enum { max_recent_msgs = 100 };
std::deque<std::string> recent_msgs_;
};
//----------------------------------------------------------------------
class chat_session
: public chat_participant,
public std::enable_shared_from_this<chat_session>
{
public:
chat_session(tcp::socket socket, chat_room& room)
: socket_(std::move(socket)),
timer_(socket_.get_executor()),
room_(room)
{
timer_.expires_at(std::chrono::steady_clock::time_point::max());
}
void start()
{
room_.join(shared_from_this());
co_spawn(socket_.get_executor(),
[self = shared_from_this()]{ return self->reader(); },
detached);
co_spawn(socket_.get_executor(),
[self = shared_from_this()]{ return self->writer(); },
detached);
}
void deliver(const std::string& msg) override
{
write_msgs_.push_back(msg);
timer_.cancel_one();
}
private:
awaitable<void> reader()
{
try
{
for (std::string read_msg;;)
{
std::size_t n = co_await asio::async_read_until(socket_,
asio::dynamic_buffer(read_msg, 1024), "\n", use_awaitable);
room_.deliver(read_msg.substr(0, n));
read_msg.erase(0, n);
}
}
catch (std::exception&)
{
stop();
}
}
awaitable<void> writer()
{
try
{
while (socket_.is_open())
{
if (write_msgs_.empty())
{
asio::error_code ec;
co_await timer_.async_wait(redirect_error(use_awaitable, ec));
}
else
{
co_await asio::async_write(socket_,
asio::buffer(write_msgs_.front()), use_awaitable);
write_msgs_.pop_front();
}
}
}
catch (std::exception&)
{
stop();
}
}
void stop()
{
room_.leave(shared_from_this());
socket_.close();
timer_.cancel();
}
tcp::socket socket_;
asio::steady_timer timer_;
chat_room& room_;
std::deque<std::string> write_msgs_;
};
//----------------------------------------------------------------------
awaitable<void> listener(tcp::acceptor acceptor)
{
chat_room room;
for (;;)
{
std::make_shared<chat_session>(co_await acceptor.async_accept(use_awaitable), room)->start();
}
}
//----------------------------------------------------------------------
int main()
{
try
{
unsigned short port = 666;
asio::io_context io_context(1);
co_spawn(io_context,
listener(tcp::acceptor(io_context, { tcp::v4(), port })),
detached);
asio::signal_set signals(io_context, SIGINT, SIGTERM);
signals.async_wait([&](auto, auto) { io_context.stop(); });
io_context.run();
}
catch (std::exception& e)
{
std::cerr << "Exception: " << e.what() << "\n";
}
return 0;
}

In the example provided by Asio, the listener runs within the io_context thread/thread-pool, which is started by run(); the argument in io_context(1 /* one thread */) is a concurrency hint for how many threads will call run().
The listener will use an acceptor to listen for new connections from within the io_context. The acceptor will create a new chat_session for each new socket connection and will hand it over to the chat_room.
Thus, to safely close a connection, you need to post a lambda to the io_context. asio::post queues the lambda to be executed from within the io_context thread(s).
You need to provide the correct io_context and the socket owned by the chat_session. The connection MUST be closed from within the io_context, as follows:
// Where "this" is the current chat_session owning the socket
asio::post(io_context_, [this]() { socket_.close(); });
The io_context will then close the connection and complete any active async_read / async_write handlers of the chat_session with an error, such as in the C++11 example:
void do_read()
{
asio::async_read(socket_,
asio::buffer(read_msg_.data(), chat_message::header_length),
/* You can provide a lambda to be called on a read / error */
[this](std::error_code ec, std::size_t /*length read*/)
{
if (!ec)
{
do_read(); // No error -> Keep on reading
}
else
{
// You'll reach this point if an active async_read was stopped
// due to an error or if you called socket_.close()
// Error -> You can close the socket here as well,
// because it is called from within the io_context
socket_.close();
}
});
}
Your first option will actually stop the entire io_context. This should be used to gracefully exit your program or stop the asio io_context as a whole.
You should thus use the second option to "close an async client connection in ASIO".
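Applied to the coroutine-based chat_session above, a minimal sketch of such a close helper (assuming you add a public close() member to the class; the name is illustrative) could be:
// Hypothetical addition to chat_session (not part of the original example).
// Posting to the socket's executor guarantees the close runs inside the
// io_context, never racing the reader()/writer() coroutines; capturing
// shared_from_this() keeps the session alive until the lambda has run.
void close()
{
    asio::post(socket_.get_executor(),
        [self = shared_from_this()] { self->stop(); });
}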

Related

Boost beast service returns "body limit exceeded" when receiving a json payload

I'm working on a project which implements a Boost.Beast service. This part of the code was written by a person who left the company, and I have not mastered Boost.
Until now it worked well, but the size of the payload has increased and it no longer works. The payload is about 2.4 MB.
The service is implemented using 3 classes ServerService, Listener and Session.
ServerService:
void ServerService::startServer(const std::string& address, const unsigned short& port,
const std::string& baseRessourceName, const unsigned short& threadNumber)
{
try
{
const auto srvAddress = boost::asio::ip::make_address(address);
// The io_context is required for all I/O
auto const nbThreads = std::max<int>(1, threadNumber);
boost::asio::io_context ioContext(nbThreads);
// Create listener and launch a listening port
std::shared_ptr<Listener> listener = std::make_shared<Listener>(ioContext, tcp::endpoint{ srvAddress, port }, baseRessourceName);
listener->run();
// Run the I/O service on the requested number of threads
std::vector<std::thread> threads;
threads.reserve(nbThreads - 1);
for (auto i = nbThreads - 1; i > 0; --i)
{
threads.emplace_back([&ioContext] { ioContext.run(); });
}
ioContext.run();
}
catch (std::exception const& e)
{
LBC_ERROR("{}", e.what());
}
}
Listener:
// Used namespace
using tcp = boost::asio::ip::tcp; // from <boost/asio/ip/tcp.hpp>
namespace Http
{
class Listener : public std::enable_shared_from_this<Listener>
{
private:
tcp::acceptor m_acceptor;
tcp::socket m_socket;
std::string const& m_baseResourceName;
// Report a failure
void logError(boost::system::error_code errorCode, char const* what)
{
LBC_ERROR("{}: {}", what, errorCode.message());
}
public:
Listener(boost::asio::io_context& ioContext, tcp::endpoint endpoint, std::string const& docRoot)
: m_acceptor(ioContext)
, m_socket(ioContext)
, m_baseResourceName(docRoot)
{
boost::system::error_code errorCode;
// Open the acceptor
m_acceptor.open(endpoint.protocol(), errorCode);
if (errorCode)
{
logError(errorCode, "open");
return;
}
// Allow address reuse
m_acceptor.set_option(boost::asio::socket_base::reuse_address(true));
if (errorCode)
{
logError(errorCode, "set_option");
return;
}
// Bind to the server address
m_acceptor.bind(endpoint, errorCode);
if (errorCode)
{
logError(errorCode, "bind");
return;
}
// Start listening for connections
m_acceptor.listen(boost::asio::socket_base::max_listen_connections, errorCode);
if (errorCode)
{
logError(errorCode, "listen");
return;
}
}
// Start accepting incoming connections
void run()
{
if (!m_acceptor.is_open()) {
return;
}
doAccept();
}
void doAccept()
{
m_acceptor.async_accept(m_socket,
std::bind(
&Listener::onAccept,
shared_from_this(),
std::placeholders::_1));
}
void onAccept(boost::system::error_code errorCode)
{
if (errorCode)
{
logError(errorCode, "accept");
}
else
{
// Create the session and run it
std::make_shared<Session>(
std::move(m_socket),
m_baseResourceName)->run();
}
// Accept another connection
doAccept();
}
};
} // namespace Http
Session:
// Used namespaces
using tcp = boost::asio::ip::tcp; // from <boost/asio/ip/tcp.hpp>
namespace boostHttp = boost::beast::http; // from <boost/beast/http.hpp>
namespace Http
{
class Session : public std::enable_shared_from_this<Session>
{
private:
// This is the C++11 equivalent of a generic lambda.
// The function object is used to send an HTTP message.
struct send_lambda
{
Session& self_;
explicit send_lambda(Session& self) : self_(self) {}
template<bool isRequest, class Body, class Fields>
void operator()(boostHttp::message<isRequest, Body, Fields>&& msg) const
{
// The lifetime of the message has to extend
// for the duration of the async operation so
// we use a shared_ptr to manage it.
auto sp = std::make_shared<boostHttp::message<isRequest, Body, Fields>>(std::move(msg));
// Store a type-erased version of the shared
// pointer in the class to keep it alive.
self_.res_ = sp;
// Write the response
boostHttp::async_write(self_.socket_, *sp,
boost::asio::bind_executor(
self_.strand_, std::bind(
&Session::onWrite,
self_.shared_from_this(),
std::placeholders::_1,
std::placeholders::_2,
sp->need_eof())));
}
};
// Report a failure
void logError(boost::system::error_code errorCode, char const* what)
{
LBC_ERROR("{}: {}", what, errorCode.message());
}
tcp::socket socket_;
boost::asio::strand<boost::asio::any_io_executor> strand_;
boost::beast::flat_buffer buffer_;
std::string const& baseResourceName_;
boostHttp::request<boostHttp::string_body> req_;
std::shared_ptr<void> res_;
send_lambda lambda_;
public:
// Take ownership of the socket
explicit Session(tcp::socket socket, std::string const& docRoot)
: socket_(std::move(socket))
, strand_(socket_.get_executor())
, baseResourceName_(docRoot)
, lambda_(*this)
{}
// Start the asynchronous operation
void run()
{
doRead();
}
void doRead()
{
// Make the request empty before reading,
// otherwise the operation behavior is undefined.
req_ = {};
// Read a request
boostHttp::async_read(socket_, buffer_, req_,
boost::asio::bind_executor(
strand_, std::bind(
&Session::onRead,
shared_from_this(),
std::placeholders::_1,
std::placeholders::_2)));
}
void onRead(boost::system::error_code errorCode, std::size_t transferredBytes)
{
boost::ignore_unused(transferredBytes);
// This means they closed the connection
if (errorCode == boostHttp::error::end_of_stream)
{
return doClose();
}
if (errorCode) {
return logError(errorCode, "*** read"); // Error is here
}
// Some stuff here to manage request
}
void onWrite(boost::system::error_code ec, std::size_t transferredBytes, bool close)
{
boost::ignore_unused(transferredBytes);
if (ec)
{
return logError(ec, "write");
}
if (close)
{
// This means we should close the connection, usually because
// the response indicated the "Connection: close" semantic.
return doClose();
}
// We're done with the response so delete it
res_ = nullptr;
// Read another request
doRead();
}
void doClose()
{
// Send a TCP shutdown
boost::system::error_code ec;
socket_.shutdown(tcp::socket::shutdown_send, ec);
// At this point the connection is closed gracefully
}
};
} // namespace Http
The service is launched as follow:
Service::ServerService serverService;
serverService.startServer("127.0.0.1", 8080, "service_name", 5);
I saw in the Boost documentation that the default limit is 1 MB. I tried some examples found on the internet to implement a parser and change the body limit, but when I send a payload I get the following error: "Unknown HTTP request"!
I hope someone can help me solve this problem. Thank you in advance for your answers.
First I made your code self-contained, more modern, and simpler, and stripped unused code. I chose libfmt to implement the logging requirements, showing how to use source locations instead of tediously providing manual context.
Live On Coliru
#include <boost/asio.hpp>
#include <boost/beast.hpp>
#include <iostream>
namespace beast = boost::beast;
namespace http = beast::http;
namespace net = boost::asio;
using boost::system::error_code;
using net::ip::tcp;
#include <fmt/ranges.h>
#include <fmt/ostream.h>
template <> struct fmt::formatter<boost::source_location> : fmt::ostream_formatter {};
#define LBC_ERROR(FMTSTR, ...) fmt::print(stderr, FMTSTR "\n", __VA_ARGS__)
// Report a failure
static void inline logError(error_code ec, char const* what) {
LBC_ERROR("{}: {} from {}", what, ec.message(), ec.location());
}
static void inline logError(std::exception const& e) { logError({}, e.what()); }
namespace Http {
using namespace std::placeholders;
using Executor = net::any_io_executor;
class Session : public std::enable_shared_from_this<Session> {
private:
tcp::socket socket_;
std::string baseResourceName_; // TODO FIXME unused
boost::beast::flat_buffer buffer_;
http::request<http::string_body> req_;
public:
// Take ownership of the socket
explicit Session(tcp::socket socket, std::string docRoot)
: socket_(std::move(socket))
, baseResourceName_(std::move(docRoot)) {}
void run() {
std::cerr << "Started session for " << socket_.remote_endpoint() << std::endl;
doRead();
}
~Session() {
error_code ec;
auto ep = socket_.remote_endpoint(ec);
std::cerr << "Close session for " << ep << std::endl;
}
private:
void doRead() {
// Make the request empty before reading, otherwise the operation
// behavior is undefined.
req_.clear();
// Read a request
http::async_read(socket_, buffer_, req_,
std::bind(&Session::onRead, shared_from_this(), _1, _2));
}
void onRead(error_code ec, size_t transferredBytes) {
boost::ignore_unused(transferredBytes);
// This means they closed the connection
if (ec == http::error::end_of_stream) {
return doClose();
}
if (ec) {
return logError(ec, "*** read"); // Error is here
}
// Some stuff here to manage request
}
void onWrite(error_code ec, size_t transferredBytes, bool close) {
boost::ignore_unused(transferredBytes);
if (ec) {
return logError(ec, "write");
}
if (close) {
// This means we should close the connection, usually because
// the response indicated the "Connection: close" semantic.
return doClose();
}
// Read another request
doRead();
}
void doClose() {
// Send a TCP shutdown
error_code ec;
socket_.shutdown(tcp::socket::shutdown_send, ec);
// At this point the connection is closed gracefully
}
};
} // namespace Http
namespace Http {
class Listener : public std::enable_shared_from_this<Listener> {
private:
tcp::acceptor m_acceptor;
std::string m_baseResourceName;
public:
Listener(Executor ex, tcp::endpoint endpoint, std::string docRoot) try
: m_acceptor(ex)
, m_baseResourceName(std::move(docRoot)) //
{
m_acceptor.open(endpoint.protocol());
m_acceptor.set_option(tcp::acceptor::reuse_address(true));
m_acceptor.bind(endpoint);
m_acceptor.listen(tcp::socket::max_listen_connections);
} catch (boost::system::system_error const& se) {
logError(se.code(), "Listener");
throw;
}
// Start accepting incoming connections
void run() {
if (m_acceptor.is_open())
doAccept();
}
void doAccept() {
m_acceptor.async_accept(make_strand(m_acceptor.get_executor()),
std::bind(&Listener::onAccept, shared_from_this(), _1, _2));
}
void onAccept(error_code ec, tcp::socket sock) {
if (ec)
return logError(ec, "accept");
// Accept another connection / Create the session and run it
doAccept();
std::make_shared<Session>(std::move(sock), m_baseResourceName)->run();
}
};
void startServer(std::string address, uint16_t port, std::string docRoot, unsigned threads) {
try {
net::thread_pool ioc(std::max(1u, threads));
// Create listener and launch a listening port
tcp::endpoint ep{net::ip::make_address(address), port};
std::make_shared<Listener>( //
ioc.get_executor(), ep, std::move(docRoot))
->run();
// Run the I/O service on the requested number of threads
ioc.join();
} catch (std::exception const& e) {
logError(e);
}
}
} // namespace Http
int main() {
//Service::ServerService serverService;
/*serverService.*/ Http::startServer("127.0.0.1", 8989, "service_name", 5);
}
In particular, the send_lambda is now outdated (besides being unused); see message_generator instead.
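For illustration, a rough sketch of a message_generator-based reply (available since Boost 1.81); the sendResponse name and the onWrite wiring here are assumptions, not from the original code:
// Sketch only: reply via http::message_generator instead of the
// type-erased shared_ptr + send_lambda machinery.
void sendResponse(http::message_generator&& msg) {
    bool keep_alive = msg.keep_alive(); // read before moving msg
    beast::async_write(socket_, std::move(msg),
        [self = shared_from_this(), keep_alive](error_code ec, std::size_t n) {
            self->onWrite(ec, n, /*close=*/!keep_alive);
        });
}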
Reproducing
I can reproduce the error by replacing the data with something large enough:
Live On Coliru
dd of=test.bin seek=3 bs=1M count=0 status=none
curl -s http://127.0.0.1:8989/blrub -d @test.bin
Prints
Started session for 127.0.0.1:48884
*** read: body limit exceeded from (unknown source location)
Close session for 127.0.0.1:48884
Fixing
Indeed, you can set options on request_parser. Three lines of code changed:
http::request_parser<http::string_body> req_;
And
req_.get().clear();
req_.body_limit(8*1024*1024); // raised to 8 MiB
Live On Coliru
With no further changes:
Prints
Started session for 127.0.0.1:48886
Close session for 127.0.0.1:48886
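Pulling the three changes together, a sketch of how they slot into the Session class (the surrounding members are as in the refactor above):
// Sketch: a request_parser replaces the plain request object.
http::request_parser<http::string_body> req_; // was: http::request<http::string_body> req_;
void doRead() {
    req_.get().clear();               // reset the underlying message before reading
    req_.body_limit(8 * 1024 * 1024); // raise the 1 MiB default to 8 MiB
    http::async_read(socket_, buffer_, req_,
        std::bind(&Session::onRead, shared_from_this(), _1, _2));
}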

C++ Boost::ASIO: system error 995 after second call to io_context::run

I've got trouble with the following scenario using the Asio 1.66.0 Windows implementation:
1. bind socket
2. run io_context
3. stop io_context
4. close socket
5. restart io_context
6. repeat 1-4
A call to io_context::run in the second iteration is followed by system error 995:
The I/O operation has been aborted because of either a thread exit or an application request
It looks like this error comes from the socket closure: Asio uses PostQueuedCompletionStatus/GetQueuedCompletionStatus to signal itself that io_context::stop was called. But the I/O operation enqueued by WSARecvFrom in socket_.async_receive_from fails because the socket is closed, and on the next call to io_context::run that failure is the first thing delivered to the handler passed to socket_.async_receive_from.
Is this the intended behavior of Asio's io_context? How do I avoid this error in subsequent iterations?
If I stop io_context::run by closing the socket instead, everything works, except the same error still appears, which looks a little dirty.
Another odd thing: if I proceed with do_receive after receiving the error, I get as many errors as there were previous iterations, and only then do I receive data from the socket.
// based on boost_asio/example/cpp11/multicast/receiver.cpp
// https://www.boost.org/doc/libs/1_66_0/doc/html/boost_asio/example/cpp11/multicast/receiver.cpp
#include <array>
#include <iostream>
#include <string>
#include <boost/asio.hpp>
#include <future>
#include <chrono>
#include <thread>
using namespace std::chrono_literals;
constexpr short multicast_port = 30001;
class receiver
{
public:
explicit receiver(boost::asio::io_context& io_context) : socket_(io_context)
{}
~receiver()
{
close();
}
void open(
const boost::asio::ip::address& listen_address,
const boost::asio::ip::address& multicast_address)
{
// Create the socket so that multiple may be bound to the same address.
boost::asio::ip::udp::endpoint listen_endpoint(
listen_address, multicast_port);
socket_.open(listen_endpoint.protocol());
socket_.set_option(boost::asio::ip::udp::socket::reuse_address(true));
socket_.bind(listen_endpoint);
// Join the multicast group.
socket_.set_option(
boost::asio::ip::multicast::join_group(multicast_address));
do_receive();
}
void close()
{
if (socket_.is_open())
{
socket_.close();
}
}
private:
void do_receive()
{
socket_.async_receive_from(
boost::asio::buffer(data_), sender_endpoint_,
[this](boost::system::error_code ec, std::size_t length)
{
if (!ec)
{
std::cout.write(data_.data(), length);
std::cout << std::endl;
do_receive();
}
else
{
// A call to io_context::run in second iteration is followed by system error 995
std::cout << ec.message() << std::endl;
}
});
}
boost::asio::ip::udp::socket socket_;
boost::asio::ip::udp::endpoint sender_endpoint_;
std::array<char, 1024> data_;
};
int main(int argc, char* argv[])
{
try
{
const std::string listen_address = "0.0.0.0";
const std::string multicast_address = "239.255.0.1";
boost::asio::io_context io_context;
receiver r(io_context);
std::future<void> fut;
for (int i = 5; i > 0; --i)
{
io_context.restart();
r.open(
boost::asio::ip::make_address(listen_address),
boost::asio::ip::make_address(multicast_address));
fut = std::async(std::launch::async, [&](){ io_context.run(); });
std::this_thread::sleep_for(3s);
io_context.stop();
fut.get();
r.close();
}
}
catch (std::exception& e)
{
std::cerr << "Exception: " << e.what() << "\n";
}
return 0;
}
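One way to tolerate this, sketched here under the assumption that error 995 surfaces as boost::asio::error::operation_aborted (Asio's mapping of ERROR_OPERATION_ABORTED), is to treat that code as a benign cancellation in the receive handler:
// Sketch: treat the aborted receive as an expected cancellation.
socket_.async_receive_from(
    boost::asio::buffer(data_), sender_endpoint_,
    [this](boost::system::error_code ec, std::size_t length)
    {
        if (ec == boost::asio::error::operation_aborted)
            return; // expected after close()/stop(); not a real failure
        if (ec)
        {
            std::cout << ec.message() << std::endl;
            return;
        }
        std::cout.write(data_.data(), length);
        std::cout << std::endl;
        do_receive(); // keep the read loop going on success
    });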

asio async_send memory leak

I have the following snippet:
void TcpConnection::Send(const std::vector<uint8_t>& buffer) {
std::shared_ptr<std::vector<uint8_t>> bufferCopy = std::make_shared<std::vector<uint8_t>>(buffer);
auto socket = m_socket;
m_socket->async_send(asio::buffer(bufferCopy->data(), bufferCopy->size()), [socket, bufferCopy](const boost::system::error_code& err, size_t bytesSent)
{
if (err)
{
logwarning << "clientcomms_t::sendNext encountered error: " << err.message();
// Assume that the communications path is no longer
// valid.
socket->close();
}
});
}
This code leads to a memory leak. If the m_socket->async_send call is commented out, there is no memory leak. I cannot understand why bufferCopy is not freed after the callback is dispatched. What am I doing wrong?
This is on Windows.
Since you don't show any relevant code, and the code shown does not contain a strict problem, I'm going to reason from the code smells.
The smell is that you have a TcpConnection class that is not derived from enable_shared_from_this<TcpConnection>. This leads me to suspect you didn't plan ahead, because there's no possible reasonable way to continue using the instance after the completion of any asynchronous operation (like the async_send).
This leads me to suspect you have a crucially simple problem, which is that your completion handler never runs. There's only one situation that could explain this, and that leads me to assume you never run() the io_service instance.
Here's the situation live:
Live On Coliru
#include <boost/asio.hpp>
namespace asio = boost::asio;
using asio::ip::tcp;
#include <iostream>
auto& logwarning = std::clog;
struct TcpConnection {
using Buffer = std::vector<uint8_t>;
void Send(Buffer const &);
TcpConnection(asio::io_service& svc) : m_socket(std::make_shared<tcp::socket>(svc)) {}
tcp::socket& socket() const { return *m_socket; }
private:
std::shared_ptr<tcp::socket> m_socket;
};
void TcpConnection::Send(Buffer const &buffer) {
auto bufferCopy = std::make_shared<Buffer>(buffer);
auto socket = m_socket;
m_socket->async_send(asio::buffer(bufferCopy->data(), bufferCopy->size()),
[socket, bufferCopy](const boost::system::error_code &err, size_t /*bytesSent*/) {
if (err) {
logwarning << "clientcomms_t::sendNext encountered error: " << err.message();
// Assume that the communications path is no longer
// valid.
socket->close();
}
});
}
int main() {
asio::io_service svc;
tcp::acceptor a(svc, tcp::v4());
a.bind({{}, 6767});
a.listen();
boost::system::error_code ec;
do {
TcpConnection conn(svc);
a.accept(conn.socket(), ec);
char const* greeting = "whale hello there!\n";
conn.Send({greeting, greeting+strlen(greeting)});
} while (!ec);
}
You'll see that any client connecting, e.g. with netcat localhost 6767, will receive the greeting, after which, surprisingly, the connection stays open instead of being closed.
You'd expect the connection to be closed by the server side either way, either because
a transmission error occurred in async_send,
or because after the completion handler is run, it is destroyed and hence the captured shared pointers are destructed. Not only would that free the copied buffer, but it would also run the destructor of socket, which would close the connection.
This clearly confirms that the completion handler never runs. The fix is "easy": find a place to run the service:
int main() {
asio::io_service svc;
tcp::acceptor a(svc, tcp::v4());
a.set_option(tcp::acceptor::reuse_address());
a.bind({{}, 6767});
a.listen();
std::thread th;
{
asio::io_service::work keep(svc); // prevent service running out of work early
th = std::thread([&svc] { svc.run(); });
boost::system::error_code ec;
for (int i = 0; i < 11 && !ec; ++i) {
TcpConnection conn(svc);
a.accept(conn.socket(), ec);
char const* greeting = "whale hello there!\n";
conn.Send({greeting, greeting+strlen(greeting)});
}
}
th.join();
}
This runs 11 connections and exits leak-free.
Better:
It becomes a lot cleaner when the accept loop is also async, and the TcpConnection is properly shared as hinted above:
Live On Coliru
#include <boost/asio.hpp>
namespace asio = boost::asio;
using asio::ip::tcp;
#include <memory>
#include <thread>
#include <iostream>
auto& logwarning = std::clog;
struct TcpConnection : std::enable_shared_from_this<TcpConnection> {
using Buffer = std::vector<uint8_t>;
TcpConnection(asio::io_service& svc) : m_socket(svc) {}
void start() {
char const* greeting = "whale hello there!\n";
Send({greeting, greeting+strlen(greeting)});
}
void Send(Buffer);
private:
friend struct Server;
Buffer m_output;
tcp::socket m_socket;
};
struct Server {
Server(unsigned short port) {
_acceptor.set_option(tcp::acceptor::reuse_address());
_acceptor.bind({{}, port});
_acceptor.listen();
do_accept();
}
~Server() {
keep.reset();
_svc.post([this] { _acceptor.cancel(); });
if (th.joinable())
th.join();
}
private:
void do_accept() {
auto conn = std::make_shared<TcpConnection>(_svc);
_acceptor.async_accept(conn->m_socket, [this,conn](boost::system::error_code ec) {
if (ec)
logwarning << "accept failed: " << ec.message() << "\n";
else {
conn->start();
do_accept();
}
});
}
asio::io_service _svc;
// prevent service running out of work early:
std::unique_ptr<asio::io_service::work> keep{std::make_unique<asio::io_service::work>(_svc)};
std::thread th{[this]{_svc.run();}}; // TODO handle handler exceptions
tcp::acceptor _acceptor{_svc, tcp::v4()};
};
void TcpConnection::Send(Buffer buffer) {
m_output = std::move(buffer);
auto self = shared_from_this();
m_socket.async_send(asio::buffer(m_output),
[self](const boost::system::error_code &err, size_t /*bytesSent*/) {
if (err) {
logwarning << "clientcomms_t::sendNext encountered error: " << err.message() << "\n";
// not holding on to `self` means the socket gets closed
}
// do more with `self` which points to the TcpConnection instance...
});
}
int main() {
Server server(6868);
std::this_thread::sleep_for(std::chrono::seconds(3));
}

async_connect doesn't call handler in TCP client class

I'm trying to make a client class from the Boost TCP client example for my projects, and I've noticed that sometimes handle_connect doesn't get called when connecting to a nonexistent host.
I've read similar issues here on Stack Overflow, where people forgot to run io_service or called it before any tasks were posted, but I don't think that's my case, since I launch the io_service.run() thread right after calling async_connect, and successful connects, network-unreachable, and the other cases I've tested work just fine.
Here is the full listing:
tcp_client.hpp
#ifndef TCP_CLIENT_HPP
#define TCP_CLIENT_HPP
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/chrono.hpp>
#include <boost/thread/thread.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/enable_shared_from_this.hpp>
#include <boost/make_shared.hpp>
#include <mutex>
#include <iostream>
#include <iomanip>
namespace com {
using boost::asio::ip::tcp;
using namespace std;
class client : public boost::enable_shared_from_this<client> {
private:
std::mutex mx_;
bool stopped_ = 1;
boost::asio::streambuf ibuf_;
boost::shared_ptr<boost::asio::io_service> io_service_;
boost::shared_ptr<boost::asio::ip::tcp::socket> sock_;
boost::shared_ptr<tcp::resolver::iterator> ei_;
std::vector<std::string> inbound_;
std::string host_, port_;
public:
client() {}
void connect( std::string host, std::string port ) {
if (!stopped_) stop();
host_ = host; port_ = port;
io_service_.reset(new boost::asio::io_service);
sock_.reset(new boost::asio::ip::tcp::socket(*io_service_));
ei_.reset(new tcp::resolver::iterator);
tcp::resolver r(*io_service_);
ei_ = boost::make_shared<tcp::resolver::iterator>( r.resolve(tcp::resolver::query(host_, port_)) );
stopped_ = 0;
start_connect();
boost::thread work( boost::bind(&client::work, shared_from_this()) );
return;
}
bool is_running() {
return !stopped_;
}
void stop() {
stopped_ = 1;
sock_->close();
return;
}
void send(std::string str) {
if (stopped_) return;
auto msg = boost::asio::buffer(str, str.size());
boost::asio::async_write( (*sock_), msg, boost::bind(&client::handle_write, shared_from_this(), _1) );
return;
}
std::string pull() {
std::lock_guard<std::mutex> lock(mx_);
std::string msg;
if (inbound_.size()>0) {
msg = inbound_.at(0);
inbound_.erase(inbound_.begin());
}
return msg;
}
int size() {
std::lock_guard<std::mutex> lock(mx_);
return inbound_.size();
}
void clear() {
std::lock_guard<std::mutex> lock(mx_);
inbound_.clear();
return;
}
private:
void work() {
if (stopped_) return;
std::cout<<"work in"<<std::endl;
io_service_->run();
std::cout<<"work out"<<std::endl;
return;
}
void start_connect() {
if ((*ei_) != tcp::resolver::iterator()) {
std::cout<<"Trying "<<(*ei_)->endpoint()<<std::endl;
sock_->async_connect( (*ei_)->endpoint(), boost::bind(&client::handle_connect, shared_from_this(), boost::asio::placeholders::error) );
} else {
stop();
}
return;
}
void handle_connect(const boost::system::error_code& ec) {
if (stopped_) return;
if (!sock_->is_open()) {
std::cout<<"Socket closed"<<std::endl;
(*ei_)++;
start_connect();
} else if (ec) {
std::cout<<"Connect error: "<<ec.message()<<std::endl;
sock_->close();
(*ei_)++;
start_connect();
} else {
std::cout<<"Connected to "<<(*ei_)->endpoint()<<std::endl;
start_read();
}
return;
}
void start_read() {
if (stopped_) return;
boost::asio::async_read_until((*sock_), ibuf_, "", boost::bind(&client::handle_read, shared_from_this(), boost::asio::placeholders::error));
return;
}
void handle_read(const boost::system::error_code& ec) {
std::lock_guard<std::mutex> lock(mx_);
if (stopped_) return;
if (ec) {
std::cout<<"Read error: "<<ec.message()<<std::endl;
stop();
return;
}
std::string line;
std::istream is(&ibuf_);
std::getline(is, line);
if (!line.empty() && inbound_.size()<1000) inbound_.push_back(line);
start_read();
return;
}
private:
void handle_write(const boost::system::error_code& ec) {
if (stopped_) return;
if (ec) {
std::cout<<"Write error: "<<ec.message()<<std::endl;
stop();
return;
}
return;
}
};
};
and tcp_test.cpp
#include "tcp_client.hpp"
int main(int argc, char* argv[]) {
auto tcp_client = boost::shared_ptr<com::client>(new com::client);
try {
tcp_client->connect("192.168.1.15", "50000");
boost::this_thread::sleep_for(boost::chrono::milliseconds(1000));
tcp_client->connect("192.168.1.20", "50000");
} catch (std::exception& e) {
std::cerr<<"Exception: "<<e.what()<<std::endl;
}
int cnt=0;
while (cnt<5) {
std::cout<<cnt<<std::endl;
cnt++;
tcp_client->send("<test>");
boost::this_thread::sleep_for(boost::chrono::milliseconds(500));
}
tcp_client->stop();
while (tcp_client->size()>0) std::cout<<tcp_client->pull()<<std::endl;
return 0;
}
This is the output I get when connecting to a loopback server:
Trying 192.168.1.15:50000
work in
work out
Trying 192.168.1.20:50000
0
work in
Connected to 192.168.1.20:50000
1
2
3
4
work out
<test>
<test>
<test>
<test>
<test>
The 192.168.1.20 host works just as it should, as you can see. 192.168.1.15 doesn't exist, but I expected it to produce some kind of error. Instead io_service.run() returns right away, as if async_connect never posted a callback task. Maybe it's related to the endpoint iterator and not async_connect?
Can anyone please explain why this is happening?
Then I tried to isolate the problem in this code:
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/chrono.hpp>
#include <boost/thread/thread.hpp>
boost::asio::io_service io_svc;
boost::asio::ip::tcp::socket sock(io_svc);
boost::asio::ip::tcp::resolver::iterator ei;
void work() {
std::cout<<"work in"<<std::endl;
io_svc.run();
std::cout<<"work out"<<std::endl;
return;
}
void stop() {
sock.close();
return;
}
void start_connect();
void handle_connect(const boost::system::error_code& ec) {
if (!sock.is_open()) {
std::cout<<"Socket closed"<<std::endl;
ei++;
start_connect();
} else if (ec) {
std::cout<<"Connect error: "<<ec.message()<<std::endl;
sock.close();
ei++;
start_connect();
} else {
std::cout<<"Connected to "<<ei->endpoint()<<std::endl;
}
return;
}
void start_connect() {
if (ei != boost::asio::ip::tcp::resolver::iterator()) {
std::cout<<"Trying "<<ei->endpoint()<<std::endl;
sock.async_connect( ei->endpoint(), boost::bind(handle_connect, boost::asio::placeholders::error) );
} else {
stop();
}
return;
}
int main(int argc, char* argv[]) {
std::string host="192.168.1.15", port="50000";
boost::asio::ip::tcp::resolver r(io_svc);
ei = r.resolve(boost::asio::ip::tcp::resolver::query(host, port));
start_connect();
boost::thread* thr = new boost::thread(work);
boost::this_thread::sleep_for(boost::chrono::milliseconds(2000));
return 0;
}
But I've got a totally different result. When I try to connect to a nonexistent host, most of the time it's:
Trying 192.168.1.15:50000
work in
Sometimes it's:
Trying 192.168.1.15:50000
work in
Connect error: Operation canceled
Connect error: Operation canceled
And rarely it's:
Trying 192.168.1.15:50000
work in
Segmentation fault
"work out" is never printed, so I'm guessing io_service in this example is doing something, but how is this different from previous code, and why I get "operation canceled" error only sometimes?
A client running in a background thread should look something like this.
Note that I have not included things like connection timeouts. For that you'd want a deadline timer running in parallel with the async_connect. Then you'd have to correctly handle the crossing cases (hint: cancel the deadline timer on successful connect and throw away the ensuing error from its async_wait); a sketch follows the example output below.
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/chrono.hpp>
#include <thread>
#include <functional>
boost::asio::io_service io_svc;
struct client
: std::enable_shared_from_this<client>
{
using protocol = boost::asio::ip::tcp;
using resolver = protocol::resolver;
using socket = protocol::socket;
using error_code = boost::system::error_code;
client(boost::asio::io_service& ios)
: ios_(ios) {}
void start(std::string const& host, std::string const& service)
{
auto presolver = std::make_shared<resolver>(get_io_service());
presolver->async_resolve(protocol::resolver::query(host, service),
strand_.wrap([self = shared_from_this(), presolver](auto&& ec, auto iter)
{
self->handle_resolve(ec, presolver, iter);
}));
}
private:
void
handle_resolve(boost::system::error_code const& ec, std::shared_ptr<resolver> presolver, resolver::iterator iter)
{
if (ec) {
std::cerr << "error resolving: " << ec.message() << std::endl;
}
else {
boost::asio::async_connect(sock, iter, strand_.wrap([self = shared_from_this(),
presolver]
(auto&& ec, auto iter)
{
self->handle_connect(ec, iter);
// note - we're dropping presolver here - we don't need it any more
}));
}
}
void handle_connect(error_code const& ec, resolver::iterator iter)
{
if (ec) {
std::cerr << "failed to connect: " << ec.message() << std::endl;
}
else {
auto payload = std::make_shared<std::string>("Hello");
boost::asio::async_write(sock, boost::asio::buffer(*payload),
strand_.wrap([self = shared_from_this(),
payload] // note! capture the payload so it continues to exist during async send
(auto&& ec, auto size)
{
self->handle_send(ec, size);
}));
}
}
void handle_send(error_code const& ec, std::size_t size)
{
if (ec) {
std::cerr << "send failed after " << size << " butes : " << ec.message() << std::endl;
}
else {
// send something else?
}
}
boost::asio::io_service& get_io_service()
{
return ios_;
}
private:
boost::asio::io_service& ios_;
boost::asio::strand strand_{get_io_service()};
socket sock{get_io_service()};
};
void work()
{
std::cout << "work in" << std::endl;
io_svc.run();
std::cout << "work out" << std::endl;
return;
}
int main(int argc, char *argv[])
{
auto pclient = std::make_shared<client>(io_svc);
std::string host = "192.168.1.15", port = "50000";
pclient->start(host, port);
auto run_thread = std::thread(work);
if (run_thread.joinable())
run_thread.join();
return 0;
}
example output:
work in
<time passes>...
failed to connect: Operation timed out
work out
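For the timeout mentioned above, a minimal sketch of wiring a deadline timer in parallel with the connect; the timer_ member and the five-second value are assumptions for illustration:
// Sketch: connect with a timeout. Assumes a member
// boost::asio::deadline_timer timer_{get_io_service()};
// (requires <boost/asio/deadline_timer.hpp>).
void start_connect_with_timeout(resolver::iterator iter)
{
    timer_.expires_from_now(boost::posix_time::seconds(5));
    timer_.async_wait(strand_.wrap(
        [self = shared_from_this()](error_code const& ec) {
            // operation_aborted means the connect won the race and
            // cancelled us; anything else means the timer fired first.
            if (ec != boost::asio::error::operation_aborted)
                self->sock.close(); // aborts the pending async_connect
        }));
    boost::asio::async_connect(sock, iter, strand_.wrap(
        [self = shared_from_this()](error_code const& ec, resolver::iterator it) {
            self->timer_.cancel(); // crossing case: discard the timer's ensuing error
            self->handle_connect(ec, it);
        }));
}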

how to do boost::asio::spawn with io_service-per-CPU?

My server is based on the Boost spawn echo server example.
The server runs fine on a single-core machine, without a single crash for several months. Even when it takes 100% CPU it still works fine.
But I need to handle more client requests, so now I use a multi-core machine. To use all the CPUs I run the io_service on several threads, like this:
#include <boost/asio/io_service.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/steady_timer.hpp>
#include <boost/asio/write.hpp>
#include <boost/thread/thread.hpp>
#include <iostream>
#include <memory>
#include <thread>
using namespace std;
using boost::asio::ip::tcp;
class session : public std::enable_shared_from_this<session>{
public:
explicit session(tcp::socket socket)
: socket_(std::move(socket)),
timer_(socket_.get_io_service()),
strand_(socket_.get_io_service())
{}
void go()
{
auto self(shared_from_this());
boost::asio::spawn(strand_, [this, self](boost::asio::yield_context yield)
{
try {
char data[1024] = {'3'};
for( ; ;) {
timer_.expires_from_now(std::chrono::seconds(10));
std::size_t n = socket_.async_read_some(boost::asio::buffer(data, sizeof(data)), yield);
// do something with data
// write back something
boost::asio::async_write(socket_, boost::asio::buffer(data, sizeof(data)), yield);
}
} catch(...) {
socket_.close();
timer_.cancel();
}
});
boost::asio::spawn(strand_, [this, self](boost::asio::yield_context yield)
{
while(socket_.is_open()) {
boost::system::error_code ignored_ec;
timer_.async_wait(yield[ignored_ec]);
if(timer_.expires_from_now() <= std::chrono::seconds(0))
socket_.close();
}
});
}
private:
tcp::socket socket_;
boost::asio::steady_timer timer_;
boost::asio::io_service::strand strand_;
};
int main(int argc, char* argv[]) {
try {
boost::asio::io_service io_service;
boost::asio::spawn(io_service, [&](boost::asio::yield_context yield)
{
tcp::acceptor acceptor(io_service,
#define PORT "7788"
tcp::endpoint(tcp::v4(), std::atoi(PORT)));
for( ; ;) {
boost::system::error_code ec;
tcp::socket socket(io_service);
acceptor.async_accept(socket, yield[ec]);
if(!ec)
// std::make_shared<session>(std::move(socket))->go();
io_service.post(boost::bind(&session::go, std::make_shared<session>(std::move(socket))));
}
});
// ----------- this works fine on single-core machine ------------
{
// io_service.run();
}
// ----------- this crashes (with multi core) ----------
{
auto thread_count = std::thread::hardware_concurrency(); // for multi core
boost::thread_group threads;
for(auto i = 0; i < thread_count; ++i)
threads.create_thread(boost::bind(&boost::asio::io_service::run, &io_service));
threads.join_all();
}
} catch(std::exception& e) {
std::cerr << "Exception: " << e.what() << "\n";
}
return 0;
}
The code works fine on a single-core machine, but crashes all the time on 2-core/4-core/8-core machines. From the crash dump I don't see anything related to my code, just something about boost::spawn and some randomly named lambda.
So I want to try this: Run io_service per CPU.
I found a demo, but it uses async functions:
void server::start_accept()
{
new_connection_.reset(new connection(
io_service_pool_.get_io_service(), request_handler_));
acceptor_.async_accept(new_connection_->socket(),
boost::bind(&server::handle_accept, this,
boost::asio::placeholders::error));
}
void server::handle_accept(const boost::system::error_code& e)
{
if (!e)
{
new_connection_->start();
}
start_accept();
}
The io_service_pool_.get_io_service() call picks an io_service from the pool, but my code uses spawn:
boost::asio::spawn(io_service, ...
How to spawn with random io_service?
It seems I was asking the wrong question: spawn cannot work with multiple io_services, but the socket can. I modified the code to this:
int main(int argc, char* argv[]) {
try {
boost::asio::io_service io_service;
boost::asio::io_service::work work(io_service);
auto core_count = std::thread::hardware_concurrency();
// io_service_pool.hpp and io_service_pool.cpp from boost's example
io_service_pool pool(core_count);
boost::asio::spawn(io_service, [&](boost::asio::yield_context yield)
{
#define PORT "7788"
tcp::acceptor acceptor(io_service, tcp::endpoint(tcp::v4(), std::atoi(PORT)));
for( ; ;) {
boost::system::error_code ec;
boost::asio::io_service& ios = pool.get_io_service();
tcp::socket socket(ios);
acceptor.async_accept(socket, yield[ec]);
if(!ec)
ios.post(boost::bind(&session::go, std::make_shared<session>(std::move(socket))));
}
});
{ // run all io_service
thread t([&] { pool.run(); });
t.detach();
io_service.run();
}
} catch(std::exception& e) {
std::cerr << "Exception: " << e.what() << "\n";
}
return 0;
}
Now the server doesn't crash anymore. But I still have no idea what caused the crash when I used a single io_service for all threads.
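For reference, a minimal sketch of the io_service_pool used above, modeled on the io_service_pool from Boost's http/server2 example (simplified here; not the exact Boost source):
// Minimal io_service_pool sketch (round-robin over one io_service per core).
class io_service_pool {
public:
    explicit io_service_pool(std::size_t pool_size) {
        for (std::size_t i = 0; i < pool_size; ++i) {
            auto ios = std::make_shared<boost::asio::io_service>();
            // a work object per service keeps its run() from returning early
            work_.push_back(std::make_shared<boost::asio::io_service::work>(*ios));
            io_services_.push_back(ios);
        }
    }
    // Run every io_service on its own thread; blocks until all exit.
    void run() {
        std::vector<std::thread> threads;
        for (auto& ios : io_services_)
            threads.emplace_back([ios] { ios->run(); });
        for (auto& t : threads)
            t.join();
    }
    // Hand out io_services round-robin, one per accepted connection.
    boost::asio::io_service& get_io_service() {
        auto& ios = *io_services_[next_];
        next_ = (next_ + 1) % io_services_.size();
        return ios;
    }
private:
    std::vector<std::shared_ptr<boost::asio::io_service>> io_services_;
    std::vector<std::shared_ptr<boost::asio::io_service::work>> work_;
    std::size_t next_ = 0;
};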