I am working on a network application using ASIO and have referred to the Chat-Server/Client example.
I have asked a similar question here.
To explain better, I am adding more code here:
My CServer Class
class CServer {
private:
mutable tcp::acceptor acceptor_; // only in the listener
asio::io_service& io_;
CSerSessionsManager mng_;
std::string ip_;
std::string port_;
public:
CServer::CServer(asio::io_service& io_service, const std::string IP, const std::string port) : io_(io_service), acceptor_(io_service)
, ip_(IP), port_(port)
{
DEBUG_MSG("Listener Created");
}
~CServer()
{
DEBUG_MSG("Listener Destroyed");
acceptor_.close();
}
void initProtocol()
{
DEBUG_MSG(" Protocol Initiated");
std::array<unsigned char, 4> ip;
std::string delimiter = ".";
//Parse the IP String
size_t pos = 0;
auto i = 0;
std::string token;
while ((pos = ip_.find(delimiter)) != std::string::npos) {
token = ip_.substr(0, pos);
ip[i] = std::stoi(token);//what if stoi fails
i++;
ip_.erase(0, pos + delimiter.length());
}
ip[i] = std::stoi(ip_);
asio::ip::address_v4 address(ip);
tcp::endpoint ep(address, std::stoi(port_));
static std::mutex m;
std::unique_lock<std::mutex> lck(m, std::defer_lock);
//Critical Section start
lck.lock();
acceptor_ = tcp::acceptor(io_, ep);//Creating IOService
lck.unlock();
//Critical Section End
listen();
}
void listen()
{
DEBUG_MSG("!==============================================================!");
////Critical Section
static std::mutex m;
std::lock_guard<std::mutex> lock(m);
sessionPtr newSession = std::make_shared<CSerSession>(io_, mng_);
try
{
acceptor_.async_accept(newSession->socket(), std::bind(&CServer::handle_accept, /*shared_from_this()*/ this, newSession,
std::placeholders::_1));
///*asio::error_code ec;
//pSocket_->shutdown(asio::ip::tcp::socket::shutdown_send, ec);*/
}
catch (const std::bad_weak_ptr& e)
{
DEBUG_MSG(e.what());
throw e;
}
DEBUG_MSG("Listen Activated");
}
void handle_accept(sessionPtr newSession, const asio::error_code& error)
{
if (!acceptor_.is_open())
{
return;
}
if (!error)
{
DEBUG_MSG("Incoming Session accepted");
//Do I need a Lock here?
//Critical Section
static std::mutex m;
std::lock_guard<std::mutex> lock(m);
newSession->startSession();
listen();
}
else
{
DEBUG_MSG("Listen_Error");
// //throw ASIOError(Listen_Error);
DEBUG_MSG(error.message());
return;
}
}
};
My CSerSessionsManager Class
class CSerSessionsManager{
private:
std::set<sessionPtr> sessions_; //Active Sessions : Online Info
public:
CSerSessionsManager();
~CSerSessionsManager();
void addSession(sessionPtr session);
void dropSession(sessionPtr session);
};
CSerSessionsManager::CSerSessionsManager()
{
DEBUG_MSG("Construction");
}
CSerSessionsManager::~CSerSessionsManager()
{
DEBUG_MSG("Destruction");
}
void CSerSessionsManager::addSession(sessionPtr session)
{
DEBUG_MSG("Incoming Session Entry saved");
//Critical Section
static std::mutex m;
std::lock_guard<std::mutex> lock(m);
sessions_.insert(session);
}
void CSerSessionsManager::dropSession(sessionPtr session)
{
//Properly handle Existing connections first shutdown sockets
DEBUG_MSG("Session dropped");
//Critical Section
static std::mutex m;
std::lock_guard<std::mutex> lock(m);
std::set<sessionPtr>::iterator it;
for (it = sessions_.begin(); it != sessions_.end(); ++it)
{
if ((*it) == session)
{
sessions_.erase(session);
return;
}
}
//throw ASIOError(Session_Not_Found);
}
And my CSerSession Class
class CSerSession : public std::enable_shared_from_this < CSerSession > {
private:
mutable tcp::socket socket_; // client connection
CSerSessionsManager& manager_;
std::string ip_;
std::string port_;
CBuffer msg_;
public:
CSerSession(asio::io_service& io_service, CSerSessionsManager& mng) :
manager_(mng), socket_(io_service)
{
DEBUG_MSG("Server Session Created");
}
~CSerSession()
{
DEBUG_MSG("Server Session Destroyed");
}
void startSession()
{
DEBUG_MSG("Server Session Started");
//Critical Section
static std::mutex m;
std::lock_guard<std::mutex> lock(m);
manager_.addSession(shared_from_this());//Multiple threads should not try adding section
read(msg_);
}
void handle_read(const asio::error_code& error /*error*/, size_t bytes_transferred /*bytes_transferred*/)
{
if (!error)
{
DEBUG_MSG("Read");
//Critical Section
static std::mutex m;
std::lock_guard<std::mutex> lock(m);
read(msg_);
}
else
{
DEBUG_MSG("Read Error Detected : " << error.message());
//Check If shared_from_this() is valid or not
try
{
//Check if session was already dropped e.g. server object destroying
//i.e. if session object exists
DEBUG_MSG("Dropping Session");
//if (error == asio::error::operation_aborted)
manager_.dropSession(shared_from_this());
}
catch (const std::bad_weak_ptr& e)
{
DEBUG_MSG(e.what());
throw e;
}
return;
}
}
void read(CBuffer & buff)
{
DEBUG_MSG("Read");
asio::async_read(socket_, asio::buffer(const_cast<char *> (buff.getReceived()), buff.buffsize),
std::bind(&CSerSession::handle_read, shared_from_this(),
std::placeholders::_1, std::placeholders::_2));
}
tcp::socket& socket()
{
//Critical Section
static std::mutex m;
std::lock_guard<std::mutex> lock(m);
return socket_;
}
};
I create the CServer Object in main as below:
void main()
{
try
{
asio::io_service io_service;
//CServer server(io_service, "Default", "127.0.0.1", "8000");
auto sPtr = std::make_shared<CServer>(io_service, "127.0.0.1", "8000");
sPtr->initProtocol();
//server.initProtocol();
asio::thread t(boost::bind(&asio::io_service::run, &io_service));
}
catch (...)
{
}
system("Pause");
}
The output log I get is below:
CSerSessionsManager::CSerSessionsManager : 183 : Construction
CServer::CServer : 239 : Listener Created
CServer::initProtocol : 250 : Protocol Initiated
CServer::listen : 288 : !==============================================================!
CSerSession::CSerSession : 108 : Server Session Created
CServer::listen : 309 : Listen Activated
CServer::~CServer : 244 : Listener Destroyed
CSerSessionsManager::~CSerSessionsManager : 188 : Destruction
CSerSession::~CSerSession : 113 : Server Session Destroyed
When the CServer object is destroyed, the associated CSerSession object is destroyed too, and while returning from ~CSerSession() it throws the exception boost::exception_detail::clone_impl<boost::exception_detail::error_info_injector<std::system_error> > at memory location 0x0277F19C, at the lines of code below:
#ifndef BOOST_EXCEPTION_DISABLE
throw enable_current_exception(enable_error_info(e));
#else
throw e;
#endif
}
I tried to debug a lot and also tried using the signal mechanism as discussed in the HTTP Server example, but I am stuck here and not able to proceed further.
The complete code can be checked here:
MyCode
How do I resolve it?
From a fixed version of the linked code: Live On Coliru I get
CSerSessionsManager : 184 : Construction
CServer : 240 : Listener Created
initProtocol : 251 : Protocol Initiated
~CServer : 245 : Listener Destroyed
~CSerSessionsManager : 189 : Destruction
NOTE: this was because I already had something listening on port 8000 (yay for error reporting!)
Did the initialization order of the fields fix it? Or is there something not running at all on my system (because of a race condition on my faster machine?).
Looks like the latter, because on Coliru I got
CSerSessionsManager : 184 : Construction
CServer : 240 : Listener Created
initProtocol : 251 : Protocol Initiated
listen : 289 : !===================================!
CSerSession : 109 : Server Session Created
listen : 310 : Listen Activated
~CServer : 245 : Listener Destroyed
~CSerSessionsManager : 189 : Destruction
~CSerSession : 114 : Server Session Destroyed
So, let's have a closer look:
why are you parsing the IP string? That's what address_v4 is for. And ip::tcp::resolver.
DEBUG_MSG(" Protocol Initiated");
asio::ip::address_v4 address = asio::ip::address_v4::from_string(ip_);
tcp::endpoint ep(address, std::stoi(port_));
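If the address can be a hostname rather than a dotted quad, the ip::tcp::resolver mentioned above is the tool. A minimal sketch, assuming the same io_, ip_ and port_ members:
tcp::resolver resolver(io_);
// resolve() takes host and service strings and yields endpoint entries
tcp::endpoint ep = *resolver.resolve(tcp::resolver::query(ip_, port_));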
using a function-local static mutex is rarely useful. Did you mean to synchronize access to shared resources? Then every accessor needs to share the same mutex (e.g. a class member)
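A minimal sketch of what that means, using the sessions container as the shared resource (this matches the mutex_ member in the fixed code further down):
class CSerSessionsManager {
    mutable std::mutex mutex_;       // one mutex, shared by every accessor
    std::set<sessionPtr> sessions_;
public:
    void addSession(sessionPtr s) {
        std::lock_guard<std::mutex> lock(mutex_); // same mutex in dropSession etc.
        sessions_.insert(std::move(s));
    }
};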
why are you using defer-lock? Use scopes
{
//Critical Section start
std::lock_guard<std::mutex> lck(mutex_);
acceptor_ = tcp::acceptor(io_, ep);//Creating IOService
//Critical Section End
}
the main thread just exits, never joining the io thread. At least join. Or make it shut down properly before terminating the program:
t.join();
Hungarian naming is really useless here. sPtr doesn't tell me anything; server or, if you insist, server_ptr tells you what you need to know.
you have out-of-bounds write here:
received_[str.size()] = '\0';
you wanted
received_[len] = '\0';
your empty() doesn't need to loop:
bool empty() const
{
return !received_[0];
}
why are you looping to find stuff in an ordered set?
std::set<sessionPtr>::iterator it;
for (it = sessions_.begin(); it != sessions_.end(); ++it)
{
if ((*it) == session)
{
sessions_.erase(session);
return;
}
}
should be
sessions_.erase(session);
addSession/dropSession are internally locking; you don't need to put access to them in a critical section
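With that, the caller shrinks to this (compare startSession in the fixed listing below):
void startSession()
{
    DEBUG_MSG("Server Session Started");
    manager_.addSession(shared_from_this()); // locks internally, no outer lock needed
    read();
}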
throw e is an antipattern; a plain throw; re-throws the original exception
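A tiny illustration (handler() is a placeholder for any code that may throw):
try {
    handler();
} catch (const std::exception& e) {
    DEBUG_MSG(e.what());
    throw;        // re-throws the original exception object
    // throw e;   // would copy (and possibly slice) it instead
}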
you have redundant tracing almost everywhere (this is what debuggers are for). E.g. DEBUG_MSG("Read")
Locking here is bogus:
tcp::socket& socket()
{
// Critical Section
std::lock_guard<std::mutex> lock(mutex_);
return socket_;
}
The returned reference will not be protected anyway, and the socket is only initialized once.
all the thread locking seems redundant since there is only one service thread
CBuffer msg_ is a bogus parameter to read(), as the same buffer is always passed. That could be perfectly fine (it's in the same session), so just use the member directly.
this
acceptor_ = tcp::acceptor(io_, ep);
should be
acceptor_.bind(ep);
and not in a critical section (the server is only created once); hence the initProtocol function can be
void initProtocol()
{
acceptor_.bind(tcp::endpoint(asio::ip::address_v4::from_string(ip_), std::stoi(port_)));
listen();
}
in listen you're catching bad_weak_ptr, which can't even occur there (nothing on that path calls shared_from_this)
here:
//Do I need a Lock here?
//Critical Section
std::lock_guard<std::mutex> lock(mutex_);
newSession->startSession();
you don't need the lock. newSession was bound from a local variable. It's impossible for it to be shared unless you copied the completion handler (you didn't).
Here's a more fixed up version:
Live On Coliru
#include <iostream>
#include <boost/asio.hpp>
#include <memory>
#include <deque>
#include <set>
#include <iomanip>
#include <mutex>
#include <boost/bind.hpp>
#include <boost/thread.hpp>
#define DEBUG ON
#ifdef DEBUG
#define DEBUG_MSG(str) do {std::cout << std::setw(75) << std::left << __FUNCTION__ \
<< std::setw(3) << std::left << ":" << std::setw(5) << std::left << __LINE__ \
<< std::setw(5) << std::left << ":"\
<< std::left << str \
<< std::endl;} while( false )
#else
#define DEBUG_MSG(str) do { } while ( false )
#endif
namespace asio = boost::asio;
using asio::ip::tcp;
class CSerSession;
using sessionPtr = std::shared_ptr<CSerSession>;
class CSerSessionsManager {
private:
mutable std::mutex mutex_;
std::set<sessionPtr> sessions_; // Active Sessions : Online Info
public:
CSerSessionsManager();
~CSerSessionsManager();
void addSession(sessionPtr session);
void dropSession(sessionPtr session);
};
class CBuffer {
public:
enum { buffsize = 32 };
private:
char received_[buffsize];
public:
CBuffer() : received_{} {}
CBuffer(const std::string str)
{
// Truncate if Overflow
auto len = str.size();
if (len >= buffsize) {
len = buffsize - 1;
}
std::copy(str.begin(), str.begin() + len, received_);
received_[len] = '\0';
}
bool empty() const
{
return !received_[0];
}
const std::string getString() const { return std::string(received_); }
const char* getReceived() const { return received_; }
};
class CSerSession : public std::enable_shared_from_this<CSerSession> {
private:
mutable std::mutex mutex_;
mutable tcp::socket socket_; // client connection
CSerSessionsManager& manager_;
std::string ip_;
std::string port_;
CBuffer msg_;
public:
CSerSession(asio::io_service& io_service, CSerSessionsManager& mng) : socket_(io_service), manager_(mng)
{
DEBUG_MSG("Server Session Created");
}
~CSerSession() { DEBUG_MSG("Server Session Destroyed"); }
void startSession()
{
DEBUG_MSG("Server Session Started");
manager_.addSession(shared_from_this()); // Multiple threads should not try adding section
read();
}
tcp::socket& socket() { return socket_; }
private:
void handle_read(const boost::system::error_code& error /*error*/, size_t /*bytes_transferred*/)
{
if (!error) {
read();
} else {
DEBUG_MSG("Read Error Detected : " << error.message());
manager_.dropSession(shared_from_this()); // might throw
}
}
void read()
{
std::lock_guard<std::mutex> lock(mutex_);
DEBUG_MSG("Read");
asio::async_read(socket_, asio::buffer(const_cast<char*>(msg_.getReceived()), msg_.buffsize),
std::bind(&CSerSession::handle_read, shared_from_this(), std::placeholders::_1, std::placeholders::_2));
}
};
CSerSessionsManager::CSerSessionsManager()
{
DEBUG_MSG("Construction");
}
CSerSessionsManager::~CSerSessionsManager()
{
DEBUG_MSG("Destruction");
}
void CSerSessionsManager::addSession(sessionPtr session)
{
std::lock_guard<std::mutex> lock(mutex_);
DEBUG_MSG("Incoming Session Entry saved");
sessions_.insert(session);
}
void CSerSessionsManager::dropSession(sessionPtr session)
{
std::lock_guard<std::mutex> lock(mutex_);
DEBUG_MSG("Session dropped");
sessions_.erase(session);
}
class CServer {
private:
mutable std::mutex mutex_;
asio::io_service& io_;
mutable tcp::acceptor acceptor_; // only in the listener
CSerSessionsManager mng_;
public:
CServer(asio::io_service& io_service, const std::string& IP, int port)
: io_(io_service), acceptor_(io_, tcp::endpoint(asio::ip::address::from_string(IP), port))
{
DEBUG_MSG("Listener Created");
}
~CServer()
{
DEBUG_MSG("Listener Destroyed");
acceptor_.close(); // likely to be redundant
}
void initProtocol()
{
listen();
}
private:
void listen()
{
DEBUG_MSG("!==============================================================!");
sessionPtr newSession = std::make_shared<CSerSession>(io_, mng_);
std::lock_guard<std::mutex> lock(mutex_);
acceptor_.async_accept(newSession->socket(), std::bind(&CServer::handle_accept, this, newSession,
std::placeholders::_1));
}
void handle_accept(sessionPtr newSession, const boost::system::error_code& error)
{
if (error || !acceptor_.is_open()) {
DEBUG_MSG("Listen_Error");
DEBUG_MSG(error.message());
return;
}
DEBUG_MSG("Incoming Session accepted");
newSession->startSession();
listen();
}
};
int main()
{
try
{
asio::io_service io_service;
auto server = std::make_shared<CServer>(io_service, "127.0.0.1", 8973);
server->initProtocol();
boost::thread t(boost::bind(&asio::io_service::run, &io_service));
boost::this_thread::sleep_for(boost::chrono::seconds(3));
t.join();
}
catch (...)
{
}
}
Prints (for a single connection):
CSerSessionsManager : 123 : Construction
CServer : 156 : Listener Created
listen : 173 : !==============================================================!
CSerSession : 86 : Server Session Created
handle_accept : 190 : Incoming Session accepted
startSession : 93 : Server Session Started
addSession : 134 : Incoming Session Entry saved
read : 114 : Read
listen : 173 : !==============================================================!
CSerSession : 86 : Server Session Created
handle_read : 106 : Read Error Detected : End of file
dropSession : 141 : Session dropped
~CSerSession : 89 : Server Session Destroyed
Related
I'm working on a project which implements a Boost.Beast service. This part of the code was written by a person who left the company, and I do not master Boost.
Until now it worked well, but the size of the payload has increased and it no longer works. The payload is about 2.4MB.
The service is implemented using 3 classes ServerService, Listener and Session.
ServerService:
void ServerService::startServer(const std::string& address, const unsigned short& port,
const std::string& baseRessourceName, const unsigned short& threadNumber)
{
try
{
const auto srvAddress = boost::asio::ip::make_address(address);
// The io_context is required for all I/O
auto const nbThreads = std::max<int>(1, threadNumber);
boost::asio::io_context ioContext(nbThreads);
// Create listener and launch a listening port
std::shared_ptr<Listener> listener = std::make_shared<Listener>(ioContext, tcp::endpoint{ srvAddress, port }, baseRessourceName);
listener->run();
// Run the I/O service on the requested number of threads
std::vector<std::thread> threads;
threads.reserve(nbThreads - 1);
for (auto i = nbThreads - 1; i > 0; --i)
{
threads.emplace_back([&ioContext] { ioContext.run(); });
}
ioContext.run();
}
catch (std::exception const& e)
{
LBC_ERROR("{}", e.what());
}
}
Listener:
// Used namespace
using tcp = boost::asio::ip::tcp; // from <boost/asio/ip/tcp.hpp>
namespace Http
{
class Listener : public std::enable_shared_from_this<Listener>
{
private:
tcp::acceptor m_acceptor;
tcp::socket m_socket;
std::string const& m_baseResourceName;
// Report a failure
void logError(boost::system::error_code errorCode, char const* what)
{
LBC_ERROR("{}: {}", what, errorCode.message());
}
public:
Listener(boost::asio::io_context& ioContext, tcp::endpoint endpoint, std::string const& docRoot)
: m_acceptor(ioContext)
, m_socket(ioContext)
, m_baseResourceName(docRoot)
{
boost::system::error_code errorCode;
// Open the acceptor
m_acceptor.open(endpoint.protocol(), errorCode);
if (errorCode)
{
logError(errorCode, "open");
return;
}
// Allow address reuse
m_acceptor.set_option(boost::asio::socket_base::reuse_address(true));
if (errorCode)
{
logError(errorCode, "set_option");
return;
}
// Bind to the server address
m_acceptor.bind(endpoint, errorCode);
if (errorCode)
{
logError(errorCode, "bind");
return;
}
// Start listening for connections
m_acceptor.listen(boost::asio::socket_base::max_listen_connections, errorCode);
if (errorCode)
{
logError(errorCode, "listen");
return;
}
}
// Start accepting incoming connections
void run()
{
if (!m_acceptor.is_open()) {
return;
}
doAccept();
}
void doAccept()
{
m_acceptor.async_accept(m_socket,
std::bind(
&Listener::onAccept,
shared_from_this(),
std::placeholders::_1));
}
void onAccept(boost::system::error_code errorCode)
{
if (errorCode)
{
logError(errorCode, "accept");
}
else
{
// Create the session and run it
std::make_shared<Session>(
std::move(m_socket),
m_baseResourceName)->run();
}
// Accept another connection
doAccept();
}
};
} // namespace Http
Session:
// Used namespaces
using tcp = boost::asio::ip::tcp; // from <boost/asio/ip/tcp.hpp>
namespace boostHttp = boost::beast::http; // from <boost/beast/http.hpp>
namespace Http
{
class Session : public std::enable_shared_from_this<Session>
{
private:
// This is the C++11 equivalent of a generic lambda.
// The function object is used to send an HTTP message.
struct send_lambda
{
Session& self_;
explicit send_lambda(Session& self) : self_(self) {}
template<bool isRequest, class Body, class Fields>
void operator()(boostHttp::message<isRequest, Body, Fields>&& msg) const
{
// The lifetime of the message has to extend
// for the duration of the async operation so
// we use a shared_ptr to manage it.
auto sp = std::make_shared<boostHttp::message<isRequest, Body, Fields>>(std::move(msg));
// Store a type-erased version of the shared
// pointer in the class to keep it alive.
self_.res_ = sp;
// Write the response
boostHttp::async_write(self_.socket_, *sp,
boost::asio::bind_executor(
self_.strand_, std::bind(
&Session::onWrite,
self_.shared_from_this(),
std::placeholders::_1,
std::placeholders::_2,
sp->need_eof())));
}
};
// Report a failure
void logError(boost::system::error_code errorCode, char const* what)
{
LBC_ERROR("{}: {}", what, errorCode.message());
}
tcp::socket socket_;
boost::asio::strand<boost::asio::any_io_executor> strand_;
boost::beast::flat_buffer buffer_;
std::string const& baseResourceName_;
boostHttp::request<boostHttp::string_body> req_;
std::shared_ptr<void> res_;
send_lambda lambda_;
public:
// Take ownership of the socket
explicit Session(tcp::socket socket, std::string const& docRoot)
: socket_(std::move(socket))
, strand_(socket_.get_executor())
, baseResourceName_(docRoot)
, lambda_(*this)
{}
// Start the asynchronous operation
void run()
{
doRead();
}
void doRead()
{
// Make the request empty before reading,
// otherwise the operation behavior is undefined.
req_ = {};
// Read a request
boostHttp::async_read(socket_, buffer_, req_,
boost::asio::bind_executor(
strand_, std::bind(
&Session::onRead,
shared_from_this(),
std::placeholders::_1,
std::placeholders::_2)));
}
void onRead(boost::system::error_code errorCode, std::size_t transferredBytes)
{
boost::ignore_unused(transferredBytes);
// This means they closed the connection
if (errorCode == boostHttp::error::end_of_stream)
{
return doClose();
}
if (errorCode) {
return logError(errorCode, "*** read"); // Error is here
}
// Some stuff here to manage request
}
void onWrite(boost::system::error_code ec, std::size_t transferredBytes, bool close)
{
boost::ignore_unused(transferredBytes);
if (ec)
{
return logError(ec, "write");
}
if (close)
{
// This means we should close the connection, usually because
// the response indicated the "Connection: close" semantic.
return doClose();
}
// We're done with the response so delete it
res_ = nullptr;
// Read another request
doRead();
}
void doClose()
{
// Send a TCP shutdown
boost::system::error_code ec;
socket_.shutdown(tcp::socket::shutdown_send, ec);
// At this point the connection is closed gracefully
}
};
} // namespace Http
The service is launched as follow:
Service::ServerService serverService;
serverService.startServer("127.0.0.1", 8080, "service_name", 5);
I saw in the Boost documentation that the default limit is 1MB. I tried some examples found on the internet to implement a parser and change the body limit, but when I send a payload I get the following error: "Unknown HTTP request"!
I hope someone can help me solve this problem. Thank you in advance for your answers.
First I made your code self-contained, more modern and simpler, and stripped unused code. I chose libfmt to implement the logging requirements, showing how to use source location instead of tediously providing manual context.
Live On Coliru
#include <boost/asio.hpp>
#include <boost/beast.hpp>
#include <iostream>
namespace beast = boost::beast;
namespace http = beast::http;
namespace net = boost::asio;
using boost::system::error_code;
using net::ip::tcp;
#include <fmt/ranges.h>
#include <fmt/ostream.h>
template <> struct fmt::formatter<boost::source_location> : fmt::ostream_formatter {};
#define LBC_ERROR(FMTSTR, ...) fmt::print(stderr, FMTSTR "\n", __VA_ARGS__)
// Report a failure
static void inline logError(error_code ec, char const* what) {
LBC_ERROR("{}: {} from {}", what, ec.message(), ec.location());
}
static void inline logError(std::exception const& e) { logError({}, e.what()); }
namespace Http {
using namespace std::placeholders;
using Executor = net::any_io_executor;
class Session : public std::enable_shared_from_this<Session> {
private:
tcp::socket socket_;
std::string baseResourceName_; // TODO FIXME unused
boost::beast::flat_buffer buffer_;
http::request<http::string_body> req_;
public:
// Take ownership of the socket
explicit Session(tcp::socket socket, std::string docRoot)
: socket_(std::move(socket))
, baseResourceName_(std::move(docRoot)) {}
void run() {
std::cerr << "Started session for " << socket_.remote_endpoint() << std::endl;
doRead();
}
~Session() {
error_code ec;
auto ep = socket_.remote_endpoint(ec);
std::cerr << "Close session for " << ep << std::endl;
}
private:
void doRead() {
// Make the request empty before reading, otherwise the operation
// behavior is undefined.
req_.clear();
// Read a request
http::async_read(socket_, buffer_, req_,
std::bind(&Session::onRead, shared_from_this(), _1, _2));
}
void onRead(error_code ec, size_t transferredBytes) {
boost::ignore_unused(transferredBytes);
// This means they closed the connection
if (ec == http::error::end_of_stream) {
return doClose();
}
if (ec) {
return logError(ec, "*** read"); // Error is here
}
// Some stuff here to manage request
}
void onWrite(error_code ec, size_t transferredBytes, bool close) {
boost::ignore_unused(transferredBytes);
if (ec) {
return logError(ec, "write");
}
if (close) {
// This means we should close the connection, usually because
// the response indicated the "Connection: close" semantic.
return doClose();
}
// Read another request
doRead();
}
void doClose() {
// Send a TCP shutdown
error_code ec;
socket_.shutdown(tcp::socket::shutdown_send, ec);
// At this point the connection is closed gracefully
}
};
} // namespace Http
namespace Http {
class Listener : public std::enable_shared_from_this<Listener> {
private:
tcp::acceptor m_acceptor;
std::string m_baseResourceName;
public:
Listener(Executor ex, tcp::endpoint endpoint, std::string docRoot) try
: m_acceptor(ex)
, m_baseResourceName(std::move(docRoot)) //
{
m_acceptor.open(endpoint.protocol());
m_acceptor.set_option(tcp::acceptor::reuse_address(true));
m_acceptor.bind(endpoint);
m_acceptor.listen(tcp::socket::max_listen_connections);
} catch (boost::system::system_error const& se) {
logError(se.code(), "Listener");
throw;
}
// Start accepting incoming connections
void run() {
if (m_acceptor.is_open())
doAccept();
}
void doAccept() {
m_acceptor.async_accept(make_strand(m_acceptor.get_executor()),
std::bind(&Listener::onAccept, shared_from_this(), _1, _2));
}
void onAccept(error_code ec, tcp::socket sock) {
if (ec)
return logError(ec, "accept");
// Accept another connection / Create the session and run it
doAccept();
std::make_shared<Session>(std::move(sock), m_baseResourceName)->run();
}
};
void startServer(std::string address, uint16_t port, std::string docRoot, unsigned threads) {
try {
net::thread_pool ioc(std::max(1u, threads));
// Create listener and launch a listening port
tcp::endpoint ep{net::ip::make_address(address), port};
std::make_shared<Listener>( //
ioc.get_executor(), ep, std::move(docRoot))
->run();
// Run the I/O service on the requested number of threads
ioc.join();
} catch (std::exception const& e) {
logError(e);
}
}
} // namespace Http
int main() {
//Service::ServerService serverService;
/*serverService.*/ Http::startServer("127.0.0.1", 8989, "service_name", 5);
}
Notably, the send_lambda is now outdated (besides being unused); see message_generator instead.
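For reference, a rough sketch of the message_generator approach, assuming you can move to Boost 1.81 or later where http::message_generator was introduced (names from the Session above; onWrite keeps its bool close parameter):
// Build a concrete response, then hand it to the type-erasing
// message_generator; no shared_ptr<void> res_ or send_lambda needed.
http::response<http::string_body> res{http::status::ok, req_.version()};
res.body() = "...";
res.prepare_payload();

bool keep_alive = res.keep_alive();
beast::async_write(socket_, http::message_generator(std::move(res)),
                   [self = shared_from_this(), keep_alive](error_code ec, size_t n) {
                       self->onWrite(ec, n, !keep_alive);
                   });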
Reproducing
I can reproduce the error by replacing the data with something large enough:
Live On Coliru
dd of=test.bin seek=3 bs=1M count=0 status=none
curl -s http://127.0.0.1:8989/blrub -d #test.bin
Prints
Started session for 127.0.0.1:48884
*** read: body limit exceeded from (unknown source location)
Close session for 127.0.0.1:48884
Fixing
Indeed, you can set options on request_parser. Three lines of code changed:
http::request_parser<http::string_body> req_;
And
req_.get().clear();
req_.body_limit(8*1024*1024); // raised to 8MB
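Pulled together, a sketch of how those three lines land in the Session (everything else as in the listing above):
http::request_parser<http::string_body> req_;  // member: parser instead of request

void doRead() {
    req_.get().clear();                  // reset the message held by the parser
    req_.body_limit(8 * 1024 * 1024);    // raised to 8MB; the default is 1MB
    http::async_read(socket_, buffer_, req_,
                     std::bind(&Session::onRead, shared_from_this(), _1, _2));
}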
Live On Coliru
With no further changes:
Prints
Started session for 127.0.0.1:48886
Close session for 127.0.0.1:48886
I've created a simple wrapper for the boost::asio library. My wrapper consists of 4 main classes: NetServer (server), NetClient (client), NetSession (client/server session) and Network (a composition class of these three which also includes all callback methods).
The problem is that the first client/server connection works flawlessly, but when I then stop the server, start it again and then try to connect the client, the server just doesn't recognize the client. It seems like the acceptor callback isn't called. And the client does connect to the server, because first, the connection goes through without errors, and second, when I close the server's program, the client receives the error message WSAECONNRESET.
I've created test program which emulates the procedure written above. It does following:
Starts the server
Starts the client
Client successfully connects to server
Stops the server
Client receives the error and disconnects itself
Starts the server again
Client again successfully connects to server
BUT SERVER DOESN'T CALL THE ACCEPTOR CALLBACK ANYMORE
It means that in point 3 the acceptor successfully calls the callback function, but in point 7 the acceptor doesn't call the callback.
I think I do something wrong in stop()/start() method of the server, but I can't figure out what's exactly wrong.
The source of the NetServer class:
NetServer::NetServer(Network& netRef) : net{ netRef }
{
acceptor = std::make_unique<boost::asio::ip::tcp::acceptor>(ioc);
}
NetServer::~NetServer(void)
{
ioc.stop();
if (threadStarted)
{
th.join();
threadStarted = false;
}
if (active)
stop();
}
int NetServer::start(void)
{
assert(getAcceptHandler() != nullptr);
assert(getHeaderHandler() != nullptr);
assert(getDataHandler() != nullptr);
assert(getErrorHandler() != nullptr);
closeAll();
try
{
ep = boost::asio::ip::tcp::endpoint(boost::asio::ip::tcp::v4(), srvPort);
acceptor->open(ep.protocol());
acceptor->bind(ep);
acceptor->listen();
initAccept();
}
catch (system::system_error& e)
{
return e.code().value();
}
if (!threadStarted)
{
th = std::thread([this]()
{
ioc.run();
});
threadStarted = true;
}
active = true;
return Network::NET_OK;
}
int NetServer::stop(void)
{
ioc.post(boost::bind(&NetServer::_stop, this));
return Network::NET_OK;
}
void NetServer::_stop(void)
{
boost::system::error_code ec;
acceptor->close(ec);
for (auto& s : sessions)
closeSession(s.get(), false);
active = false;
}
void NetServer::initAccept(void)
{
sock = std::make_shared<asio::ip::tcp::socket>(ioc);
acceptor->async_accept(*sock.get(), [this](const boost::system::error_code& error)
{
onAccept(error, sock);
});
}
void NetServer::onAccept(const boost::system::error_code& ec, SocketSharedPtr sock)
{
if (ec.value() == 0)
{
if (accHandler())
{
addSession(sock);
initAccept();
}
}
else
getErrorHandler()(nullptr, ec);
}
SessionPtr NetServer::addSession(SocketSharedPtr sock)
{
std::lock_guard<std::mutex> guard(mtxSession);
auto session = std::make_shared<NetSession>(sock, *this, true);
sessions.insert(session);
session->start();
return session;
}
SessionPtr NetServer::findSession(const SessionPtr session)
{
for (auto it = std::begin(sessions); it != std::end(sessions); it++)
if (*it == session)
return *it;
return nullptr;
}
bool NetServer::closeSession(const void *session, bool erase /* = true */)
{
std::lock_guard<std::mutex> guard(mtxSession);
for (auto it = std::begin(sessions); it != std::end(sessions); it++)
if (it->get() == session)
{
try
{
it->get()->getSocket()->cancel();
it->get()->getSocket()->shutdown(asio::socket_base::shutdown_send);
it->get()->getSocket()->close();
it->get()->getSocket().reset();
}
catch (system::system_error& e)
{
UNREFERENCED_PARAMETER(e);
}
if (erase)
sessions.erase(*it);
return true;
}
return false;
}
void NetServer::closeAll(void)
{
using namespace boost::placeholders;
std::lock_guard<std::mutex> guard(mtxSession);
std::for_each(sessions.begin(), sessions.end(), boost::bind(&NetSession::stop, _1));
sessions.clear();
}
bool NetServer::write(const SessionPtr session, std::string msg)
{
if (SessionPtr s = findSession(session); s)
{
s->addMessage(msg);
if (s->canWrite())
s->write();
return true;
}
return false;
}
This is the output from the server:
Enter 0 - server, 1 - client: 0
1. Server started
3. Client connected to server
Stopping server....
4. Server stopped
Net error, server, acceptor: ERROR_OPERATION_ABORTED
Net error, server, ERROR_OPERATION_ABORTED
Client session deleted
6. Server started again
(HERE SHOULD BE "8. Client again connected to server", but the server didn't recognize the reconnected client!)
And from the client:
Enter 0 - server, 1 - client: 1
2. Client started and connected to server
Net error, client: ERROR_FILE_NOT_FOUND
5. Client disconnected from server
Waiting 3 sec before reconnect...
Connecting to server...
7. Client started and connected to server
(WHEN I CLOSE THE SERVER WINDOW, I RECEIVE HERE THE "Net error, client: WSAECONNRESET" MESSAGE - it means the client was connected to the server anyhow!)
If the code of NetClient, NetSession and Network is necessary, just let me know.
Thanks in advance
Wow. There's a lot to unpack. There is quite a lot of code smell that reminds me of some books on Asio programming that turned out to be... not excellent in my previous experience.
I couldn't give any real advice without grokking your code, which requires me to review in-depth and add missing bits. So let me just provide you with my reviewed/fixed code first, then we'll talk about some of the details.
A few areas where you seemed to have trouble making up your mind:
whether to use a strand or to use mutex locking
whether to use async or sync (e.g. closeSession is completely synchronous and blocking)
whether to use shared-pointers for lifetime or not: on the one hand you have NetSession support shared_from_this, but on the other hand you are keeping them alive in a sessions collection.
whether to use smart pointers or raw pointers (sp.get() is a code smell)
whether to use void* pointers or forward declared structs for opaque implementation
whether to use exceptions or to use error codes. Specifically:
return e.code().value();
is a Very Bad Idea. Just return error_code already. Or just propagate the exception.
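For instance, a sketch of a start() that reports errors without flattening them to an int (this is also how the demo listing below does it):
error_code start() {
    error_code ec;
    acceptor.open(tcp::v4(), ec);
    if (!ec) acceptor.bind({{}, srvPort}, ec);
    if (!ec) acceptor.listen(tcp::socket::max_listen_connections, ec);
    return ec; // the caller keeps the category and message, not just a number
}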
judging from the use, my best bet is that sessions is std::set<SessionPtr>. Then it's funny that you're doing linear searches. In fact, findSession could be:
SessionPtr findSession(SessionPtr const& session) {
std::lock_guard guard(mtxSessions);
return sessions.contains(session)? session: nullptr;
}
In fact, given some natural invariants, it could just be
auto findSession(SessionPtr s) { return std::move(s); }
Note as well, you had forgotten to lock the mutex in findSession
closeSession completely violates Law Of Demeter, 6*3 times over if you will. In my example I make it so SessHandle is a weak pointer to NetSession and you can just write:
for (auto& handle : sessions)
if (auto sess = handle.lock())
sess->close();
Of course, sess->close() should not block
Also, it should correctly synchronize on the session e.g. using the sessions strand:
void close() {
return post(sock_.get_executor(), [this, self = shared_from_this()] {
error_code ec;
if (!ec) sock_.cancel(ec);
if (!ec) sock_.shutdown(tcp::socket::shutdown_send, ec);
if (!ec) sock_.close(ec);
});
}
If you insist, you can make it so the caller can still await the result and receive any exceptions:
std::future<void> close() {
return post(
sock_.get_executor(),
std::packaged_task<void()>{[this, self = shared_from_this()] {
sock_.cancel();
sock_.shutdown(tcp::socket::shutdown_send);
sock_.close();
}});
}
Honestly, that seems overkill since you never look at the return value anyways.
In general, I recommend leaving socket::close() to the destructor. It avoids a specific class of race-conditions on socket handles.
Don't use boolean flags (threadStarted is better replaced with th.joinable())
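In code:
if (th.joinable())
    th.join(); // joinable() already encodes what the flag tracked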
apparently you had NetSession::stop which I imagine did largely the same as closeSession but in the right place? I replaced it with the new NetSession::close
subtly, when accHandler returned false, you would exit the accept loop altogether. I doubt that was on purpose
try to minimize time under locks:
I will show you how to do without the lock entirely instead.
Demo Listing
#include <boost/asio.hpp>
#include <boost/system/error_code.hpp>
#include <deque>
#include <iostream>
#include <iomanip>
#include <set>
using namespace std::chrono_literals;
using namespace std::placeholders;
namespace asio = boost::asio;
using asio::ip::tcp;
using boost::system::error_code;
static inline std::ostream debug(std::cerr.rdbuf());
struct Network {
static constexpr error_code NET_OK{};
};
struct NetSession; // opaque forward reference
struct NetServer;
using SessHandle = std::weak_ptr<NetSession>; // opaque handle
using Sessions = std::set<SessHandle, std::owner_less<>>;
struct NetSession : std::enable_shared_from_this<NetSession> {
NetSession(tcp::socket&& s, NetServer& srv, bool)
: sock_(std::move(s))
, srv_(srv) {
debug << "New session from " << getPeer() << std::endl;
}
void start() {
post(sock_.get_executor(),
std::bind(&NetSession::do_read, shared_from_this()));
}
tcp::endpoint getPeer() const { return peer_; }
void close() {
return post(sock_.get_executor(), [this, self = shared_from_this()] {
debug << "Closing " << getPeer() << std::endl;
error_code ec;
if (!ec) sock_.cancel(ec);
if (!ec) sock_.shutdown(tcp::socket::shutdown_send, ec);
// if (!ec) sock_.close(ec);
});
}
void addMessage(std::string msg) {
post(sock_.get_executor(),
[this, msg = std::move(msg), self = shared_from_this()] {
outgoing_.push_back(std::move(msg));
if (canWrite())
write_loop();
});
}
private:
// assumed on (logical) strand
bool canWrite() const { // FIXME misnomer: shouldStartWriteLoop()?
return outgoing_.size() == 1;
}
void write_loop() {
if (outgoing_.empty())
return;
async_write(sock_, asio::buffer(outgoing_.front()),
[this, self = shared_from_this()](error_code ec, size_t) {
if (!ec) {
outgoing_.pop_front();
write_loop();
}
});
}
void do_read() {
incoming_.clear();
async_read_until(
sock_, asio::dynamic_buffer(incoming_), "\n",
std::bind(&NetSession::on_read, shared_from_this(), _1, _2));
}
void on_read(error_code ec, size_t);
tcp::socket sock_;
tcp::endpoint peer_ = sock_.remote_endpoint();
NetServer& srv_;
std::string incoming_;
std::deque<std::string> outgoing_;
};
using SessionPtr = std::shared_ptr<NetSession>;
using SocketSharedPtr = std::shared_ptr<tcp::socket>;
struct NetServer {
NetServer(Network& netRef) : net{netRef} {}
~NetServer()
{
if (acceptor.is_open())
acceptor.cancel(); // TODO seems pretty redundant
stop();
if (th.joinable())
th.join();
}
std::function<bool()> accHandler;
std::function<void(SocketSharedPtr, error_code)> errHandler;
// TODO headerHandler
std::function<void(SessionPtr, error_code, std::string)> dataHandler;
error_code start() {
assert(accHandler);
assert(errHandler);
assert(dataHandler);
closeAll(sessions);
error_code ec;
if (!ec) acceptor.open(tcp::v4(), ec);
if (!ec) acceptor.bind({{}, srvPort}, ec);
if (!ec) acceptor.listen(tcp::socket::max_listen_connections, ec);
if (!ec) {
do_accept();
if (!th.joinable()) {
th = std::thread([this] { ioc.run(); }); // TODO exceptions!
}
}
if (ec && acceptor.is_open())
acceptor.close();
return ec;
}
void stop() { //
post(ioc, std::bind(&NetServer::do_stop, this));
}
void closeSession(SessHandle handle, bool erase = true) {
post(acceptor.get_executor(), [=, this] {
if (auto s = handle.lock()) {
s->close();
}
if (erase) {
sessions.erase(handle);
}
});
}
void closeAll() {
post(acceptor.get_executor(), [this] {
closeAll(sessions);
sessions.clear();
});
}
// TODO FIXME is the return value worth it?
bool write(SessionPtr const& session, std::string msg) {
return post(acceptor.get_executor(),
std::packaged_task<bool()>{std::bind(
&NetServer::do_write, this, session, std::move(msg))})
.get();
}
// compare
void writeAll(std::string msg) {
post(acceptor.get_executor(),
std::bind(&NetServer::do_write_all, this, std::move(msg)));
}
private:
Network& net;
asio::io_context ioc;
tcp::acceptor acceptor{ioc}; // active -> acceptor.is_open()
std::thread th; // threadActive -> th.joinable()
Sessions sessions;
std::uint16_t srvPort = 8989;
// std::mutex mtxSessions; // note naming; also replaced by logical strand
// assumed on acceptor logical strand
void do_accept() {
acceptor.async_accept(
make_strand(ioc), [this](error_code ec, tcp::socket sock) {
if (ec.failed()) {
return errHandler(nullptr, ec);
}
if (accHandler()) {
auto s = std::make_shared<NetSession>(std::move(sock),
*this, true);
sessions.insert(s);
s->start();
}
do_accept();
});
}
SessionPtr do_findSession(SessionPtr const& session) {
return sessions.contains(session) ? session : nullptr;
}
bool do_write(SessionPtr session, std::string msg) {
if (auto s = do_findSession(session)) {
s->addMessage(std::move(msg));
return true;
}
return false;
}
void do_write_all(std::string msg) {
for(auto& handle : sessions)
if (auto sess = handle.lock())
do_write(sess, msg);
}
static void closeAll(Sessions const& sessions) {
for (auto& handle : sessions)
if (auto sess = handle.lock())
sess->close();
}
void do_stop()
{
if (acceptor.is_open()) {
error_code ec;
acceptor.close(ec); // TODO error handling?
}
closeAll(sessions); // TODO FIXME why not clear sessions?
}
};
// Implementation must be after NetServer definition:
void NetSession::on_read(error_code ec, size_t) {
if (srv_.dataHandler)
srv_.dataHandler(shared_from_this(), ec, std::move(incoming_));
if (!ec)
do_read();
}
int main() {
Network net;
NetServer srv{net};
srv.accHandler = [] { return true; };
srv.errHandler = [](SocketSharedPtr, error_code ec) {
debug << "errHandler: " << ec.message() << std::endl;
};
srv.dataHandler = [](SessionPtr sess, error_code ec, std::string msg) {
debug << "dataHandler: " << sess->getPeer() << " " << ec.message()
<< " " << std::quoted(msg) << std::endl;
};
srv.start();
std::this_thread::sleep_for(10s);
std::cout << "Shutdown started" << std::endl;
srv.writeAll("We're going to shutdown, take care!\n");
srv.stop();
}
Live Demo:
I have next snippet:
void TcpConnection::Send(const std::vector<uint8_t>& buffer) {
std::shared_ptr<std::vector<uint8_t>> bufferCopy = std::make_shared<std::vector<uint8_t>>(buffer);
auto socket = m_socket;
m_socket->async_send(asio::buffer(bufferCopy->data(), bufferCopy->size()), [socket, bufferCopy](const boost::system::error_code& err, size_t bytesSent)
{
if (err)
{
logwarning << "clientcomms_t::sendNext encountered error: " << err.message();
// Assume that the communications path is no longer
// valid.
socket->close();
}
});
}
This code leads to a memory leak. If the m_socket->async_send call is commented out, then there is no memory leak. I cannot understand why bufferCopy is not freed after the callback is dispatched. What am I doing wrong?
Windows is used.
Since you don't show any relevant code, and the code shown does not contain a strict problem, I'm going to assume from the code smells.
The smell is that you have a TcpConnection class that is not enable_shared_from_this<TcpConnection> derived. This leads me to suspect you didn't plan ahead, because there's no possible reasonable way to continue using the instance after the completion of any asynchronous operation (like the async_send).
This leads me to suspect you have a crucially simple problem, which is that your completion handler never runs. There's only one situation that could explain this, and that leads me to assume you never run() the io_service instance
Here's the situation live:
Live On Coliru
#include <boost/asio.hpp>
namespace asio = boost::asio;
using asio::ip::tcp;
#include <iostream>
auto& logwarning = std::clog;
struct TcpConnection {
using Buffer = std::vector<uint8_t>;
void Send(Buffer const &);
TcpConnection(asio::io_service& svc) : m_socket(std::make_shared<tcp::socket>(svc)) {}
tcp::socket& socket() const { return *m_socket; }
private:
std::shared_ptr<tcp::socket> m_socket;
};
void TcpConnection::Send(Buffer const &buffer) {
auto bufferCopy = std::make_shared<Buffer>(buffer);
auto socket = m_socket;
m_socket->async_send(asio::buffer(bufferCopy->data(), bufferCopy->size()),
[socket, bufferCopy](const boost::system::error_code &err, size_t /*bytesSent*/) {
if (err) {
logwarning << "clientcomms_t::sendNext encountered error: " << err.message();
// Assume that the communications path is no longer
// valid.
socket->close();
}
});
}
int main() {
asio::io_service svc;
tcp::acceptor a(svc, tcp::v4());
a.bind({{}, 6767});
a.listen();
boost::system::error_code ec;
do {
TcpConnection conn(svc);
a.accept(conn.socket(), ec);
char const* greeting = "whale hello there!\n";
conn.Send({greeting, greeting+strlen(greeting)});
} while (!ec);
}
You'll see that any client connection, e.g. with netcat localhost 6767, will receive the greeting, after which, surprisingly, the connection will stay open instead of being closed.
You'd expect the connection to be closed by the server side either way, either because
a transmission error occurred in async_send
or because after the completion handler is run, it is destroyed and hence the captured shared-pointers are destructed. Not only would that free the copied buffer, but it would also run the destructor of socket, which would close the connection.
This clearly confirms that the completion handler never runs. The fix is "easy", find a place to run the service:
int main() {
asio::io_service svc;
tcp::acceptor a(svc, tcp::v4());
a.set_option(tcp::acceptor::reuse_address());
a.bind({{}, 6767});
a.listen();
std::thread th;
{
asio::io_service::work keep(svc); // prevent service running out of work early
th = std::thread([&svc] { svc.run(); });
boost::system::error_code ec;
for (int i = 0; i < 11 && !ec; ++i) {
TcpConnection conn(svc);
a.accept(conn.socket(), ec);
char const* greeting = "whale hello there!\n";
conn.Send({greeting, greeting+strlen(greeting)});
}
}
th.join();
}
This runs 11 connections and exits leak-free.
Better:
It becomes a lot cleaner when the accept loop is also async, and the TcpConnection is properly shared as hinted above:
Live On Coliru
#include <boost/asio.hpp>
namespace asio = boost::asio;
using asio::ip::tcp;
#include <memory>
#include <thread>
#include <iostream>
auto& logwarning = std::clog;
struct TcpConnection : std::enable_shared_from_this<TcpConnection> {
using Buffer = std::vector<uint8_t>;
TcpConnection(asio::io_service& svc) : m_socket(svc) {}
void start() {
char const* greeting = "whale hello there!\n";
Send({greeting, greeting+strlen(greeting)});
}
void Send(Buffer);
private:
friend struct Server;
Buffer m_output;
tcp::socket m_socket;
};
struct Server {
Server(unsigned short port) {
_acceptor.set_option(tcp::acceptor::reuse_address());
_acceptor.bind({{}, port});
_acceptor.listen();
do_accept();
}
~Server() {
keep.reset();
_svc.post([this] { _acceptor.cancel(); });
if (th.joinable())
th.join();
}
private:
void do_accept() {
auto conn = std::make_shared<TcpConnection>(_svc);
_acceptor.async_accept(conn->m_socket, [this,conn](boost::system::error_code ec) {
if (ec)
logwarning << "accept failed: " << ec.message() << "\n";
else {
conn->start();
do_accept();
}
});
}
asio::io_service _svc;
// prevent service running out of work early:
std::unique_ptr<asio::io_service::work> keep{std::make_unique<asio::io_service::work>(_svc)};
std::thread th{[this]{_svc.run();}}; // TODO handle handler exceptions
tcp::acceptor _acceptor{_svc, tcp::v4()};
};
void TcpConnection::Send(Buffer buffer) {
m_output = std::move(buffer);
auto self = shared_from_this();
m_socket.async_send(asio::buffer(m_output),
[self](const boost::system::error_code &err, size_t /*bytesSent*/) {
if (err) {
logwarning << "clientcomms_t::sendNext encountered error: " << err.message() << "\n";
// not holding on to `self` means the socket gets closed
}
// do more with `self` which points to the TcpConnection instance...
});
}
int main() {
Server server(6868);
std::this_thread::sleep_for(std::chrono::seconds(3));
}
According to the documentation:
"The program must ensure that the stream performs no other write operations (such as async_write, the stream's async_write_some function, or any other composed operations that perform writes) until this operation completes."
Does this mean, I cannot call boost::asio::async_write a second time until the handler for the first is called? How does one achieve this and still be asynchronous?
If I have a method Send:
//--------------------------------------------------------------------
void Connection::Send(const std::vector<char> & data)
{
auto callback = boost::bind(&Connection::OnSend, this, boost::asio::placeholders::error, boost::asio::placeholders::bytes_transferred);
boost::asio::async_write(m_socket, boost::asio::buffer(data), callback);
}
Do I have to change it to something like:
//--------------------------------------------------------------------
void Connection::Send(const std::vector<char> & data)
{
// Issue a send
std::lock_guard<std::mutex> lock(m_numPostedSocketIOMutex);
++m_numPostedSocketIO;
m_numPostedSocketIOConditionVariable.wait(lock, [this]() {return m_numPostedSocketIO == 0; });
auto callback = boost::bind(&Connection::OnSend, this, boost::asio::placeholders::error, boost::asio::placeholders::bytes_transferred);
boost::asio::async_write(m_socket, boost::asio::buffer(data), callback);
}
and if so, then aren't I blocking after the first call again?
The async in async_write() refers to the fact that the function returns immediately while the writing happens in the background. There should still be only one outstanding write at any given time.
You need to use a buffer if you have an asynchronous producer to set aside the new chunk of data until the currently active write completes, then issue a new async_write in the completion handler.
That is, Connection::Send must only call async_write once to kick off the process, in subsequent calls it should instead buffer its data, which will be picked up in the completion handler of the currently executing async_write.
For performance reasons you want to avoid copying the data into the buffer, and instead append the new chunk to a list of buffers and use the scatter-gather overload of async_write that accepts a ConstBufferSequence. It is also possible to use one large streambuf as a buffer and append directly into it.
Of course the buffer needs to be synchronized unless both Connection::Send and the io_service run in the same thread. An empty buffer can be reused as an indication that no async_write is in progress.
Here's some code to illustrate what I mean:
struct Connection
{
void Send(std::vector<char>&& data)
{
std::lock_guard<std::mutex> lock(buffer_mtx);
buffers[active_buffer ^ 1].push_back(std::move(data)); // move input data to the inactive buffer
doWrite();
}
private:
void doWrite()
{
if (buffer_seq.empty()) { // empty buffer sequence == no writing in progress
active_buffer ^= 1; // switch buffers
for (const auto& data : buffers[active_buffer]) {
buffer_seq.push_back(boost::asio::buffer(data));
}
boost::asio::async_write(m_socket, buffer_seq, [this] (const boost::system::error_code& ec, size_t bytes_transferred) {
std::lock_guard<std::mutex> lock(buffer_mtx);
buffers[active_buffer].clear();
buffer_seq.clear();
if (!ec) {
if (!buffers[active_buffer ^ 1].empty()) { // have more work
doWrite();
}
}
});
}
}
std::mutex buffer_mtx;
std::vector<std::vector<char>> buffers[2]; // a double buffer
std::vector<boost::asio::const_buffer> buffer_seq;
int active_buffer = 0;
. . .
};
The complete working source can be found in this answer.
Yes, you need to wait for the completion handler before calling async_write again. Are you sure you'll be blocked? Of course it depends on how fast you generate your data, but even if so, there's no way to send it faster than your network can handle. If it's really an issue, consider sending bigger chunks.
Here is a complete, compilable, and tested example that I researched and got to work through trial and error after reading the answer and subsequent edits from RustyX.
Connection.h
#pragma once
#include <boost/asio.hpp>
#include <atomic>
#include <condition_variable>
#include <memory>
#include <mutex>
//--------------------------------------------------------------------
class ConnectionManager;
//--------------------------------------------------------------------
class Connection : public std::enable_shared_from_this<Connection>
{
public:
typedef std::shared_ptr<Connection> SharedPtr;
// Ensure all instances are created as shared_ptr in order to fulfill requirements for shared_from_this
static Connection::SharedPtr Create(ConnectionManager * connectionManager, boost::asio::ip::tcp::socket & socket);
//
static std::string ErrorCodeToString(const boost::system::error_code & errorCode);
Connection(const Connection &) = delete;
Connection(Connection &&) = delete;
Connection & operator = (const Connection &) = delete;
Connection & operator = (Connection &&) = delete;
~Connection();
// We have to defer the start until we are fully constructed because we share_from_this()
void Start();
void Stop();
void Send(const std::vector<char> & data);
private:
static size_t m_nextClientId;
size_t m_clientId;
ConnectionManager * m_owner;
boost::asio::ip::tcp::socket m_socket;
std::atomic<bool> m_stopped;
boost::asio::streambuf m_receiveBuffer;
mutable std::mutex m_sendMutex;
std::vector<char> m_sendBuffers[2]; // Double buffer
int m_activeSendBufferIndex;
bool m_sending;
std::vector<char> m_allReadData; // Strictly for test purposes
Connection(ConnectionManager * connectionManager, boost::asio::ip::tcp::socket socket);
void DoReceive();
void DoSend();
};
//--------------------------------------------------------------------
Connection.cpp
#include "Connection.h"
#include "ConnectionManager.h"
#include <boost/bind.hpp>
#include <algorithm>
#include <cstdlib>
//--------------------------------------------------------------------
size_t Connection::m_nextClientId(0);
//--------------------------------------------------------------------
Connection::SharedPtr Connection::Create(ConnectionManager * connectionManager, boost::asio::ip::tcp::socket & socket)
{
return Connection::SharedPtr(new Connection(connectionManager, std::move(socket)));
}
//--------------------------------------------------------------------------------------------------
std::string Connection::ErrorCodeToString(const boost::system::error_code & errorCode)
{
std::ostringstream debugMsg;
debugMsg << " Error Category: " << errorCode.category().name() << ". "
<< " Error Message: " << errorCode.message() << ". ";
// IMPORTANT - These comparisons only work if you dynamically link boost libraries
// Because boost chose to implement boost::system::error_category::operator == by comparing addresses
// The addresses are different in one library and the other when statically linking.
//
// We use make_error_code macro to make the correct category as well as error code value.
// Error code value is not unique and can be duplicated in more than one category.
if (errorCode == boost::asio::error::make_error_code(boost::asio::error::connection_refused))
{
debugMsg << " (Connection Refused)";
}
else if (errorCode == boost::asio::error::make_error_code(boost::asio::error::eof))
{
debugMsg << " (Remote host has disconnected)";
}
else
{
debugMsg << " (boost::system::error_code has not been mapped to a meaningful message)";
}
return debugMsg.str();
}
//--------------------------------------------------------------------
Connection::Connection(ConnectionManager * connectionManager, boost::asio::ip::tcp::socket socket)
:
m_clientId (m_nextClientId++)
, m_owner (connectionManager)
, m_socket (std::move(socket))
, m_stopped (false)
, m_receiveBuffer ()
, m_sendMutex ()
, m_sendBuffers ()
, m_activeSendBufferIndex (0)
, m_sending (false)
, m_allReadData ()
{
printf("Client connection with id %zd has been created.", m_clientId);
}
//--------------------------------------------------------------------
Connection::~Connection()
{
// Boost uses RAII, so we don't have anything to do. Let their destructors take care of business
printf("Client connection with id %zd has been destroyed.", m_clientId);
}
//--------------------------------------------------------------------
void Connection::Start()
{
DoReceive();
}
//--------------------------------------------------------------------
void Connection::Stop()
{
// The entire connection class is only kept alive, because it is a shared pointer and always has a ref count
// as a consequence of the outstanding async receive call that gets posted every time we receive.
// Once we stop posting another receive in the receive handler and once our owner release any references to
// us, we will get destroyed.
m_stopped = true;
m_owner->OnConnectionClosed(shared_from_this());
}
//--------------------------------------------------------------------
void Connection::Send(const std::vector<char> & data)
{
std::lock_guard<std::mutex> lock(m_sendMutex);
// Append to the inactive buffer
std::vector<char> & inactiveBuffer = m_sendBuffers[m_activeSendBufferIndex ^ 1];
inactiveBuffer.insert(inactiveBuffer.end(), data.begin(), data.end());
//
DoSend();
}
//--------------------------------------------------------------------
void Connection::DoSend()
{
// Check if there is an async send in progress
// An empty active buffer indicates there is no outstanding send
if (m_sendBuffers[m_activeSendBufferIndex].empty())
{
m_activeSendBufferIndex ^= 1;
std::vector<char> & activeBuffer = m_sendBuffers[m_activeSendBufferIndex];
auto self(shared_from_this());
boost::asio::async_write(m_socket, boost::asio::buffer(activeBuffer),
[self](const boost::system::error_code & errorCode, size_t bytesTransferred)
{
std::lock_guard<std::mutex> lock(self->m_sendMutex);
self->m_sendBuffers[self->m_activeSendBufferIndex].clear();
if (errorCode)
{
printf("An error occured while attemping to send data to client id %zd. %s", self->m_clientId, ErrorCodeToString(errorCode).c_str());
// An error occurred
// We do not stop or close on sends, but instead let the receive error out and then close
return;
}
// Check if there is more to send that has been queued up on the inactive buffer,
// while we were sending what was on the active buffer
if (!self->m_sendBuffers[self->m_activeSendBufferIndex ^ 1].empty())
{
self->DoSend();
}
});
}
}
//--------------------------------------------------------------------
void Connection::DoReceive()
{
auto self(shared_from_this());
boost::asio::async_read_until(m_socket, m_receiveBuffer, '#',
[self](const boost::system::error_code & errorCode, size_t bytesRead)
{
if (errorCode)
{
// Check if the other side hung up
if (errorCode == boost::asio::error::make_error_code(boost::asio::error::eof))
{
// This is not really an error. The client is free to hang up whenever they like
printf("Client %zd has disconnected.", self->m_clientId);
}
else
{
printf("An error occured while attemping to receive data from client id %zd. Error Code: %s", self->m_clientId, ErrorCodeToString(errorCode).c_str());
}
// Notify our masters that we are ready to be destroyed
self->m_owner->OnConnectionClosed(self);
// An error occured
return;
}
// Grab the read data
std::istream stream(&self->m_receiveBuffer);
std::string data;
std::getline(stream, data, '#');
data += "#";
printf("Received data from client %zd: %s", self->m_clientId, data.c_str());
// Issue the next receive
if (!self->m_stopped)
{
self->DoReceive();
}
});
}
//--------------------------------------------------------------------
ConnectionManager.h
#pragma once
#include "Connection.h"
// Boost Includes
#include <boost/asio.hpp>
// Standard Includes
#include <mutex>
#include <thread>
#include <vector>
//--------------------------------------------------------------------
class ConnectionManager
{
public:
ConnectionManager(unsigned port, size_t numThreads);
ConnectionManager(const ConnectionManager &) = delete;
ConnectionManager(ConnectionManager &&) = delete;
ConnectionManager & operator = (const ConnectionManager &) = delete;
ConnectionManager & operator = (ConnectionManager &&) = delete;
~ConnectionManager();
void Start();
void Stop();
void OnConnectionClosed(Connection::SharedPtr connection);
protected:
boost::asio::io_service m_io_service;
boost::asio::ip::tcp::acceptor m_acceptor;
boost::asio::ip::tcp::socket m_listenSocket;
std::vector<std::thread> m_threads;
mutable std::mutex m_connectionsMutex;
std::vector<Connection::SharedPtr> m_connections;
boost::asio::deadline_timer m_timer;
void IoServiceThreadProc();
void DoAccept();
void DoTimer();
};
//--------------------------------------------------------------------
ConnectionManager.cpp
#include "ConnectionManager.h"
#include <boost/bind.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
#include <system_error>
#include <algorithm> // for std::find
#include <cstdio>
//------------------------------------------------------------------------------
ConnectionManager::ConnectionManager(unsigned port, size_t numThreads)
:
m_io_service ()
, m_acceptor (m_io_service, boost::asio::ip::tcp::endpoint(boost::asio::ip::tcp::v4(), port))
, m_listenSocket(m_io_service)
, m_threads (numThreads)
, m_timer (m_io_service)
{
}
//------------------------------------------------------------------------------
ConnectionManager::~ConnectionManager()
{
Stop();
}
//------------------------------------------------------------------------------
void ConnectionManager::Start()
{
if (m_io_service.stopped())
{
m_io_service.reset();
}
DoAccept();
for (auto & thread : m_threads)
{
if (!thread.joinable())
{
thread = std::thread(&ConnectionManager::IoServiceThreadProc, this); // swap cannot take a temporary; move-assign instead
}
}
DoTimer();
}
//------------------------------------------------------------------------------
void ConnectionManager::Stop()
{
{
std::lock_guard<std::mutex> lock(m_connectionsMutex);
m_connections.clear();
}
// TODO - Will the stopping of the io_service be enough to kill all the connections and ultimately have them get destroyed?
// Because remember they have an outstanding ref count to their shared_ptr in the async handlers
m_io_service.stop();
for (auto & thread : m_threads)
{
if (thread.joinable())
{
thread.join();
}
}
}
//------------------------------------------------------------------------------
void ConnectionManager::IoServiceThreadProc()
{
try
{
// Log that we are starting the io_service thread
printf("io_service socket thread starting.");
// Run the asynchronous callbacks from the socket on this thread
// Until the io_service is stopped from another thread
m_io_service.run();
}
catch (std::system_error & e)
{
printf("System error caught in io_service socket thread. Error Code: %d", e.code().value());
}
catch (std::exception & e)
{
printf("Standard exception caught in io_service socket thread. Exception: %s", e.what());
}
catch (...)
{
printf("Unhandled exception caught in io_service socket thread.");
}
printf("io_service socket thread exiting.");
}
//------------------------------------------------------------------------------
void ConnectionManager::DoAccept()
{
m_acceptor.async_accept(m_listenSocket,
[this](const boost::system::error_code errorCode)
{
if (errorCode)
{
printf("An error occured while attemping to accept connections. Error Code: %s", Connection::ErrorCodeToString(errorCode).c_str());
return;
}
// Create the connection from the connected socket
std::lock_guard<std::mutex> lock(m_connectionsMutex);
Connection::SharedPtr connection = Connection::Create(this, std::move(m_listenSocket)); // tcp::socket is move-only
m_connections.push_back(connection);
connection->Start();
DoAccept();
});
}
//------------------------------------------------------------------------------
void ConnectionManager::OnConnectionClosed(Connection::SharedPtr connection)
{
std::lock_guard<std::mutex> lock(m_connectionsMutex);
auto itConnection = std::find(m_connections.begin(), m_connections.end(), connection);
if (itConnection != m_connections.end())
{
m_connections.erase(itConnection);
}
}
//------------------------------------------------------------------------------
void ConnectionManager::DoTimer()
{
if (!m_io_service.stopped())
{
// Send a message to every connection every 30 seconds
m_timer.expires_from_now(boost::posix_time::seconds(30));
m_timer.async_wait(
[this](const boost::system::error_code & errorCode)
{
std::lock_guard<std::mutex> lock(m_connectionsMutex);
for (auto connection : m_connections)
{
connection->Send(std::vector<char>{'b', 'e', 'e', 'p', '#'});
}
DoTimer();
});
}
}
main.cpp
#include "ConnectionManager.h"
#include <chrono>
#include <cstring>
#include <iostream>
#include <string>
int main()
{
// Start up the server
ConnectionManager connectionManager(5000, 2);
connectionManager.Start();
// Pretend we are doing other things or just waiting for shutdown
std::this_thread::sleep_for(std::chrono::minutes(5));
// Stop the server
connectionManager.Stop();
return 0;
}
Could we use 2 strands for this question, by posting write(...) as an asynchronous operation to strand1 and handler(...) to strand2?
Your advice on the code would be highly appreciated.
boost::asio::strand<boost::asio::io_context::executor_type> strand1, strand2;
std::vector<char> empty_vector(0);
void Connection::Send(const std::vector<char> & data)
{
boost::asio::post(boost::asio::bind_executor(strand1, std::bind(&Connection::write, this, true, data)));
}
void Connection::write(bool has_data, const std::vector<char> & data)
{
// Append to the inactive buffer
std::vector<char> & inactiveBuffer = m_sendBuffers[m_activeSendBufferIndex ^ 1];
if (has_data)
{
inactiveBuffer.insert(inactiveBuffer.end(), data.begin(), data.end());
}
//
if (!inactiveBuffer.empty() && m_sendBuffers[m_activeSendBufferIndex].empty())
{
m_activeSendBufferIndex ^= 1;
std::vector<char> & activeBuffer = m_sendBuffers[m_activeSendBufferIndex];
boost::asio::async_write(m_socket, boost::asio::buffer(activeBuffer), boost::asio::bind_executor(strand2, std::bind(&Connection::handler, this, std::placeholders::_1, std::placeholders::_2)));
}
}
void Connection::handler(const boost::system::error_code & errorCode, size_t bytesTransferred)
{
m_sendBuffers[m_activeSendBufferIndex].clear();
if (errorCode)
{
printf("An error occurred while attempting to send data to client id %zu. %s", m_clientId, ErrorCodeToString(errorCode).c_str());
// An error occurred
// We do not stop or close on sends, but instead let the receive error out and then close
return;
}
boost::asio::post(boost::asio::bind_executor(strand1, std::bind(&Connection::write, this, false, empty_vector)));
}
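For comparison, here is a minimal sketch of the more conventional single-strand variant; it assumes a hypothetical per-connection member m_strand constructed from the io_context's executor. Because every handler that touches m_sendBuffers and m_activeSendBufferIndex is bound to the same strand, no two of them can run concurrently and the mutex becomes unnecessary. Two separate strands would not give that guarantee, since a handler on strand1 may still run in parallel with one on strand2:
// Sketch only. Assumes Connection gains a member such as:
//   boost::asio::strand<boost::asio::io_context::executor_type> m_strand;
// initialized from the io_context's executor, e.g. m_strand(io_context.get_executor()).
void Connection::Send(const std::vector<char> & data)
{
    // Post onto the connection's strand; write() then runs serialized
    // with every other handler bound to the same strand.
    boost::asio::post(m_strand, std::bind(&Connection::write, this, true, data));
}

void Connection::write(bool has_data, const std::vector<char> & data)
{
    std::vector<char> & inactiveBuffer = m_sendBuffers[m_activeSendBufferIndex ^ 1];
    if (has_data)
    {
        inactiveBuffer.insert(inactiveBuffer.end(), data.begin(), data.end());
    }
    if (!inactiveBuffer.empty() && m_sendBuffers[m_activeSendBufferIndex].empty())
    {
        m_activeSendBufferIndex ^= 1;
        // The completion handler runs on the same strand as write(),
        // so the buffer swap never races with a concurrent Send().
        boost::asio::async_write(m_socket, boost::asio::buffer(m_sendBuffers[m_activeSendBufferIndex]),
            boost::asio::bind_executor(m_strand,
                std::bind(&Connection::handler, this, std::placeholders::_1, std::placeholders::_2)));
    }
}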
I'm a complete beginner with boost::asio, so please help me.
I need to write a single-threaded TCP server. The server should accept client connections and continuously read from the client sockets for input data. Periodically, the server should also send data to the clients. My problem is that all the examples describe the case where we always have the loop:
1. async_receive()
2. on_receive() -> async_write()
3. on_write() -> goto 1 :)
So my decision was to use a timer to check for data to be sent to the socket.
I wrote a test server and ran into very strange behavior: it works fine if clients connect, do something, and disconnect one after another with some time delta between them. But if all clients disconnect simultaneously, I end up in a
situation where the timer handler tries to use members of an already DESTROYED object (it locks the critical section).
I can't explain why! Please help!
[This video shows how it's reproduced](http://www.youtube.com/watch?v=NMWkD7rqf7Y&feature=youtu.be "1080p")
Thank you !
#include <boost/none.hpp>
#include <boost/bind.hpp>
#include <boost/asio.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/enable_shared_from_this.hpp>
#include <boost/noncopyable.hpp>
#include <iostream>
#include <algorithm> // for std::find
#include <string>
#include <vector>
#include <windows.h> // for CRITICAL_SECTION
#include <tchar.h>   // for _tmain / _TCHAR
using namespace boost::asio;
using namespace boost::posix_time;
class CIncommingConnection ;
typedef boost::shared_ptr<CIncommingConnection> CIncommingConnectionPtr;
struct IIncomingServer
{
virtual void OnData(CIncommingConnectionPtr pConn, const char *pData, size_t bytes) = 0;
virtual void OnConnected(CIncommingConnectionPtr pConn) = 0;
virtual void OnDisconnected(const boost::system::error_code& err, CIncommingConnectionPtr pConn) = 0;
};
class CAutoLock
{
public:
CAutoLock(CRITICAL_SECTION &cs) :
m_cs(cs)
{
::EnterCriticalSection(&m_cs);
}
~CAutoLock()
{
::LeaveCriticalSection(&m_cs);
}
private:
CRITICAL_SECTION &m_cs;
};
class CIncommingConnection : public boost::enable_shared_from_this<CIncommingConnection>
,boost::noncopyable
{
public:
CIncommingConnection(const std::string sPeerName, boost::asio::io_service &service, IIncomingServer *pServer) :
m_service(service)
,sock_(service)
,m_sPeerName(sPeerName)
,m_pServer(pServer)
,m_timer(service)
{
::InitializeCriticalSection(&m_cs);
std::cout << "CIncommingConnection()" << std::endl ;
}
~CIncommingConnection()
{
std::cout << "CIncommingConnection()~" << std::endl ;
::DeleteCriticalSection(&m_cs);
}
ip::tcp::socket & sock()
{
return sock_;
}
void start()
{
m_pServer->OnConnected(shared_from_this());
do_read();
wait_for_outgoingdata();
}
private:
void stop()
{
sock_.close();
m_timer.cancel();
}
void do_read()
{
sock_.async_receive(buffer(read_buffer_), boost::bind(&CIncommingConnection::handler_read, this, _1, _2) );
}
void do_error(const boost::system::error_code& error)
{
CIncommingConnectionPtr pConn = shared_from_this();
stop() ;
m_pServer->OnDisconnected(error, pConn);
}
void handler_read(const boost::system::error_code& error, std::size_t bytes)
{
if (error)
{
do_error(error);
return ;
}
CIncommingConnectionPtr pConn = shared_from_this() ;
m_pServer->OnData(pConn, read_buffer_, bytes);
do_read();
}
void wait_for_outgoingdata()
{
m_timer.expires_from_now( boost::posix_time::millisec( 100 ) );
m_timer.async_wait( boost::bind( &CIncommingConnection::on_output_queue_timer, this, _1 ) );
}
void on_output_queue_timer(const boost::system::error_code& error)
{
if (error == boost::asio::error::operation_aborted)
{
return ;
}
CAutoLock oLock(m_cs);
if (!m_sOutBuf.empty())
sock_.async_send(buffer(m_sOutBuf), boost::bind(&CIncommingConnection::handler_write, this, _1, _2) );
else
wait_for_outgoingdata();
}
void handler_write(const boost::system::error_code& error, std::size_t bytes)
{
if (error)
return ;
if (bytes)
{
m_sOutBuf = m_sOutBuf.substr(bytes, m_sOutBuf.length()-bytes);
}
wait_for_outgoingdata();
}
private:
ip::tcp::socket sock_;
enum { max_msg = 1024 };
char read_buffer_[max_msg];
char write_buffer_[max_msg];
boost::asio::io_service &m_service ;
std::string m_sPeerName ;
std::string m_sOutBuf;
CRITICAL_SECTION m_cs ;
IIncomingServer *m_pServer;
boost::asio::deadline_timer m_timer;
};
class CIncomingServer : public boost::enable_shared_from_this<CIncomingServer>
, public IIncomingServer
, boost::noncopyable
{
public:
CIncomingServer(boost::asio::io_service &service,
unsigned int port,
bool bAllowManyConnections,
const std::string sPeerName) :
m_acceptor (service, ip::tcp::endpoint(ip::tcp::v4(), port), false)
,m_sPeerName(sPeerName)
,m_port(port)
,m_service(service)
,m_timer(service)
,m_bAllowManyConnections(bAllowManyConnections)
{
}
~CIncomingServer()
{
}
void run()
{
CIncommingConnectionPtr pConn (new CIncommingConnection(m_sPeerName, m_service, this));
m_clients.push_back( pConn );
m_acceptor.async_accept(pConn->sock(), boost::bind(&CIncomingServer::handle_accept, this, _1));
m_timer.expires_from_now( boost::posix_time::millisec( 500 ) );
m_timer.async_wait( boost::bind( &CIncomingServer::on_timer, this ) );
}
private:
void handle_accept(const boost::system::error_code & err)
{
m_clients.back()->start();
CIncommingConnectionPtr pConnNew (new CIncommingConnection(m_sPeerName, m_service, this));
m_clients.push_back( pConnNew );
m_acceptor.async_accept(pConnNew->sock(), boost::bind(&CIncomingServer::handle_accept, this, _1));
}
//IIncomingServer
virtual void OnData(CIncommingConnectionPtr pConn, const char *pData, size_t bytes)
{
std::cout << "Data received" << std::endl ;
}
virtual void OnConnected(CIncommingConnectionPtr pConn)
{
std::cout << "Client connected" << std::endl ;
}
virtual void OnDisconnected(const boost::system::error_code& err, CIncommingConnectionPtr pConn)
{
std::cout << "Client disconnected" << std::endl ;
auto it = std::find(m_clients.begin(), m_clients.end(), pConn) ;
if (it != m_clients.end())
{
m_clients.erase(it);
}
}
void on_timer()
{
//if (NeedTerminate())
//{
// m_service.stop();
// return ;
//}
m_timer.expires_from_now( boost::posix_time::millisec( 500 ) );
m_timer.async_wait( boost::bind( &CIncomingServer::on_timer, this ) );
}
private:
ip::tcp::acceptor m_acceptor ;
std::vector<CIncommingConnectionPtr> m_clients;
std::string m_sPeerName ;
unsigned int m_port ;
boost::asio::io_service &m_service ;
boost::asio::deadline_timer m_timer;
bool m_bAllowManyConnections;
};
int _tmain(int argc, _TCHAR* argv[])
{
boost::asio::io_service service ;
boost::shared_ptr<CIncomingServer> pServer;
try
{
pServer.reset( new CIncomingServer(service, 8000, false, "BS Server"));
pServer->run();
}
catch (const boost::system::system_error &err)
{
std::cout << "Error : " << err.what() << std::endl ;
return 0 ;
}
service.run();
return 0 ;
}
Long story short: you should bind the completion handlers to a shared_ptr returned from shared_from_this(), not to a plain this (the so-called shared_from_this idiom). This way you ensure the correct automatic management of your connection objects' lifespan.
Technically, the following happens now: do_error causes 2 actions to take place:
1. timer cancellation (which is an asynchronous operation)
2. removal of the CIncommingConnectionPtr from the container (which is a synchronous operation)
At point (2) the connection gets destroyed, as there are no other shared_ptrs holding it. Then the timer completion handler fires... Crash!
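For illustration, here is a minimal sketch of that change applied to two of the handlers above (the same substitution applies to the async_receive and async_send calls as well). boost::bind accepts a shared_ptr in place of the raw this pointer, so each outstanding operation keeps the connection alive until its handler has run:
void CIncommingConnection::do_read()
{
    // Bind shared_from_this() instead of this: the stored shared_ptr keeps
    // the connection alive while the receive is outstanding.
    sock_.async_receive(buffer(read_buffer_),
        boost::bind(&CIncommingConnection::handler_read, shared_from_this(), _1, _2));
}

void CIncommingConnection::wait_for_outgoingdata()
{
    m_timer.expires_from_now(boost::posix_time::millisec(100));
    // Same idea for the timer: the pending wait now holds a reference too.
    m_timer.async_wait(
        boost::bind(&CIncommingConnection::on_output_queue_timer, shared_from_this(), _1));
}
With the handlers holding their own references, erasing the connection from m_clients in OnDisconnected no longer destroys the object while the timer callback is still pending; destruction is deferred until the last outstanding handler completes.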