I want my TCP client to connect to multiple servers (each server has a separate IP and port).
I am using async_connect. I can successfully connect to different servers but the read/write fails since the server's corresponding tcp::socket object is not available.
Can you please suggest how I could store each server's socket in some data structure? I tried saving the IP and socket in a std::map, but the first server's socket object is no longer available in memory and the app crashes. I tried making the socket static, but it does not help either.
Please help me!!
Also, I hope I am logically correct in making a single TCP client connect to 2 different servers.
I am sharing the simplified header and cpp file below.
class TCPClient: public Socket
{
public:
TCPClient(boost::asio::io_service& io_service,
boost::asio::ip::tcp::endpoint ep);
virtual ~TCPClient();
void Connect(boost::asio::ip::tcp::endpoint ep, boost::asio::io_service &ioService, void (Comm::*SaveClientDetails)(std::string,void*),
void *pClassInstance);
void TransmitData(const INT8 *pi8Buffer);
void HandleWrite(const boost::system::error_code& err,
size_t szBytesTransferred);
void HandleConnect(const boost::system::error_code &err,
void (Comm::*SaveClientDetails)(std::string,void*),
void *pClassInstance, std::string sIPAddr);
static tcp::socket* CreateSocket(boost::asio::io_service &ioService)
{ return new tcp::socket(ioService); }
static tcp::socket *mSocket;
private:
std::string sMsgRead;
INT8 i8Data[MAX_BUFFER_LENGTH];
std::string sMsg;
boost::asio::deadline_timer mTimer;
};
tcp::socket* TCPClient::mSocket = NULL;
TCPClient::TCPClient(boost::asio::io_service &ioService,
boost::asio::ip::tcp::endpoint ep) :
mTimer(ioService)
{
}
void TCPClient::Connect(boost::asio::ip::tcp::endpoint ep,
boost::asio::io_service &ioService,
void (Comm::*SaveServerDetails)(std::string,void*),
void *pClassInstance)
{
mSocket = CreateSocket(ioService);
std::string sIPAddr = ep.address().to_string();
/* To send connection request to server*/
mSocket->async_connect(ep,boost::bind(&TCPClient::HandleConnect, this,
boost::asio::placeholders::error, SaveServerDetails,
pClassInstance, sIPAddr));
}
void TCPClient::HandleConnect(const boost::system::error_code &err,
void (Comm::*SaveServerDetails)(std::string,void*),
void *pClassInstance, std::string sIPAddr)
{
if (!err)
{
Comm* pInstance = (Comm*) pClassInstance;
if (NULL == pInstance)
{
return;
}
(pInstance->*SaveServerDetails)(sIPAddr,(void*)(mSocket));
}
else
{
return;
}
}
void TCPClient::TransmitData(const INT8 *pi8Buffer)
{
sMsg = pi8Buffer;
if (sMsg.empty())
{
return;
}
mSocket->async_write_some(boost::asio::buffer(sMsg, MAX_BUFFER_LENGTH),
boost::bind(&TCPClient::HandleWrite, this,
boost::asio::placeholders::error,
boost::asio::placeholders::bytes_transferred));
}
void TCPClient::HandleWrite(const boost::system::error_code &err,
size_t szBytesTransferred)
{
if (!err)
{
std::cout<< "Data written to TCP Client port! ";
}
else
{
return;
}
}
You seem to know your problem: the socket object is unavailable. That's 100% by choice. You chose to make it static, so of course there will be only one instance.
Also, I hope I am logically correct in making a single TCP client connect to 2 different servers.
It sounds wrong to me. You can redefine "client" to mean something having multiple TCP connections. In that case at the very minimum you expect a container of tcp::socket objects to hold those (or, you know, a Connection object that contains the tcp::socket).
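For illustration, here is a minimal sketch of that idea, assuming the io_service-era API used in the question; the class and member names are made up. Each connection owns its own (non-static) socket, kept alive by a shared_ptr stored in a map keyed by IP:
#include <boost/asio.hpp>
#include <map>
#include <memory>
#include <string>

using boost::asio::ip::tcp;

class MultiServerClient {
public:
    explicit MultiServerClient(boost::asio::io_service& ios) : mIos(ios) {}

    void Connect(tcp::endpoint ep) {
        // one socket per server; the map (and the handler capture) keep it alive
        auto sock = std::make_shared<tcp::socket>(mIos);
        mSockets[ep.address().to_string()] = sock;
        sock->async_connect(ep, [sock](boost::system::error_code ec) {
            // on success, start reads/writes on *sock; it is still alive here
        });
    }

    std::shared_ptr<tcp::socket> Find(std::string const& sIPAddr) const {
        auto it = mSockets.find(sIPAddr);
        return it == mSockets.end() ? nullptr : it->second;
    }

private:
    boost::asio::io_service& mIos;
    std::map<std::string, std::shared_ptr<tcp::socket>> mSockets;
};
TransmitData would then look up the right socket via Find(ip) instead of touching a single static mSocket.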
BONUS: Demo
For fun and glory, here's what I think you should be looking for.
Notes:
no more new, delete
no more void*, reinterpret casts (!!!)
less manual buffer sizing/handling
no more bind
buffer lifetimes are guaranteed for the corresponding async operations
message queues per connection
connections are on a strand for proper synchronized access to shared state in multi-threading environments
I added in a connection max idle time timeout; it also limits the time taken for any async operation (connect/write). I assumed you wanted something like this because (a) it's common and (b) there was an unused deadline_timer in your question code.
Note the technique of using shared pointers to have Comm manage its own lifetime. Note also that _socket and _outbox are owned by the individual Comm instance.
Live On Coliru
#include <boost/asio.hpp>
#include <deque>
#include <iostream>
using INT8 = char;
using boost::asio::ip::tcp;
using boost::system::error_code;
//using SaveFunc = std::function<void(std::string, void*)>; // TODO abolish void*
using namespace std::chrono_literals;
using duration = std::chrono::high_resolution_clock::duration;
static inline constexpr size_t MAX_BUFFER_LENGTH = 1024;
using Handle = std::weak_ptr<class Comm>;
class Comm : public std::enable_shared_from_this<Comm> {
public:
template <typename Executor>
explicit Comm(Executor ex, tcp::endpoint ep, // ex assumed to be strand
duration max_idle)
: _ep(ep)
, _max_idle(max_idle)
, _socket{ex}
, _timer{_socket.get_executor()}
{
}
~Comm() { std::cerr << "Comm closed (" << _ep << ")\n"; }
void Start() {
post(_socket.get_executor(), [this, self = shared_from_this()] {
_socket.async_connect(
_ep, [this, self = shared_from_this()](error_code ec) {
std::cerr << "Connect: " << ec.message() << std::endl;
if (!ec)
DoIdle();
else
_timer.cancel();
});
DoIdle();
});
}
void Stop() {
post(_socket.get_executor(), [this, self = shared_from_this()] {
if (not _outbox.empty())
std::cerr << "Warning: some messages may be undelivered ("
<< _ep << ")" << std::endl;
_socket.cancel();
_timer.cancel();
});
}
void TransmitData(std::string_view msg) {
post(_socket.get_executor(),
[this, self = shared_from_this(), msg = std::string(msg.substr(0, MAX_BUFFER_LENGTH))] {
_outbox.emplace_back(std::move(msg));
if (_outbox.size() == 1) { // no send loop already active?
DoSendLoop();
}
});
}
private:
// The DoXXXX functions are assumed to be on the strand
void DoSendLoop() {
DoIdle(); // restart max_idle even after last successful send
if (_outbox.empty())
return;
boost::asio::async_write(
_socket, boost::asio::buffer(_outbox.front()),
[this, self = shared_from_this()](error_code ec, size_t xfr) {
std::cerr << "Write " << xfr << " bytes to " << _ep << " " << ec.message() << std::endl;
if (!ec) {
_outbox.pop_front();
DoSendLoop();
} else
_timer.cancel(); // causes Comm shutdown
});
}
void DoIdle() {
_timer.expires_from_now(_max_idle); // cancels any pending wait
_timer.async_wait([this, self = shared_from_this()](error_code ec) {
if (!ec) {
std::cerr << "Timeout" << std::endl;
_socket.cancel();
}
});
}
tcp::endpoint _ep;
duration _max_idle;
tcp::socket _socket;
boost::asio::high_resolution_timer _timer;
std::deque<std::string> _outbox;
};
class TCPClient {
boost::asio::any_io_executor _ex;
std::deque<Handle> _comms;
public:
TCPClient(boost::asio::any_io_executor ex) : _ex(ex) {}
void Add(tcp::endpoint ep, duration max_idle = 3s)
{
auto pcomm = std::make_shared<Comm>(make_strand(_ex), ep, max_idle);
pcomm->Start();
_comms.push_back(pcomm);
// optionally garbage collect expired handles:
std::erase_if(_comms, std::mem_fn(&Handle::expired));
}
void TransmitData(std::string_view msg) {
for (auto& handle : _comms)
if (auto pcomm = handle.lock())
pcomm->TransmitData(msg);
}
void Stop() {
for (auto& handle : _comms)
if (auto pcomm = handle.lock())
pcomm->Stop();
}
};
int main() {
using std::this_thread::sleep_for;
boost::asio::thread_pool ctx(1);
TCPClient c(ctx.get_executor());
c.Add({{}, 8989});
c.Add({{}, 8990}, 1s); // shorter timeout for demo
c.TransmitData("Hello world\n");
c.Add({{}, 8991});
sleep_for(2s); // times out second connection
c.TransmitData("Three is a crowd\n"); // only delivered to 8989 and 8991
sleep_for(1s); // allow for delivery
c.Stop();
ctx.join();
}
Prints (on Coliru):
for p in {8989..8991}; do netcat -t -l -p $p& done
sleep .5; ./a.out
Hello world
Connect: Success
Connect: Success
Hello world
Connect: Success
Write 12 bytes to 0.0.0.0:8989 Success
Write 12 bytes to 0.0.0.0:8990 Success
Timeout
Comm closed (0.0.0.0:8990)
Write Three is a crowd
17Three is a crowd
bytes to 0.0.0.0:8989 Success
Write 17 bytes to 0.0.0.0:8991 Success
Comm closed (0.0.0.0:8989)
Comm closed (0.0.0.0:8991)
The output is a little out of sequence there. Live local demo:
Related
I've created a simple wrapper for boost::asio library. My wrapper consists of 4 main classes: NetServer (server), NetClient (client), NetSession (client/server session) and Network (composition class of these three which also includes all callback methods).
The problem is that the first client/server connection works flawlessly, but when I then stop the server, start it again and then try to connect the client, the server just doesn't recognize the client. It seems like the acceptor callback isn't called. And the client does connect to the server, because first, the connection completes without errors, and second, when I close the server's program, the client receives the error message WSAECONNRESET.
I've created a test program which emulates the procedure written above. It does the following:
1. Starts the server
2. Starts the client
3. Client successfully connects to server
4. Stops the server
5. Client receives the error and disconnects itself
6. Starts the server again
7. Client again successfully connects to server
8. BUT SERVER DOESN'T CALL THE ACCEPTOR CALLBACK ANYMORE
It means that in point 3 the acceptor successfully calls the callback function, but in point 7 the acceptor doesn't call the callback.
I think I do something wrong in stop()/start() method of the server, but I can't figure out what's exactly wrong.
The source of the NetServer class:
NetServer::NetServer(Network& netRef) : net{ netRef }
{
acceptor = std::make_unique<boost::asio::ip::tcp::acceptor>(ioc);
}
NetServer::~NetServer(void)
{
ioc.stop();
if (threadStarted)
{
th.join();
threadStarted = false;
}
if (active)
stop();
}
int NetServer::start(void)
{
assert(getAcceptHandler() != nullptr);
assert(getHeaderHandler() != nullptr);
assert(getDataHandler() != nullptr);
assert(getErrorHandler() != nullptr);
closeAll();
try
{
ep = boost::asio::ip::tcp::endpoint(boost::asio::ip::tcp::v4(), srvPort);
acceptor->open(ep.protocol());
acceptor->bind(ep);
acceptor->listen();
initAccept();
}
catch (system::system_error& e)
{
return e.code().value();
}
if (!threadStarted)
{
th = std::thread([this]()
{
ioc.run();
});
threadStarted = true;
}
active = true;
return Network::NET_OK;
}
int NetServer::stop(void)
{
ioc.post(boost::bind(&NetServer::_stop, this));
return Network::NET_OK;
}
void NetServer::_stop(void)
{
boost::system::error_code ec;
acceptor->close(ec);
for (auto& s : sessions)
closeSession(s.get(), false);
active = false;
}
void NetServer::initAccept(void)
{
sock = std::make_shared<asio::ip::tcp::socket>(ioc);
acceptor->async_accept(*sock.get(), [this](const boost::system::error_code& error)
{
onAccept(error, sock);
});
}
void NetServer::onAccept(const boost::system::error_code& ec, SocketSharedPtr sock)
{
if (ec.value() == 0)
{
if (accHandler())
{
addSession(sock);
initAccept();
}
}
else
getErrorHandler()(nullptr, ec);
}
SessionPtr NetServer::addSession(SocketSharedPtr sock)
{
std::lock_guard<std::mutex> guard(mtxSession);
auto session = std::make_shared<NetSession>(sock, *this, true);
sessions.insert(session);
session->start();
return session;
}
SessionPtr NetServer::findSession(const SessionPtr session)
{
for (auto it = std::begin(sessions); it != std::end(sessions); it++)
if (*it == session)
return *it;
return nullptr;
}
bool NetServer::closeSession(const void *session, bool erase /* = true */)
{
std::lock_guard<std::mutex> guard(mtxSession);
for (auto it = std::begin(sessions); it != std::end(sessions); it++)
if (it->get() == session)
{
try
{
it->get()->getSocket()->cancel();
it->get()->getSocket()->shutdown(asio::socket_base::shutdown_send);
it->get()->getSocket()->close();
it->get()->getSocket().reset();
}
catch (system::system_error& e)
{
UNREFERENCED_PARAMETER(e);
}
if (erase)
sessions.erase(*it);
return true;
}
return false;
}
void NetServer::closeAll(void)
{
using namespace boost::placeholders;
std::lock_guard<std::mutex> guard(mtxSession);
std::for_each(sessions.begin(), sessions.end(), boost::bind(&NetSession::stop, _1));
sessions.clear();
}
bool NetServer::write(const SessionPtr session, std::string msg)
{
if (SessionPtr s = findSession(session); s)
{
s->addMessage(msg);
if (s->canWrite())
s->write();
return true;
}
return false;
}
This is the output from the server:
Enter 0 - server, 1 - client: 0
1. Server started
3. Client connected to server
Stopping server....
4. Server stopped
Net error, server, acceptor: ERROR_OPERATION_ABORTED
Net error, server, ERROR_OPERATION_ABORTED
Client session deleted
6. Server started again
(HERE SHOULD BE "8. Client again connected to server", but the server didn't recognize the reconnected client!)
And from the client:
Enter 0 - server, 1 - client: 1
2. Client started and connected to server
Net error, client: ERROR_FILE_NOT_FOUND
5. Client disconnected from server
Waiting 3 sec before reconnect...
Connecting to server...
7. Client started and connected to server
(WHEN I CLOSE THE SERVER WINDOW, I RECEIVE HERE THE "Net error, client: WSAECONNRESET" MESSAGE - it means the client was connected to the server anyhow!)
If the code of NetClient, NetSession and Network is necessary, just let me know.
Thanks in advance
Wow. There's a lot to unpack. There is quite a lot of code smell that reminds me of some books on Asio programming that turned out to be... not excellent in my previous experience.
I couldn't give any real advice without grokking your code, which requires me to review in-depth and add missing bits. So let me just provide you with my reviewed/fixed code first, then we'll talk about some of the details.
A few areas where you seemed to have trouble making up your mind:
whether to use a strand or to use mutex locking
whether to use async or sync (e.g. closeSession is completely synchronous and blocking)
whether to use shared-pointers for lifetime or not: on the one hand you have NetSession support shared_from_this, but on the other hand you are keeping them alive in a sessions collection.
whether to use smart pointers or raw pointers (sp.get() is a code smell)
whether to use void* pointers or forward declared structs for opaque implementation
whether to use exceptions or to use error codes. Specifically:
return e.code().value();
is a Very Bad Idea. Just return error_code already. Or just propagate the exception.
judging from the use, my best bet is that sessions is std::set<SessionPtr>. Then it's funny that you're doing linear searches. In fact, findSession could be:
SessionPtr findSession(SessionPtr const& session) {
std::lock_guard guard(mtxSessions);
return sessions.contains(session)? session: nullptr;
}
In fact, given some natural invariants, it could just be
auto findSession(SessionPtr s) { return std::move(s); }
Note as well, you had forgotten to lock the mutex in findSession
closeSession completely violates Law Of Demeter, 6*3 times over if you will. In my example I make it so SessHandle is a weak pointer to NetSession and you can just write:
for (auto& handle : sessions)
if (auto sess = handle.lock())
sess->close();
Of course, sess->close() should not block
Also, it should correctly synchronize on the session e.g. using the sessions strand:
void close() {
return post(sock_.get_executor(), [this, self = shared_from_this()] {
error_code ec;
if (!ec) sock_.cancel(ec);
if (!ec) sock_.shutdown(tcp::socket::shutdown_send, ec);
if (!ec) sock_.close(ec);
});
}
If you insist, you can make it so the caller can still await the result and receive any exceptions:
std::future<void> close() {
return post(
sock_.get_executor(),
std::packaged_task<void()>{[this, self = shared_from_this()] {
sock_.cancel();
sock_.shutdown(tcp::socket::shutdown_send);
sock_.close();
}});
}
Honestly, that seems overkill since you never look at the return value anyways.
In general, I recommend leaving socket::close() to the destructor. It avoids a specific class of race-conditions on socket handles.
Don't use boolean flags (isThreadActive is better replaced with th.joinable())
Apparently you had NetSession::stop which I imagine did largely the same as closeSession, but in the right place? I replaced it with the new NetSession::close.
Subtly, when accHandler returned false, you would exit the accept loop altogether. I doubt that was on purpose.
try to minimize time under locks. I will show you how to do without the lock entirely instead.
Demo Listing
#include <boost/asio.hpp>
#include <boost/system/error_code.hpp>
#include <deque>
#include <iostream>
#include <iomanip>
#include <set>
using namespace std::chrono_literals;
using namespace std::placeholders;
namespace asio = boost::asio;
using asio::ip::tcp;
using boost::system::error_code;
static inline std::ostream debug(std::cerr.rdbuf());
struct Network {
static constexpr error_code NET_OK{};
};
struct NetSession; // opaque forward reference
struct NetServer;
using SessHandle = std::weak_ptr<NetSession>; // opaque handle
using Sessions = std::set<SessHandle, std::owner_less<>>;
struct NetSession : std::enable_shared_from_this<NetSession> {
NetSession(tcp::socket&& s, NetServer& srv, bool)
: sock_(std::move(s))
, srv_(srv) {
debug << "New session from " << getPeer() << std::endl;
}
void start() {
post(sock_.get_executor(),
std::bind(&NetSession::do_read, shared_from_this()));
}
tcp::endpoint getPeer() const { return peer_; }
void close() {
return post(sock_.get_executor(), [this, self = shared_from_this()] {
debug << "Closing " << getPeer() << std::endl;
error_code ec;
if (!ec) sock_.cancel(ec);
if (!ec) sock_.shutdown(tcp::socket::shutdown_send, ec);
// if (!ec) sock_.close(ec);
});
}
void addMessage(std::string msg) {
post(sock_.get_executor(),
[this, msg = std::move(msg), self = shared_from_this()] {
outgoing_.push_back(std::move(msg));
if (canWrite())
write_loop();
});
}
private:
// assumed on (logical) strand
bool canWrite() const { // FIXME misnomer: shouldStartWriteLoop()?
return outgoing_.size() == 1;
}
void write_loop() {
if (outgoing_.empty())
return;
async_write(sock_, asio::buffer(outgoing_.front()),
[this, self = shared_from_this()](error_code ec, size_t) {
if (!ec) {
outgoing_.pop_front();
write_loop();
}
});
}
void do_read() {
incoming_.clear();
async_read_until(
sock_, asio::dynamic_buffer(incoming_), "\n",
std::bind(&NetSession::on_read, shared_from_this(), _1, _2));
}
void on_read(error_code ec, size_t);
tcp::socket sock_;
tcp::endpoint peer_ = sock_.remote_endpoint();
NetServer& srv_;
std::string incoming_;
std::deque<std::string> outgoing_;
};
using SessionPtr = std::shared_ptr<NetSession>;
using SocketSharedPtr = std::shared_ptr<tcp::socket>;
struct NetServer {
NetServer(Network& netRef) : net{netRef} {}
~NetServer()
{
if (acceptor.is_open())
acceptor.cancel(); // TODO seems pretty redundant
stop();
if (th.joinable())
th.join();
}
std::function<bool()> accHandler;
std::function<void(SocketSharedPtr, error_code)> errHandler;
// TODO headerHandler
std::function<void(SessionPtr, error_code, std::string)> dataHandler;
error_code start() {
assert(accHandler);
assert(errHandler);
assert(dataHandler);
closeAll(sessions);
error_code ec;
if (!ec) acceptor.open(tcp::v4(), ec);
if (!ec) acceptor.bind({{}, srvPort}, ec);
if (!ec) acceptor.listen(tcp::socket::max_listen_connections, ec);
if (!ec) {
do_accept();
if (!th.joinable()) {
th = std::thread([this] { ioc.run(); }); // TODO exceptions!
}
}
if (ec && acceptor.is_open())
acceptor.close();
return ec;
}
void stop() { //
post(ioc, std::bind(&NetServer::do_stop, this));
}
void closeSession(SessHandle handle, bool erase = true) {
post(acceptor.get_executor(), [=, this] {
if (auto s = handle.lock()) {
s->close();
}
if (erase) {
sessions.erase(handle);
}
});
}
void closeAll() {
post(acceptor.get_executor(), [this] {
closeAll(sessions);
sessions.clear();
});
}
// TODO FIXME is the return value worth it?
bool write(SessionPtr const& session, std::string msg) {
return post(acceptor.get_executor(),
std::packaged_task<bool()>{std::bind(
&NetServer::do_write, this, session, std::move(msg))})
.get();
}
// compare
void writeAll(std::string msg) {
post(acceptor.get_executor(),
std::bind(&NetServer::do_write_all, this, std::move(msg)));
}
private:
Network& net;
asio::io_context ioc;
tcp::acceptor acceptor{ioc}; // active -> acceptor.is_open()
std::thread th; // threadActive -> th.joinable()
Sessions sessions;
std::uint16_t srvPort = 8989;
// std::mutex mtxSessions; // note naming; also replaced by logical strand
// assumed on acceptor logical strand
void do_accept() {
acceptor.async_accept(
make_strand(ioc), [this](error_code ec, tcp::socket sock) {
if (ec.failed()) {
return errHandler(nullptr, ec);
}
if (accHandler()) {
auto s = std::make_shared<NetSession>(std::move(sock),
*this, true);
sessions.insert(s);
s->start();
}
do_accept();
});
}
SessionPtr do_findSession(SessionPtr const& session) {
return sessions.contains(session) ? session : nullptr;
}
bool do_write(SessionPtr session, std::string msg) {
if (auto s = do_findSession(session)) {
s->addMessage(std::move(msg));
return true;
}
return false;
}
void do_write_all(std::string msg) {
for(auto& handle : sessions)
if (auto sess = handle.lock())
do_write(sess, msg);
}
static void closeAll(Sessions const& sessions) {
for (auto& handle : sessions)
if (auto sess = handle.lock())
sess->close();
}
void do_stop()
{
if (acceptor.is_open()) {
error_code ec;
acceptor.close(ec); // TODO error handling?
}
closeAll(sessions); // TODO FIXME why not clear sessions?
}
};
// Implementation must be after NetServer definition:
void NetSession::on_read(error_code ec, size_t) {
if (srv_.dataHandler)
srv_.dataHandler(shared_from_this(), ec, std::move(incoming_));
if (!ec)
do_read();
}
int main() {
Network net;
NetServer srv{net};
srv.accHandler = [] { return true; };
srv.errHandler = [](SocketSharedPtr, error_code ec) {
debug << "errHandler: " << ec.message() << std::endl;
};
srv.dataHandler = [](SessionPtr sess, error_code ec, std::string msg) {
debug << "dataHandler: " << sess->getPeer() << " " << ec.message()
<< " " << std::quoted(msg) << std::endl;
};
srv.start();
std::this_thread::sleep_for(10s);
std::cout << "Shutdown started" << std::endl;
srv.writeAll("We're going to shutdown, take care!\n");
srv.stop();
}
Live Demo:
I want to make a simple server that listens for incoming connection requests, makes connections and sends some data. When I start it, the acceptor looks like it's working fine and waits for incoming connection requests, but when my client tries to connect, the program crashes. I can't even catch any exceptions with catch(...).
When I start the program, the terminal output looks fine, but when I try to connect, the client application receives an error code.
Is there something fundamentally wrong with my my_acceptor class?
class my_acceptor{
public:
my_acceptor(asio::io_context& ios, unsigned short port_num) :
m_ios(ios),
port{port_num},
m_acceptor{ios}{}
//start accepting incoming connection requests
void Start()
{
std::cout << "Acceptor Start" << std::endl;
boost::asio::ip::tcp::endpoint endpoint(boost::asio::ip::tcp::v4(), port);
m_acceptor.open(endpoint.protocol());
m_acceptor.set_option(boost::asio::ip::tcp::acceptor::reuse_address(true));
m_acceptor.bind(endpoint);
m_acceptor.listen();
InitAccept();
}
void Stop(){}
private:
void InitAccept()
{
std::cout << "Acceptor InitAccept" << std::endl;
std::shared_ptr<asio::ip::tcp::socket> sock{new asio::ip::tcp::socket(m_ios)};
m_acceptor.async_accept(*sock.get(),
[this, sock](const boost::system::error_code& error)
{
onAccept(error, sock);
});
}
void onAccept(const boost::system::error_code& ec, std::shared_ptr<asio::ip::tcp::socket> sock)
{
std::cout << "Acceptor onAccept" << std::endl;
}
private:
unsigned short port;
asio::io_context& m_ios;
asio::ip::tcp::acceptor m_acceptor;
};
Just in case this is the Server code that wraps my_acceptor
class Server{
public:
Server(){}
//start the server
void Start(unsigned short port_num, unsigned int thread_pool_size)
{
assert(thread_pool_size > 0);
//create specified number of threads and add them to the pool
for(unsigned int i = 0; i < thread_pool_size; ++i)
{
std::unique_ptr<std::thread> th(
new std::thread([this]()
{
m_ios.run();
}));
m_thread_pool.push_back(std::move(th));
}
//create and start acceptor
acc.reset(new my_acceptor(m_ios, port_num));
acc->Start();
}
//stop the server
void Stop()
{
work_guard.reset();
acc->Stop();
m_ios.stop();
for(auto& th : m_thread_pool)
{
th->join();
}
}
private:
asio::io_context m_ios;
boost::asio::executor_work_guard<boost::asio::io_context::executor_type> work_guard = boost::asio::make_work_guard(m_ios);
std::unique_ptr<my_acceptor> acc;
std::vector<std::unique_ptr<std::thread>> m_thread_pool;
};
There's a threading bug, at least. tcp::acceptor is not thread-safe and you (potentially) run multiple threads. So you will need to make sure all acceptor access happens on a strand.
my_acceptor(asio::io_context& ios, unsigned short port_num) :
m_ios(ios),
port{port_num},
m_acceptor{make_strand(ios)}{}
And then any operation involving it must be on that strand. E.g., the missing Stop() code should look like:
void Stop(){
post(m_acceptor.get_executor(), [this] { m_acceptor.cancel(); });
}
I leave the initial accept as-is because at that point there aren't multiple threads involved.
Likewise, in Start() and Stop() you should check whether acc is null, because acc->Stop() would throw, and just replacing a running acc would cause Undefined Behaviour due to deleting an instance that still has async operations in flight.
As a sidenote, m_ios.stop() should not be necessary if you stop the running acceptor. In the future you might have to signal any client connections to stop, in order for the threads to naturally join.
Here's how I'd complete the accept loop:
void onAccept(error_code ec, std::shared_ptr<tcp::socket> sock)
{
std::cout << "Acceptor onAccept " << ec.message() << " " << sock.get() << std::endl;
if (!ec) {
InitAccept();
}
}
Note how unless the socket is canceled (or otherwise in error), we keep accepting.
I think the threading issue was likely your big problem. The result after my suggestions works:
Live On Coliru
#include <boost/asio.hpp>
#include <iostream>
#include <memory>
#include <thread>
using namespace std::chrono_literals;
namespace asio = boost::asio;
using boost::system::error_code;
using asio::ip::tcp;
class my_acceptor {
public:
my_acceptor(asio::io_context& ios, unsigned short port_num) :
m_ios(ios),
port{port_num},
m_acceptor{make_strand(ios)}{}
//start accepting incoming connection requests
void Start()
{
std::cout << "Acceptor Start" << std::endl;
tcp::endpoint endpoint(tcp::v4(), port);
m_acceptor.open(endpoint.protocol());
m_acceptor.set_option(tcp::acceptor::reuse_address(true));
m_acceptor.bind(endpoint);
m_acceptor.listen();
InitAccept();
}
void Stop(){
post(m_acceptor.get_executor(), [this] { m_acceptor.cancel(); });
}
private:
void InitAccept()
{
std::cout << "Acceptor InitAccept" << std::endl;
auto sock = std::make_shared<tcp::socket>(m_ios);
m_acceptor.async_accept(*sock,
[this, sock](error_code error) { onAccept(error, sock); });
}
void onAccept(error_code ec, const std::shared_ptr<tcp::socket>& sock)
{
std::cout << "Acceptor onAccept " << ec.message() << " " << sock.get() << std::endl;
if (!ec) {
InitAccept();
}
}
private:
asio::io_context& m_ios;
unsigned short port;
tcp::acceptor m_acceptor;
};
class Server{
public:
Server() = default;
//start the server
void Start(unsigned short port_num, unsigned int thread_pool_size)
{
assert(!acc); // otherwise UB results
assert(thread_pool_size > 0);
//create specified number of threads and add them to the pool
for(unsigned int i = 0; i < thread_pool_size; ++i)
{
std::unique_ptr<std::thread> th(
new std::thread([this]() { m_ios.run(); }));
m_thread_pool.push_back(std::move(th));
}
//create and start acceptor
acc = std::make_unique<my_acceptor>(m_ios, port_num);
acc->Start();
}
//stop the server
void Stop()
{
work_guard.reset();
if (acc) {
acc->Stop();
}
//m_ios.stop();
for(auto& th : m_thread_pool) {
th->join();
}
acc.reset();
}
private:
asio::io_context m_ios;
asio::executor_work_guard<asio::io_context::executor_type>
work_guard = make_work_guard(m_ios);
std::unique_ptr<my_acceptor> acc;
std::vector<std::unique_ptr<std::thread>> m_thread_pool;
};
int main() {
Server s;
s.Start(6868, 1);
std::this_thread::sleep_for(10s);
s.Stop();
}
Testing with netcat as client:
for msg in one two three; do
sleep 1
nc 127.0.0.1 6868 <<< "$msg"
done
Prints
Acceptor Start
Acceptor InitAccept
Acceptor onAccept Success 0x1f26960
Acceptor InitAccept
Acceptor onAccept Success 0x7f59f80009d0
Acceptor InitAccept
Acceptor onAccept Success 0x7f59f8000a50
Acceptor InitAccept
Acceptor onAccept Operation canceled 0x7f59f80009d0
I'm currently trying to get a chatroom type program working with boost::asio. In the current state, the server is able to accept connections from clients, and then the clients are able to send messages to the server (at which point, the server formats the message a little bit and then sends it to every client currently connected).
The problem I am having is as follows:
server starts
client 0 connects
client 0 sends a message
(the message is received by the server and then sent back to client 0 who receives it correctly)
client 1 connects
client 1 sends a message
(the message is received by the server and then sent back to client 0 and client 1 who both receive it correctly)
client 0 tries to send a message again
(the message is received by the server and the server processes the header then attempts to call async_read again to read the body of the message, however the socket member variable for client 0 no longer exists and I get a segfault)
I find this really strange because the server still has a valid socket object for client 0 (otherwise it wouldn't be able to send client 1's messages to client 0).
Here is the relevant code:
tcp_connection class (where the segfault occurs)
#include <deque>
#include <boost/asio.hpp>
#include <boost/bind.hpp>
using boost::asio::ip::tcp;
class tcp_connection {
public:
tcp_connection(tcp::socket socket, int id, std::function<void (std::size_t, char*, std::size_t)> read_handler)
: socket_(std::move(socket)), id_(id), read_handler_(read_handler) {
}
void start() {
char first_message[] = "server: connected";
net_message msg(first_message, strlen(first_message));
send(msg);
read_header();
}
void send(net_message msg) {
bool write_in_progress = !write_messages_.empty();
write_messages_.push_back(msg);
if (!write_in_progress) {
do_write();
}
}
int get_id() { return id_; }
private:
void read_header() {
boost::asio::async_read(socket_, boost::asio::buffer(read_message_.get_data(), net_message::header_length),
boost::bind(&tcp_connection::handle_read_header, this, boost::asio::placeholders::error,
boost::asio::placeholders::bytes_transferred));
}
void handle_read_header(const boost::system::error_code e, std::size_t bytes_transferred) {
read_message_.decode_header();
read_body();
}
void read_body() {
/*
######################
THIS IS WHERE THE SEGFAULT OCCURS.
socket_ is no longer valid for some reason
despite socket_ still being valid for any async_write
operations that need to be handled by the do_write() function
######################
*/
boost::asio::async_read(socket_, boost::asio::buffer(read_message_.get_data() + net_message::header_length, read_message_.get_body_length()),
boost::bind(&tcp_connection::handle_read_body, this, boost::asio::placeholders::error,
boost::asio::placeholders::bytes_transferred));
}
void handle_read_body(const boost::system::error_code e, std::size_t bytes_transferred) {
char body[read_message_.get_body_length()];
memcpy(body, read_message_.get_body(), read_message_.get_body_length());
// call the read_handler from the net_server object
read_handler_(id_, body, read_message_.get_body_length());
read_header();
}
void handle_write(const boost::system::error_code e, std::size_t bytes_transferred) {
}
void do_write() {
boost::asio::async_write(socket_, boost::asio::buffer(write_messages_.front().get_data(),
write_messages_.front().get_body_length() + net_message::header_length),
[this] (boost::system::error_code ec, std::size_t /*length*/) {
if (!ec) {
write_messages_.pop_front();
if (!write_messages_.empty()) {
do_write();
}
} else {
std::cerr << "error with writing to client " << id_ << " with error code: " << ec << std::endl;
}
});
}
tcp::socket socket_;
std::function<void (std::size_t, char*, std::size_t)> read_handler_;
net_message read_message_;
std::deque<net_message> write_messages_;
int id_;
};
net_server class
class net_server {
public:
net_server(boost::asio::io_context& io_context, std::size_t port,
std::function<void (std::size_t)> accept_handler,
std::function<void (std::size_t, char*, std::size_t)> read_handler)
: io_context_(io_context), acceptor_(io_context, tcp::endpoint(tcp::v4(), 1234)),
accept_handler_(accept_handler), read_handler_(read_handler) {
start_accept();
}
void send_to(std::size_t id, const char* body, std::size_t length) {
net_message msg(body, length);
connections_[id].send(msg);
}
void send_to_all(const char* body, std::size_t length) {
net_message msg(body, length);
for (int i = 0; i < connections_.size(); i++) {
connections_[i].send(msg);
}
}
void send_to_all_except(std::size_t id, const char* body, std::size_t length) {
net_message msg(body, length);
for (int i = 0; i < connections_.size(); i++) {
if (i == id) continue;
connections_[i].send(msg);
}
}
private:
void start_accept() {
acceptor_.async_accept(
[this](boost::system::error_code ec, tcp::socket socket) {
if (!ec) {
std::unique_lock lock(connections_mutex_);
std::size_t index = connections_.size();
connections_.emplace_back(std::move(socket), connections_.size(), read_handler_);
lock.unlock();
connections_[index].start();
accept_handler_(index);
}
start_accept();
});
}
boost::asio::io_context& io_context_;
tcp::acceptor acceptor_;
std::vector<tcp_connection> connections_;
std::mutex connections_mutex_;
std::function<void (std::size_t)> accept_handler_;
std::function<void (std::size_t, char*, std::size_t)> read_handler_;
};
main cpp program that sets up the server
#include <iostream>
class client {
public:
client()
: valid_(false)
{}
client(int id)
: id_(id), valid_(true)
{}
const char * get_name() const {
std::string str("Client ");
str += std::to_string(id_);
return str.c_str();
}
private:
int id_;
bool valid_;
};
class chat_server {
public:
chat_server(boost::asio::io_context& io_context, std::size_t port)
: server_(io_context, port, std::bind(&chat_server::handle_accept, this, std::placeholders::_1),
std::bind(&chat_server::handle_read, this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3))
{}
void handle_accept(std::size_t client_index) {
std::scoped_lock lock(clients_mutex_);
if (clients_.size() != client_index) {
std::cerr << "New client connecting at index " << client_index <<
" however, clients_ vector currently has size " << clients_.size() << std::endl;
if (clients_.size() < client_index) {
clients_.resize(client_index);
clients_.emplace_back(client_index);
} else {
clients_[client_index] = client(client_index);
}
} else {
clients_.emplace_back(client_index);
}
std::cout << "New client with id: " << client_index << std::endl;
}
void handle_read(std::size_t sender, char* body, std::size_t length) {
// whenever the server receives a message, this function will be called
// where clients[sender] will be the connection that sent the message
// body will be a pointer to the start of the body of the message
// and length will be the length of the body
// we will process the message here and decide if / what to send in response
// (for example, in a chat server, we'd want to forward the message to every client
// with the name of the sender attached to it so that clients can update the chat dialogue)
std::size_t sender_name_len = strlen(clients_[sender].get_name());
std::size_t new_message_length = sender_name_len + length + 3;
char new_message[new_message_length];
sprintf(new_message, "%s: ", clients_[sender].get_name());
memcpy(new_message + sender_name_len + 2, body, length);
new_message[new_message_length - 1] = '\0';
std::cout << new_message << std::endl;
server_.send_to_all(new_message, new_message_length-1);
}
private:
net_server server_;
std::vector<client> clients_;
std::mutex clients_mutex_;
};
int main() {
try {
boost::asio::io_context io_context;
chat_server serv(io_context, 1234);
io_context.run();
} catch (std::exception& e) {
std::cerr << e.what() << std::endl;
}
return 0;
}
What I want is for my server class to maintain a list of tcp_connections which each represent a client that has connected to the server. When the server accepts a connection, a tcp_connection object is created for that connection and then that tcp_connection object starts an infinite asynchronous "read_header -> read_body -> repeat" loop. Whenever the server receives a message from any of the clients, it should format the message and then send it to every tcp_connection in the list.
Your connections_ member variable is being reallocated when you add new elements to it. In your various handlers in tcp_connection you are capturing this; when the vector reallocates, the elements are moved to new storage, so the captured this becomes a dangling pointer and your handlers then operate on the old, destroyed copy of the object, causing undefined behaviour.
The simple solution is to make your connections_ vector a vector of std::shared_ptr.
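For example, here is a minimal sketch of that change, reusing the accept handler from the question; only connections_ and start_accept change, and the rest of net_server is assumed unchanged:
// was: std::vector<tcp_connection> connections_;
std::vector<std::shared_ptr<tcp_connection>> connections_;

void start_accept() {
    acceptor_.async_accept(
        [this](boost::system::error_code ec, tcp::socket socket) {
            if (!ec) {
                std::unique_lock lock(connections_mutex_);
                std::size_t index = connections_.size();
                // each connection now lives at a stable address, so vector
                // reallocation no longer invalidates the `this` captured in its handlers
                connections_.push_back(std::make_shared<tcp_connection>(
                    std::move(socket), index, read_handler_));
                lock.unlock();
                connections_[index]->start();
                accept_handler_(index);
            }
            start_accept();
        });
}
Call sites change accordingly, e.g. connections_[id].send(msg) becomes connections_[id]->send(msg).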
It is also best practice to capture your object's shared_ptr in your handlers so that the object can't go out of scope before your callbacks execute. e.g.:
void do_write() {
auto self = shared_from_this();
boost::asio::async_write(socket_, boost::asio::buffer(write_messages_.front().get_data(),
write_messages_.front().get_body_length() + net_message::header_length),
[self, this] (boost::system::error_code ec, std::size_t /*length*/) {
if (!ec) {
write_messages_.pop_front();
if (!write_messages_.empty()) {
do_write();
}
} else {
std::cerr << "error with writing to client " << id_ << " with error code: " << ec << std::endl;
}
});
}
You'll need to derive tcp_connection from std::enable_shared_from_this<tcp_connection> and make sure that you have created the shared_ptr before setting up any handlers (e.g. don't create handlers in the constructor).
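A minimal, standalone sketch of that pattern (this is not the asker's full class; the net_message framing and the read loop are left out):
#include <boost/asio.hpp>
#include <iostream>
#include <memory>
#include <string>

using boost::asio::ip::tcp;

class tcp_connection : public std::enable_shared_from_this<tcp_connection> {
public:
    explicit tcp_connection(tcp::socket socket) : socket_(std::move(socket)) {
        // no async calls here: shared_from_this() must not be used in the constructor
    }

    void start() { // called only once a shared_ptr owns the object
        auto self = shared_from_this();
        boost::asio::async_write(
            socket_, boost::asio::buffer(greeting_),
            [self](boost::system::error_code ec, std::size_t /*bytes*/) {
                // `self` keeps the connection alive until this handler has run
                if (ec)
                    std::cerr << "write failed: " << ec.message() << std::endl;
            });
    }

private:
    tcp::socket socket_;
    std::string greeting_ = "server: connected\n";
};

// creation site, e.g. inside the accept handler:
//   auto conn = std::make_shared<tcp_connection>(std::move(socket));
//   conn->start();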
I have the following snippet:
void TcpConnection::Send(const std::vector<uint8_t>& buffer) {
std::shared_ptr<std::vector<uint8_t>> bufferCopy = std::make_shared<std::vector<uint8_t>>(buffer);
auto socket = m_socket;
m_socket->async_send(asio::buffer(bufferCopy->data(), bufferCopy->size()), [socket, bufferCopy](const boost::system::error_code& err, size_t bytesSent)
{
if (err)
{
logwarning << "clientcomms_t::sendNext encountered error: " << err.message();
// Assume that the communications path is no longer
// valid.
socket->close();
}
});
}
This code leads to a memory leak. If the m_socket->async_send call is commented out, there is no memory leak. I cannot understand why bufferCopy is not freed after the callback is dispatched. What am I doing wrong?
Windows is used.
Since you don't show any relevant code, and the code shown does not contain a strict problem, I'm going to assume from the code smells.
The smell is that you have a TcpConnection class that is not enable_shared_from_this<TcpConnection> derived. This leads me to suspect you didn't plan ahead, because there's no possible reasonable way to continue using the instance after the completion of any asynchronous operation (like the async_send).
This leads me to suspect you have a crucially simple problem, which is that your completion handler never runs. There's only one situation that could explain this, and that leads me to assume you never run() the io_service instance.
Here's the situation live:
Live On Coliru
#include <boost/asio.hpp>
namespace asio = boost::asio;
using asio::ip::tcp;
#include <iostream>
auto& logwarning = std::clog;
struct TcpConnection {
using Buffer = std::vector<uint8_t>;
void Send(Buffer const &);
TcpConnection(asio::io_service& svc) : m_socket(std::make_shared<tcp::socket>(svc)) {}
tcp::socket& socket() const { return *m_socket; }
private:
std::shared_ptr<tcp::socket> m_socket;
};
void TcpConnection::Send(Buffer const &buffer) {
auto bufferCopy = std::make_shared<Buffer>(buffer);
auto socket = m_socket;
m_socket->async_send(asio::buffer(bufferCopy->data(), bufferCopy->size()),
[socket, bufferCopy](const boost::system::error_code &err, size_t /*bytesSent*/) {
if (err) {
logwarning << "clientcomms_t::sendNext encountered error: " << err.message();
// Assume that the communications path is no longer
// valid.
socket->close();
}
});
}
int main() {
asio::io_service svc;
tcp::acceptor a(svc, tcp::v4());
a.bind({{}, 6767});
a.listen();
boost::system::error_code ec;
do {
TcpConnection conn(svc);
a.accept(conn.socket(), ec);
char const* greeting = "whale hello there!\n";
conn.Send({greeting, greeting+strlen(greeting)});
} while (!ec);
}
You'll see that any client connecting, e.g. with netcat localhost 6767, will receive the greeting, after which, surprisingly, the connection will stay open instead of being closed.
You'd expect the connection to be closed by the server side either way, either because
a transmission error occurred in async_send
or because after the completion handler is run, it is destroyed and hence the captured shared pointers are destructed. Not only would that free the copied buffer, but it would also run the destructor of socket, which would close the connection.
This clearly confirms that the completion handler never runs. The fix is "easy", find a place to run the service:
int main() {
asio::io_service svc;
tcp::acceptor a(svc, tcp::v4());
a.set_option(tcp::acceptor::reuse_address());
a.bind({{}, 6767});
a.listen();
std::thread th;
{
asio::io_service::work keep(svc); // prevent service running out of work early
th = std::thread([&svc] { svc.run(); });
boost::system::error_code ec;
for (int i = 0; i < 11 && !ec; ++i) {
TcpConnection conn(svc);
a.accept(conn.socket(), ec);
char const* greeting = "whale hello there!\n";
conn.Send({greeting, greeting+strlen(greeting)});
}
}
th.join();
}
This runs 11 connections and exits leak-free.
Better:
It becomes a lot cleaner when the accept loop is also async, and the TcpConnection is properly shared as hinted above:
Live On Coliru
#include <boost/asio.hpp>
namespace asio = boost::asio;
using asio::ip::tcp;
#include <memory>
#include <thread>
#include <iostream>
auto& logwarning = std::clog;
struct TcpConnection : std::enable_shared_from_this<TcpConnection> {
using Buffer = std::vector<uint8_t>;
TcpConnection(asio::io_service& svc) : m_socket(svc) {}
void start() {
char const* greeting = "whale hello there!\n";
Send({greeting, greeting+strlen(greeting)});
}
void Send(Buffer);
private:
friend struct Server;
Buffer m_output;
tcp::socket m_socket;
};
struct Server {
Server(unsigned short port) {
_acceptor.set_option(tcp::acceptor::reuse_address());
_acceptor.bind({{}, port});
_acceptor.listen();
do_accept();
}
~Server() {
keep.reset();
_svc.post([this] { _acceptor.cancel(); });
if (th.joinable())
th.join();
}
private:
void do_accept() {
auto conn = std::make_shared<TcpConnection>(_svc);
_acceptor.async_accept(conn->m_socket, [this,conn](boost::system::error_code ec) {
if (ec)
logwarning << "accept failed: " << ec.message() << "\n";
else {
conn->start();
do_accept();
}
});
}
asio::io_service _svc;
// prevent service running out of work early:
std::unique_ptr<asio::io_service::work> keep{std::make_unique<asio::io_service::work>(_svc)};
std::thread th{[this]{_svc.run();}}; // TODO handle handler exceptions
tcp::acceptor _acceptor{_svc, tcp::v4()};
};
void TcpConnection::Send(Buffer buffer) {
m_output = std::move(buffer);
auto self = shared_from_this();
m_socket.async_send(asio::buffer(m_output),
[self](const boost::system::error_code &err, size_t /*bytesSent*/) {
if (err) {
logwarning << "clientcomms_t::sendNext encountered error: " << err.message() << "\n";
// not holding on to `self` means the socket gets closed
}
// do more with `self` which points to the TcpConnection instance...
});
}
int main() {
Server server(6868);
std::this_thread::sleep_for(std::chrono::seconds(3));
}
I am creating a TCP server that will use boost asio which will accept connections from many clients, receive data, and send confirmations. The thing is that I want to be able to accept all the clients but I want to work only with one at a time. I want all the other transactions to be kept in a queue.
Example:
Client1 connects
Client2 connects
Client1 sends data and asks for reply
Client2 sends data and asks for reply
Client2's request is put into queue
Client1's data is read, server replies, end of transaction
Client2's request is taken from the queue, server reads data, replies end of transaction.
So this is something between an asynchronous server and a blocking server. I want to do just one thing at a time, but at the same time I want to be able to store all client sockets and their requests in the queue.
I was able to create server-client communication with all the functionality that I need, but only on a single thread. Once the client disconnects, the server terminates as well. I don't really know how to start implementing what I have mentioned above. Should I open a new thread each time a connection is accepted? Should I use async_accept or blocking accept?
I have read the boost::asio chat example, where many clients connect to a single server, but there is no queuing mechanism like the one I need here.
I am aware that this post might be a bit confusing, but TCP servers are new to me so I am not familiar enough with the terminology. There is also no source code to post because I am asking only for help with the concept of this project.
Just keep accepting.
You show no code, but it typically looks like
void do_accept() {
acceptor_.async_accept(socket_, [this](boost::system::error_code ec) {
std::cout << "async_accept -> " << ec.message() << "\n";
if (!ec) {
std::make_shared<Connection>(std::move(socket_))->start();
do_accept(); // THIS LINE
}
});
}
If you don't include the line marked // THIS LINE you will indeed not accept more than 1 connection.
If this doesn't help, please include some code we can work from.
For Fun, A Demo
This uses just standard library features for the non-network part.
Network Listener
The network part is as outlined before:
#include <boost/asio.hpp>
#include <boost/asio/high_resolution_timer.hpp>
#include <istream>
using namespace std::chrono_literals;
using Clock = std::chrono::high_resolution_clock;
namespace Shared {
using PostRequest = std::function<void(std::istream& is)>;
}
namespace Network {
namespace ba = boost::asio;
using ba::ip::tcp;
using error_code = boost::system::error_code;
using Shared::PostRequest;
struct Connection : std::enable_shared_from_this<Connection> {
Connection(tcp::socket&& s, PostRequest poster) : _s(std::move(s)), _poster(poster) {}
void process() {
auto self = shared_from_this();
ba::async_read(_s, _request, [this,self](error_code ec, size_t) {
if (!ec || ec == ba::error::eof) {
std::istream reader(&_request);
_poster(reader);
}
});
}
private:
tcp::socket _s;
ba::streambuf _request;
PostRequest _poster;
};
struct Server {
Server(unsigned port, PostRequest poster) : _port(port), _poster(poster) {}
void run_for(Clock::duration d = 30s) {
_stop.expires_from_now(d);
_stop.async_wait([this](error_code ec) { if (!ec) _svc.post([this] { _a.close(); }); });
_a.listen();
do_accept();
_svc.run();
}
private:
void do_accept() {
_a.async_accept(_s, [this](error_code ec) {
if (!ec) {
std::make_shared<Connection>(std::move(_s), _poster)->process();
do_accept();
}
});
}
unsigned short _port;
PostRequest _poster;
ba::io_service _svc;
ba::high_resolution_timer _stop { _svc };
tcp::acceptor _a { _svc, tcp::endpoint {{}, _port } };
tcp::socket _s { _svc };
};
}
The only "connection" to the work service part is the PostRequest handler that is passed to the server at construction:
Network::Server server(6767, handler);
I've also opted for async operations, so we can have a timer to stop the service, even though we do not use any threads:
server.run_for(3s); // this blocks
The Work Part
This is completely separate, and will use threads. First, let's define a Request, and a thread-safe Queue:
namespace Service {
struct Request {
std::vector<char> data; // or whatever you read from the sockets...
};
Request parse_request(std::istream& is) {
Request result;
result.data.assign(std::istream_iterator<char>(is), {});
return result;
}
struct Queue {
Queue(size_t max = 50) : _max(max) {}
void enqueue(Request req) {
std::unique_lock<std::mutex> lk(mx);
cv.wait(lk, [this] { return _queue.size() < _max; });
_queue.push_back(std::move(req));
cv.notify_one();
}
Request dequeue(Clock::time_point deadline) {
Request req;
{
std::unique_lock<std::mutex> lk(mx);
_peak = std::max(_peak, _queue.size());
if (cv.wait_until(lk, deadline, [this] { return _queue.size() > 0; })) {
req = std::move(_queue.front());
_queue.pop_front();
cv.notify_one();
} else {
throw std::range_error("dequeue deadline");
}
}
return req;
}
size_t peak_depth() const {
std::lock_guard<std::mutex> lk(mx);
return _peak;
}
private:
mutable std::mutex mx;
mutable std::condition_variable cv;
size_t _max = 50;
size_t _peak = 0;
std::deque<Request> _queue;
};
This is nothing special, and doesn't actually use threads yet. Let's make a worker function that accepts a reference to a queue (more than 1 worker can be started if so desired):
void worker(std::string name, Queue& queue, Clock::duration d = 30s) {
auto const deadline = Clock::now() + d;
while(true) try {
auto r = queue.dequeue(deadline);
(std::cout << "Worker " << name << " handling request '").write(r.data.data(), r.data.size()) << "'\n";
}
catch(std::exception const& e) {
std::cout << "Worker " << name << " got " << e.what() << "\n";
break;
}
}
}
The main Driver
Here's where the Queue gets instantiated and both the network server as well as some worker threads are started:
int main() {
Service::Queue queue;
auto handler = [&](std::istream& is) {
queue.enqueue(Service::parse_request(is));
};
Network::Server server(6767, handler);
std::vector<std::thread> pool;
pool.emplace_back([&queue] { Service::worker("one", queue, 6s); });
pool.emplace_back([&queue] { Service::worker("two", queue, 6s); });
server.run_for(3s); // this blocks
for (auto& thread : pool)
if (thread.joinable())
thread.join();
std::cout << "Maximum queue depth was " << queue.peak_depth() << "\n";
}
Live Demo
See It Live On Coliru
With a test load looking like this:
for a in "hello world" "the quick" "brown fox" "jumped over" "the pangram" "bye world"
do
netcat 127.0.0.1 6767 <<< "$a" || echo "not sent: '$a'"&
done
wait
It prints something like:
Worker one handling request 'brownfox'
Worker one handling request 'thepangram'
Worker one handling request 'jumpedover'
Worker two handling request 'Worker helloworldone handling request 'byeworld'
Worker one handling request 'thequick'
'
Worker one got dequeue deadline
Worker two got dequeue deadline
Maximum queue depth was 6
The includes you need. Some may be unnecessary:
#include <boost/asio.hpp>
#include <boost/thread.hpp>
#include <boost/asio/io_service.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/write.hpp>
#include <boost/asio/buffer.hpp>
#include <boost/asio/ip/tcp.hpp>
#include <iostream>
#include <stdlib.h>
#include <array>
#include <string>
#include <vector>
#include <string.h>
#include <stdio.h>
#include <process.h>
#include <iterator>
using namespace boost::asio;
using namespace boost::asio::ip;
io_service ioservice;
tcp::endpoint sim_endpoint{ tcp::v4(), 4066 }; //{which connectiontype, portnumber}
tcp::acceptor sim_acceptor{ ioservice, sim_endpoint };
std::vector<tcp::socket> sim_sockets;
static int iErgebnis;
int iSocket = 0;
void do_write(int a) //int a is the postion of the socket in the vector
{
int iWSchleife = 1; //to stay connected with putty or something
static char chData[32000];
std::string sBuf = "Received!\r\n";
while (iWSchleife > 0)
{
boost::system::error_code error;
memset(chData, 0, sizeof(chData)); //clear the char
iErgebnis = sim_sockets[a].read_some(boost::asio::buffer(chData), error); //recv data from client
iWSchleife = iErgebnis; //if iErgebnis is bigger then 0 it will stay in the loop. iErgebniss is always >0 when data is received
if (iErgebnis > 0) {
printf("%d data received from client : \n%s\n\n", iErgebnis, chData);
write(sim_sockets[a], boost::asio::buffer(sBuf), error); //send data to client
}
else {
boost::system::error_code ec;
sim_sockets[a].shutdown(boost::asio::ip::tcp::socket::shutdown_send, ec); //close the socket when no data
if (ec)
{
printf("studown error"); // An error occurred.
}
}
}
}
void do_accept(yield_context yield)
{
while (1) //endless loop to accept limitless clients
{
sim_sockets.emplace_back(ioservice); //look to the link below for more info
sim_acceptor.async_accept(sim_sockets.back(), yield); //waits here to accept an client
boost::thread dosome(do_write, iSocket); //when accepted, starts the thread do_write and passes the parameter iSocket
iSocket++; //to know the position of the socket in the vector
}
}
int main()
{
sim_acceptor.listen();
spawn(ioservice, do_accept); //here you can learn more about Coroutines https://theboostcpplibraries.com/boost.coroutine
ioservice.run(); //from here you jump to do_accept
getchar();
}