I want to create an application that implements a one-thread-per-connection model, but each connection must be stoppable. I have tried this Boost.Asio example, which implements the blocking version of what I want, but after a bit of questioning I found out that there is no reliable way to stop a session in that example. So I tried to implement my own, and I had to use asynchronous functions. Since I want each thread to manage only one connection, and there is no way to control which thread an asynchronous job is dispatched to, I decided to use an io_service per connection/socket/thread.
So: is this a good approach, or do you know a better one?
My code is below so you can examine and review it:
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/array.hpp>
#include <boost/thread.hpp>
#include <boost/scoped_ptr.hpp>
#include <list>
#include <iostream>
#include <string>
#include <istream>
namespace ba = boost::asio;
namespace bs = boost::system;
namespace b = boost;
typedef ba::ip::tcp::acceptor acceptor_type;
typedef ba::ip::tcp::socket socket_type;
const short PORT = 11235;
class Server;
// A connection has its own io_service and socket
class Connection {
protected:
ba::io_service service;
socket_type sock;
b::thread *thread;
ba::streambuf stream_buffer; // for reading etc
Server *server;
void AsyncReadString() {
ba::async_read_until(
sock,
stream_buffer,
'\0', // null-char is a delimiter
b::bind(&Connection::ReadHandler, this,
ba::placeholders::error,
ba::placeholders::bytes_transferred));
}
void AsyncWriteString(const std::string &s) {
std::string newstr = s + '\0'; // add a null char
ba::async_write(
sock,
ba::buffer(newstr.c_str(), newstr.size()),
b::bind(&Connection::WriteHandler, this,
ba::placeholders::error,
ba::placeholders::bytes_transferred));
}
virtual void Session() {
AsyncReadString();
service.run(); // run at last
}
std::string ExtractString() {
std::istream is(&stream_buffer);
std::string s;
std::getline(is, s, '\0');
return s;
}
virtual void ReadHandler(
const bs::error_code &ec,
std::size_t bytes_transferred) {
if (!ec) {
std::cout << (ExtractString() + "\n");
std::cout.flush();
AsyncReadString(); // read again
}
else {
// do nothing, "this" will be deleted later
}
}
virtual void WriteHandler(
const bs::error_code &ec,
std::size_t bytes_transferred) {
}
public:
Connection(Server *s) :
service(),
sock(service),
server(s),
thread(NULL)
{ }
socket_type& Socket() {
return sock;
}
void Start() {
if (thread) delete thread;
thread = new b::thread(
b::bind(&Connection::Session, this));
}
void Join() {
if (thread) thread->join();
}
void Stop() {
service.stop();
}
void KillMe();
virtual ~Connection() {
}
};
// a server also has its own io_service but it's only used for accepting
class Server {
public:
std::list<Connection*> Connections;
protected:
ba::io_service service;
acceptor_type acc;
b::thread *thread;
virtual void AcceptHandler(const bs::error_code &ec) {
if (!ec) {
Connections.back()->Start();
Connections.push_back(new Connection(this));
acc.async_accept(
Connections.back()->Socket(),
b::bind(&Server::AcceptHandler,
this,
ba::placeholders::error));
}
else {
// do nothing
// since the new session will be deleted
// automatically by the destructor
}
}
virtual void ThreadFunc() {
Connections.push_back(new Connection(this));
acc.async_accept(
Connections.back()->Socket(),
b::bind(&Server::AcceptHandler,
this,
ba::placeholders::error));
service.run();
}
public:
Server():
service(),
acc(service, ba::ip::tcp::endpoint(ba::ip::tcp::v4(), PORT)),
thread(NULL)
{ }
void Start() {
if (thread) delete thread;
thread = new b::thread(
b::bind(&Server::ThreadFunc, this));
}
void Stop() {
service.stop();
}
void Join() {
if (thread) thread->join();
}
void StopAllConnections() {
for (auto c : Connections) {
c->Stop();
}
}
void JoinAllConnections() {
for (auto c : Connections) {
c->Join();
}
}
void KillAllConnections() {
for (auto c : Connections) {
delete c;
}
Connections.clear();
}
void KillConnection(Connection *c) {
Connections.remove(c);
delete c;
}
virtual ~Server() {
delete thread;
// connection should be deleted by the user (?)
}
};
void Connection::KillMe() {
server->KillConnection(this);
}
int main() {
try {
Server s;
s.Start();
std::cin.get(); // wait for enter
s.Stop(); // stop listening first
s.StopAllConnections(); // interrupt ongoing connections
s.Join(); // wait for server, should return immediately
s.JoinAllConnections(); // wait for ongoing connections
s.KillAllConnections(); // destroy connection objects
// at the end of scope, Server will be destroyed
}
catch (std::exception &e) {
std::cerr << "Exception: " << e.what() << std::endl;
return 1;
}
return 0;
}
No. Using an io_service object per connection is definitely a smell. Especially since you're also running each connection on a dedicated thread.
At this point you have to ask yourself what asynchrony bought you: you could write all of the code synchronously and end up with exactly the same number of threads.
Clearly you want to multiplex the connections onto a far smaller number of services. In practice there are a few sensible models, like:
a single io_service with a single service thread (this is usually good); no task queued on the service may ever block for a significant time, or latency will suffer
a single io_service with a pool of threads executing handlers; the pool should have enough threads to service the maximum number of simultaneous CPU-intensive tasks supported (or again, latency will start to go up); see the sketch after this list
an io_service per thread, usually one thread per logical core and with thread affinity so that it "sticks" to that core; this can be ideal for cache locality
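For illustration, here is a minimal sketch of the second model (one io_service shared by a small pool of worker threads); it is not taken from the code above, and the pool size of 4 is an arbitrary choice:
#include <boost/asio.hpp>
#include <boost/thread.hpp>

int main() {
    boost::asio::io_service service;
    // keep run() from returning while there is temporarily no work queued
    boost::asio::io_service::work work(service);

    // a small pool of threads all running the same io_service;
    // handlers may run on any of them, so shared state needs a strand or a mutex
    boost::thread_group pool;
    for (int i = 0; i < 4; ++i)
        pool.create_thread([&service] { service.run(); });

    // ... create acceptors/sockets on `service` and start async operations here ...

    service.stop();   // or destroy `work` and let the queued handlers drain
    pool.join_all();
}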
UPDATE: Demo
Here's a demo that shows the idiomatic style using option 1 from above:
Live On Coliru
#include <boost/array.hpp>
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/enable_shared_from_this.hpp>
#include <boost/make_shared.hpp>
#include <boost/thread.hpp>
#include <iostream>
#include <istream>
#include <list>
#include <string>
namespace ba = boost::asio;
namespace bs = boost::system;
namespace b = boost;
typedef ba::ip::tcp::acceptor acceptor_type;
typedef ba::ip::tcp::socket socket_type;
const short PORT = 11235;
// A connection has its own io_service and socket
class Connection : public b::enable_shared_from_this<Connection>
{
public:
typedef boost::shared_ptr<Connection> Ptr;
protected:
socket_type sock;
ba::streambuf stream_buffer; // for reading etc
std::string message;
void AsyncReadString() {
std::cout << __PRETTY_FUNCTION__ << "\n";
ba::async_read_until(
sock,
stream_buffer,
'\0', // null-char is a delimiter
b::bind(&Connection::ReadHandler, shared_from_this(),
ba::placeholders::error,
ba::placeholders::bytes_transferred));
}
void AsyncWriteString(const std::string &s) {
std::cout << __PRETTY_FUNCTION__ << "\n";
message = s;
ba::async_write(
sock,
ba::buffer(message.c_str(), message.size()+1),
b::bind(&Connection::WriteHandler, shared_from_this(),
ba::placeholders::error,
ba::placeholders::bytes_transferred));
}
std::string ExtractString() {
std::cout << __PRETTY_FUNCTION__ << "\n";
std::istream is(&stream_buffer);
std::string s;
std::getline(is, s, '\0');
return s;
}
void ReadHandler(
const bs::error_code &ec,
std::size_t bytes_transferred)
{
std::cout << __PRETTY_FUNCTION__ << "\n";
if (!ec) {
std::cout << (ExtractString() + "\n");
std::cout.flush();
AsyncReadString(); // read again
}
else {
// do nothing, "this" will be deleted later
}
}
void WriteHandler(const bs::error_code &ec, std::size_t bytes_transferred) {
std::cout << __PRETTY_FUNCTION__ << "\n";
}
public:
Connection(ba::io_service& svc) : sock(svc) { }
virtual ~Connection() {
std::cout << __PRETTY_FUNCTION__ << "\n";
}
socket_type& Socket() { return sock; }
void Session() { AsyncReadString(); }
void Stop() { sock.cancel(); }
};
// a server also has its own io_service but it's only used for accepting
class Server {
public:
std::list<boost::weak_ptr<Connection> > m_connections;
protected:
ba::io_service _service;
boost::optional<ba::io_service::work> _work;
acceptor_type _acc;
b::thread thread;
void AcceptHandler(const bs::error_code &ec, Connection::Ptr accepted) {
if (!ec) {
accepted->Session();
DoAccept();
}
else {
// do nothing the new session will be deleted automatically by the
// destructor
}
}
void DoAccept() {
auto newaccept = boost::make_shared<Connection>(_service);
_acc.async_accept(
newaccept->Socket(),
b::bind(&Server::AcceptHandler,
this,
ba::placeholders::error,
newaccept
));
}
public:
Server():
_service(),
_work(ba::io_service::work(_service)),
_acc(_service, ba::ip::tcp::endpoint(ba::ip::tcp::v4(), PORT)),
thread(b::bind(&ba::io_service::run, &_service))
{ }
~Server() {
std::cout << __PRETTY_FUNCTION__ << "\n";
Stop();
_work.reset();
if (thread.joinable()) thread.join();
}
void Start() {
std::cout << __PRETTY_FUNCTION__ << "\n";
DoAccept();
}
void Stop() {
std::cout << __PRETTY_FUNCTION__ << "\n";
_acc.cancel();
}
void StopAllConnections() {
std::cout << __PRETTY_FUNCTION__ << "\n";
for (auto c : m_connections) {
if (auto p = c.lock())
p->Stop();
}
}
};
int main() {
try {
Server s;
s.Start();
std::cerr << "Shutdown in 2 seconds...\n";
b::this_thread::sleep_for(b::chrono::seconds(2));
std::cerr << "Stop accepting...\n";
s.Stop();
std::cerr << "Shutdown...\n";
s.StopAllConnections(); // interrupt ongoing connections
} // destructor of Server will join the service thread
catch (std::exception &e) {
std::cerr << __FUNCTION__ << ":" << __LINE__ << "\n";
std::cerr << "Exception: " << e.what() << std::endl;
return 1;
}
std::cerr << "Byebye\n";
}
I modified the main() to run for 2 seconds without user intervention. This is so I can demo it Live On Coliru (of course, it's limited w.r.t the number of client processes).
If you run it with a lot (a lot) of clients, using e.g.
$ time (for a in {1..1000}; do (sleep 1.$RANDOM; echo -e "hello world $RANDOM\\0" | netcat localhost 11235)& done; wait)
You will find that the two-second window handles them all:
$ ./test | sort | uniq -c | sort -n | tail
Shutdown in 2 seconds...
Shutdown...
Byebye
2 hello world 28214
2 hello world 4554
2 hello world 6216
2 hello world 7864
2 hello world 9966
2 void Server::Stop()
1000 std::string Connection::ExtractString()
1001 virtual Connection::~Connection()
2000 void Connection::AsyncReadString()
2000 void Connection::ReadHandler(const boost::system::error_code&, std::size_t)
If you really go berserk and raise 1000 to e.g. 100000 there, you'll get things similar to:
sehe#desktop:/tmp$ ./test | sort | uniq -c | sort -n | tail
Shutdown in 2 seconds...
Shutdown...
Byebye
2 hello world 5483
2 hello world 579
2 hello world 5865
2 hello world 938
2 void Server::Stop()
3 hello world 9613
1741 std::string Connection::ExtractString()
1742 virtual Connection::~Connection()
3482 void Connection::AsyncReadString()
3482 void Connection::ReadHandler(const boost::system::error_code&, std::size_t)
On repeated 2-second runs of the server.
Related
I'm trying to open a UDP server. A baby example works (I receive what I expect, which Wireshark also shows):
Baby example:
int main(int argc, char* const argv[])
{
try
{
boost::asio::io_context io_context;
boost::asio::ip::udp::endpoint ep(boost::asio::ip::udp::v4(), 60001);
boost::asio::ip::udp::socket sock(io_context, ep);
UDPServer server(std::move(sock), callbackUDP);
io_context.run();
}
catch (std::exception& e)
{
std::cerr << e.what() << std::endl;
}
}
UDPServer.h:
#include <boost/asio.hpp>
#include <functional>
#include <vector>
#include <thread>
#define BUFFERSIZE 1501
class UDPServer
{
public:
explicit UDPServer(boost::asio::ip::udp::socket socket, std::function<void(const std::vector<char>&)> callbackFunction);
virtual ~UDPServer();
private:
void read();
boost::asio::ip::udp::socket socket_;
boost::asio::ip::udp::endpoint endpoint_;
std::function<void(const std::vector<char>&)> callbackFunction_;
char data_[1500 + 1]; // 1500 bytes is safe limit as it is max of ethernet frame, +1 is for \0 terminator
};
UDPServer.cpp:
#include <iostream>
#include "UDPServer.h"
UDPServer::UDPServer(boost::asio::ip::udp::socket socket, std::function<void(const std::vector<char>&)> callbackFunction):
socket_(std::move(socket)),
callbackFunction_(callbackFunction)
{
read();
}
UDPServer::~UDPServer()
{
}
void UDPServer::read()
{
socket_.async_receive_from(boost::asio::buffer(data_, 1500), endpoint_,
[this](boost::system::error_code ec, std::size_t length)
{
if (ec)
{
return;
}
data_[length] = '\0';
if (strcmp(data_, "\n") == 0)
{
return;
}
std::vector<char> dataVector(data_, data_ + length);
callbackFunction_(dataVector);
read();
}
);
}
Now I want to convert this to a class whose constructor takes only the port and a callback function (let's forget about the latter and just print the message for now; adding the callback is normally no problem).
I tried the following, but it doesn't work:
int main(int argc, char* const argv[])
{
UDPServer server(60001);
}
UDPServer.h:
#include <boost/asio.hpp>
#include <functional>
#include <vector>
#include <thread>
#define BUFFERSIZE 1501
class UDPServer
{
public:
explicit UDPServer(uint16_t port);
virtual ~UDPServer();
private:
boost::asio::io_context io_context_;
boost::asio::ip::udp::socket socket_;
boost::asio::ip::udp::endpoint endpoint_;
std::array<char, BUFFERSIZE> recv_buffer_;
std::thread thread_;
void run();
void start_receive();
void handle_reply(const boost::system::error_code& error, std::size_t bytes_transferred);
};
UDPServer.cpp:
#include <iostream>
#include "UDPServer.h"
#include <boost/function.hpp>
#include <boost/bind.hpp>
#include <iostream>
UDPServer::UDPServer(uint16_t port):
endpoint_(boost::asio::ip::udp::endpoint(boost::asio::ip::udp::v4(), port)),
io_context_(),
socket_(io_context_, boost::asio::ip::udp::endpoint(boost::asio::ip::udp::v4(), port)),
thread_(&UDPServer::run, this)
{
start_receive();
}
UDPServer::~UDPServer()
{
io_context_.stop();
thread_.join();
}
void UDPServer::start_receive()
{
socket_.async_receive_from(boost::asio::buffer(recv_buffer_), endpoint_,
boost::bind(&UDPServer::handle_reply, this, boost::asio::placeholders::error, boost::asio::placeholders::bytes_transferred));
}
void UDPServer::handle_reply(const boost::system::error_code& error, std::size_t bytes_transferred)
{
if (!error)
{
try {
std::string string(recv_buffer_.data(), recv_buffer_.data() + bytes_transferred);
std::cout << "Message received: " << std::to_string(bytes_transferred) << ", " << string << std::endl;
}
catch (std::exception ex) {
std::cout << "handle_reply: Error parsing incoming message:" << ex.what() << std::endl;
}
catch (...)
{
std::cout << "handle_reply: Unknown error while parsing incoming message" << std::endl;
}
}
else
{
std::cout << "handle_reply: error: " << error.message() << std::endl;
}
start_receive();
}
void UDPServer::run()
{
try {
io_context_.run();
} catch( const std::exception& e )
{
std::cout << "Server network exception: " << e.what() << std::endl;
}
catch(...)
{
std::cout << "Unknown exception in server network thread" << std::endl;
}
std::cout << "Server network thread stopped" << std::endl;
};
When running I get "Server network thread stopped". The io_context doesn't seem to start and doesn't block. Does someone have an idea of what I'm doing wrong? Thanks a lot!
EDIT: I tried this after a comment, with the same result (except that the message comes after 1 second):
UDPServer::UDPServer(uint16_t port):
endpoint_(boost::asio::ip::udp::endpoint(boost::asio::ip::udp::v4(), port)),
io_context_(),
socket_(io_context_, boost::asio::ip::udp::endpoint(boost::asio::ip::udp::v4(), port))
{
start_receive();
std::this_thread::sleep_for (std::chrono::seconds(1));
thread_ = std::thread(&UDPServer::run, this);
}
Your destructor explicitly tells the service to stop:
UDPServer::~UDPServer() {
io_context_.stop();
thread_.join();
}
That's part of your problem. The other part is as pointed out in the comment: you have a race condition where the thread exits before you even post your first async operation.
Solve it by adding a work guard:
boost::asio::io_context io_;
boost::asio::executor_work_guard<boost::asio::io_context::executor_type> work_ {io_.get_executor()};
Now the destructor can be:
UDPServer::~UDPServer() {
work_.reset(); // allow service to run out of work
thread_.join();
}
Other notes:
avoid chaining back to start_receive when there was an error
std::to_string was redundant
the order of initialization of members is defined by the order of their declaration, not by the order of the initializers in the initializer list; catch these bug sources with -Wall -Wextra -pedantic (see the sketch after these notes)
handle exceptions in your service thread (see Should the exception thrown by boost::asio::io_service::run() be caught?)
I'd suggest std::bind over boost::bind:
std::bind(&UDPServer::handle_reply, this,
std::placeholders::_1,
std::placeholders::_2));
Or just use a lambda:
[this](error_code ec, size_t xfer) { handle_reply(ec, xfer); });
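To illustrate the initialization-order note above, here is a tiny sketch with made-up names (not from the code in this question): members are constructed in declaration order, regardless of the order written in the initializer list.
#include <boost/asio.hpp>
#include <cstdint>

struct BadOrder {
    boost::asio::ip::udp::socket sock;   // declared first, therefore constructed first
    boost::asio::ip::udp::endpoint ep;   // constructed second, whatever the list says

    BadOrder(boost::asio::io_context& io, std::uint16_t port)
        : ep(boost::asio::ip::udp::v4(), port), // written first, but runs second
          sock(io, ep)                          // reads ep before it is constructed!
    { }
};
// -Wall (which includes -Wreorder) flags the mismatched order; declaring ep before
// sock, and listing the initializers in the same order, fixes it.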
LIVE DEMO
Compiler Explorer
#include <boost/asio.hpp>
#include <fstream>
#include <functional>
#include <iomanip>
#include <iostream>
#include <thread>
#include <vector>
using boost::asio::ip::udp;
using boost::system::error_code;
using boost::asio::io_context;
#define BUFFERSIZE 1501
class UDPServer {
public:
explicit UDPServer(uint16_t port);
virtual ~UDPServer();
private:
io_context io_;
boost::asio::executor_work_guard<io_context::executor_type> work_ {io_.get_executor()};
udp::endpoint endpoint_;
udp::socket socket_;
std::array<char, BUFFERSIZE> recv_buffer_;
std::thread thread_;
void run();
void start_receive();
void handle_reply(const error_code& error, size_t transferred);
};
UDPServer::UDPServer(uint16_t port)
: endpoint_(udp::endpoint(udp::v4(), port)),
socket_(io_, endpoint_),
thread_(&UDPServer::run, this) {
start_receive();
}
UDPServer::~UDPServer() {
work_.reset(); // allow service to run out of work
thread_.join();
}
void UDPServer::start_receive() {
socket_.async_receive_from(boost::asio::buffer(recv_buffer_), endpoint_,
#if 0
std::bind(&UDPServer::handle_reply, this,
std::placeholders::_1,
std::placeholders::_2));
#else
[this](error_code ec, size_t xfer) { handle_reply(ec, xfer); });
#endif
}
void UDPServer::handle_reply(const error_code& error, size_t transferred) {
if (!error) {
try {
std::string_view s(recv_buffer_.data(), transferred);
std::cout << "Message received: " << transferred << ", "
<< std::quoted(s) << "\n";
} catch (std::exception const& ex) {
std::cout << "handle_reply: Error parsing incoming message:"
<< ex.what() << "\n";
} catch (...) {
std::cout
<< "handle_reply: Unknown error while parsing incoming message\n";
}
start_receive();
} else {
std::cout << "handle_reply: error: " << error.message() << "\n";
}
}
void UDPServer::run() {
while (true) {
try {
if (io_.run() == 0u) {
break;
}
} catch (const std::exception& e) {
std::cout << "Server network exception: " << e.what() << "\n";
} catch (...) {
std::cout << "Unknown exception in server network thread\n";
}
}
std::cout << "Server network thread stopped\n";
}
int main() {
std::cout << std::unitbuf;
UDPServer server(60001);
}
Testing with random words:
sort -R /etc/dictionaries-common/words | while read w; do sleep 1; netcat -u localhost 60001 -w 0 <<<"$w"; done
Live output:
The server starts and accepts connections; all clients (even when more than 10 are connected) can send a message, but there is no response.
The read and write functions use the index of the accepted client and work with it; therefore, there is an additional parameter in the handlers.
We accept a connection, pass its index to the handler, and there we work with the socket at that index.
#include <iostream>
#include <boost/asio.hpp>
#include <boost/thread.hpp>
#include <clocale>
#include <vector>
#include <conio.h>
using namespace boost::asio;
using namespace std;
class tcp_server
{
private:
io_service service;
int port;
enum { buff_size = 1024 };
ip::tcp::endpoint endpoint;
ip::tcp::acceptor acceptor;
int countClients = 0;
int accept_i = 0;
struct client
{
ip::tcp::socket sock;
char buff[buff_size] = { };
};
vector<client> clients;
public:
tcp_server(io_service& service, int port) : service(), acceptor(service), endpoint(ip::tcp::v4(), port)
{
this->port;
acceptor.open(endpoint.protocol());
acceptor.set_option(ip::tcp::acceptor::reuse_address(true));
acceptor.bind(endpoint);
acceptor.listen();
clients.reserve(10);
}
void start()
{
start_service_in_thread();
}
void start_service_in_thread()
{
for (int i = 0; i < 10; ++i)
boost::thread(service_func_for_thread);
for (int i = 0; i < 10; ++i)
{
boost::thread(acceptor_func_for_thread);
accept_i++;
}
}
void service_func_for_thread()
{
service.run();
}
void accept_handler(const boost::system::error_code& error)
{
if (!error)
{
countClients++;
do_read_this(countClients - 1);
}
else
{
cout << "Acceptor error\n";
cout << error.message() << endl;
}
}
void acceptor_func_for_thread()
{
acceptor.async_accept(
clients[accept_i].sock,
boost::bind(&tcp_server::accept_handler, this, boost::asio::placeholders::error)
);
}
void do_read_this(int thisClientIndex)
{
clients[thisClientIndex].sock.async_read_some(
buffer(clients[thisClientIndex].buff),
boost::bind(&tcp_server::read_handler,
this,
boost::asio::placeholders::error,
boost::asio::placeholders::bytes_transferred,
thisClientIndex)
);
}
void read_handler(const boost::system::error_code& error, size_t bytes_transferred, int thisClientIndex)
{
if (!error)
{
clients[thisClientIndex].sock.async_write_some(
buffer(clients[thisClientIndex].buff),
boost::bind(&tcp_server::write_handler,
this,
boost::asio::placeholders::error,
boost::asio::placeholders::bytes_transferred,
thisClientIndex)
);
}
else
{
cout << "Error reading from socket\n";
cout << error.message() << endl;
}
}
void write_handler(const boost::system::error_code& error, size_t bytes_transferred, int thisClientIndex)
{
if (!error)
{
do_read_this(thisClientIndex);
}
else
{
cout << "Error write in socket\n";
cout << error.message() << endl;
}
}
};
int main(int argc, char *argv[])
{
try
{
setlocale(LC_ALL, "Rus");
io_service service;
tcp_server* server = new tcp_server{ service, 5000 };
server->start();
service.run();
}
catch (exception& ex)
{
cout << "Exception: " << ex.what();
}
return 0;
}
The client connects to the server, but when it sends a message, no response is received.
Please help.
service.run() in main has nothing to do, so it returns immediately; main then returns, causing the program to end.
Creating background threads is not necessary here.
You are (again) creating temporary boost::thread objects that immediately go out of scope. Unless BOOST_THREAD_PROVIDES_THREAD_DESTRUCTOR_CALLS_TERMINATE_IF_JOINABLE is specified, you will end up with a bunch of detached threads.
When the io_service::run() method has no work to do, it returns.
You should either
post() at least one task to the io_service before calling run(),
or "lock" it with io_service::work
io_service service;
boost::asio::io_service::work work(service);
The latter requires a call to service.stop() to cause run() to exit, otherwise it will run eternally.
Note however: you don't really need two io_services or any threads in an async application.
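A minimal sketch of both options mentioned above, assuming nothing beyond what's described here (the posted lambda is just a placeholder):
#include <boost/asio.hpp>
#include <boost/thread.hpp>
#include <iostream>

int main() {
    boost::asio::io_service service;

    // Option 1: post at least one task before run(); run() returns once it is done.
    service.post([] { std::cout << "posted task ran\n"; });
    service.run();

    // Option 2: "lock" the service with a work object so run() keeps waiting for work.
    service.reset(); // clear the "out of work" state before running the same service again
    boost::asio::io_service::work work(service);
    boost::thread runner([&service] { service.run(); });

    // ... post real asynchronous work here ...

    service.stop();  // without stop() (or destroying `work`), run() would never return
    runner.join();
}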
I recently ran into a problem with boost::asio asynchronous tasks. I want to return a pointer to an object listening on a port.
It works when I use the socket.read_some method, but that call blocks my main, and I want my MyClass::create method to return.
So I tried an async_read call, but I saw that inside my read() method no asynchronous task is launched. I tried to figure out what may cause the problem but see no solution to this issue.
Here is my code; here it is not an async_read but an async_wait, and the same problem appears: the timer is not launched.
Thanks for any help I might get.
The header file:
#ifndef MYCLASS_HPP
#define MYCLASS_HPP
#include <memory>
#include <boost/asio.hpp>
class MyClass
{
public:
MyClass(boost::asio::io_service& ios);
void read();
void read_handler(const boost::system::error_code& error);
static std::shared_ptr<MyClass> create(std:: string const & host, uint16_t port);
bool connect (std::string const & host, uint16_t port);
void connect_handler(const boost::system::error_code& error);
boost::asio::ip::tcp::socket m_socket;
bool m_flag;
std::vector<uint8_t> m_buffer;
};
#endif
Source file:
#include "MyClass.hpp"
#include <boost/bind.hpp>
MyClass::MyClass(boost::asio::io_service& ios)
:m_flag(false), m_socket(ios), m_buffer(20)
{
}
void MyClass::read_handler(const boost::system::error_code& er)
{
std::cout << "Timer waited 5 sec" << std::endl;
}
void MyClass::read()
{
boost::asio::deadline_timer t(m_socket.get_io_service(),boost::posix_time::seconds(5));
t.async_wait(boost::bind(&MyClass::read_handler,this,boost::asio::placeholders::error));
m_socket.get_io_service().run();//Should make the io_service wait for all asynchronous tasks to finish
std::cout << "This message should be displayed after the wait" << std::endl;
}
void MyClass::connect_handler(const boost::system::error_code& error)
{
if(!error)
{
std::cout << "Connection done" << std::endl;
m_flag = 1;
}
else
{
std::cout << "Error in connection: " << error.message() << std::endl;
}
}
//connect method
bool MyClass::connect(std::string const & host, uint16_t port)
{
boost::asio::ip::tcp::endpoint endpoint(boost::asio::ip::address::from_string(host),port);
m_socket.async_connect(endpoint,
boost::bind(&MyClass::connect_handler, this,
boost::asio::placeholders::error));
m_socket.get_io_service().run();//Wait async_connect and connect_handler to finish
if (m_flag == 0) return false;
else return true;
}
std::shared_ptr<MyClass> MyClass::create(std:: string const & host, uint16_t port)
{
boost::asio::io_service ios;
std::shared_ptr<MyClass> ptr(new MyClass(ios));
bool bol = ptr->connect(host, port);
ptr->read();
//while(1){}
if(bol == true)
{
//connection success, reading currently listening, pointer is returned to the user
return ptr;
}
else
{
//connection failure, pointer is still returned to the user but not listening as he's not connected
return ptr;
}
}
And my main:
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/asio.hpp>
#include "MyClass.hpp"
int main()
{
try
{
std::cout << "Creation of instance" << std::endl;
std::shared_ptr <MyClass> var = MyClass::create("127.0.0.1", 8301);
std::cout << "Instance created" << std::endl;
}
catch (std::exception& e)
{
std::cerr << e.what() << std::endl;
}
return 0;
}
I figured out how to solve my problem.
I did indeed have a problem with the io_service being destroyed at the end of the "create" method, so the pointer returned to main was not able to continue reading.
I had to call run() at some point to launch the callbacks, but I couldn't do it in main, as I wanted main to keep doing other things.
So I created a class that contains an io_service and launches a separate thread; that thread calls run() periodically. It was then added as a member of MyClass.
Now the call to "create" returns a pointer to MyClass without stopping whatever asynchronous task was launched in MyClass.
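A minimal sketch of such a helper class, with made-up names, roughly following the description above (a thread that keeps calling run()); an io_service::work object would be the more common way to keep run() alive without the polling loop:
#include <boost/asio.hpp>
#include <boost/atomic.hpp>
#include <boost/chrono.hpp>
#include <boost/thread.hpp>

// Illustration only: owns an io_service plus a thread that keeps running it,
// so callbacks registered from anywhere keep being dispatched.
class ServiceRunner {
public:
    ServiceRunner() : running_(true), thread_(&ServiceRunner::loop, this) {}

    ~ServiceRunner() {
        running_ = false;
        service_.stop();
        thread_.join();
    }

    boost::asio::io_service& service() { return service_; }

private:
    void loop() {
        while (running_) {
            service_.run();    // runs the handlers currently queued, returns when none are left
            service_.reset();  // allow a later run() on the same io_service
            boost::this_thread::sleep_for(boost::chrono::milliseconds(50));
        }
    }

    boost::asio::io_service service_;   // declared before thread_ so it outlives the loop
    boost::atomic<bool> running_;
    boost::thread thread_;
};
MyClass could then hold a ServiceRunner (or a reference to one) and construct its socket from runner.service(), instead of creating a local io_service inside create().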
What I want is this: when the message queue receives an int N, the handler function should be called after N seconds. Below is my code.
It runs OK when the interval between two adjacent messages is larger than N seconds, but when the interval between two received messages is smaller than N, one of the handlers prints "Operation canceled", which is not what I want.
I'd greatly appreciate any help.
#include <boost/asio.hpp>
#include <zmq.h>
#include <boost/thread.hpp>
#include <iostream>
boost::asio::io_service io_service;
void* context = zmq_ctx_new();
void* sock_pull = zmq_socket(context, ZMQ_PULL);
void handler(const boost::system::error_code &ec) {
std::cout << "hello, world" << "\t" << ec.message() << std::endl;
}
void run() {
io_service.run();
}
void thread_listener() {
int nRecv;
boost::asio::deadline_timer timer(io_service, boost::posix_time::seconds(0));
while( true ) {
zmq_recv(sock_pull, &nRecv, sizeof(nRecv), 0);
std::cout << nRecv << std::endl;
timer.expires_from_now(boost::posix_time::seconds(nRecv));
timer.async_wait(handler);
}
}
int main(int argc, char* argv[]) {
boost::asio::io_service::work work(io_service);
zmq_bind(sock_pull, "tcp://*:60000");
boost::thread tThread(thread_listener);
boost::thread tThreadRun(run);
tThread.join();
tThreadRun.join();
return 0;
}
When you call
timer.expires_from_now(boost::posix_time::seconds(nRecv));
this, as the documentation states, cancels any async timer pending.
If you want to have overlapping requests in flight at a given time, one timer is clearly not enough. Luckily there is a well-known pattern around bound shared pointers in Asio that you can use to mimic a "session" per response.
Say you define a session to contain its own private timer:
struct session : boost::enable_shared_from_this<session> {
session(boost::asio::io_service& svc, int N) :
timer(svc, boost::posix_time::seconds(N))
{
// Note: shared_from_this is not allowed from ctor
}
void start() {
// it's critical that the completion handler is bound to a shared
// pointer so the handler keeps the session alive:
timer.async_wait(boost::bind(&session::handler, shared_from_this(), boost::asio::placeholders::error));
}
private:
void handler(const boost::system::error_code &ec) {
std::cout << "hello, world" << "\t" << ec.message() << std::endl;
}
boost::asio::deadline_timer timer;
};
Now, it's trivial to replace the code that used the hardcoded timer instance:
timer.expires_from_now(boost::posix_time::seconds(nRecv));
timer.async_wait(handler);
with the session start:
boost::make_shared<session>(io_service, nRecv)->start();
A fully working example (with suitably stubbed ZMQ stuff): Live On Coliru
#include <boost/asio.hpp>
#include <boost/thread.hpp>
#include <boost/enable_shared_from_this.hpp>
#include <boost/make_shared.hpp>
#include <iostream>
boost::asio::io_service io_service;
/////////////////////////////////////////////////////////////////////////
// I love stubbing out stuff I don't want to install just to help others
enum { ZMQ_PULL };
static void* zmq_ctx_new() { return nullptr; }
static void* zmq_socket(void*,int) { return nullptr; }
static void zmq_bind(void*,char const*) {}
static void zmq_recv(void*,int*data,size_t,int)
{
boost::this_thread::sleep_for(boost::chrono::milliseconds(rand()%1000));
*data = 2;
}
// End of stubs :)
/////////////////////////////////////////////////////////////////////////
void* context = zmq_ctx_new();
void* sock_pull = zmq_socket(context, ZMQ_PULL);
struct session : boost::enable_shared_from_this<session> {
session(boost::asio::io_service& svc, int N) :
timer(svc, boost::posix_time::seconds(N))
{
// Note: shared_from_this is not allowed from ctor
}
void start() {
// it's critical that the completion handler is bound to a shared
// pointer so the handler keeps the session alive:
timer.async_wait(boost::bind(&session::handler, shared_from_this(), boost::asio::placeholders::error));
}
~session() {
std::cout << "bye (session end)\n";
}
private:
void handler(const boost::system::error_code &ec) {
std::cout << "hello, world" << "\t" << ec.message() << std::endl;
}
boost::asio::deadline_timer timer;
};
void run() {
io_service.run();
}
void thread_listener() {
int nRecv = 0;
for(int n=0; n<4; ++n) {
zmq_recv(sock_pull, &nRecv, sizeof(nRecv), 0);
std::cout << nRecv << std::endl;
boost::make_shared<session>(io_service, nRecv)->start();
}
}
int main() {
auto work = boost::make_shared<boost::asio::io_service::work>(io_service);
zmq_bind(sock_pull, "tcp://*:60000");
boost::thread tThread(thread_listener);
boost::thread tThreadRun(run);
tThread.join();
work.reset();
tThreadRun.join();
}