I have to handle information from 100 ports in parallel, for 100 ms every second.
I am using Ubuntu.
I did some research and saw that the poll() function is a good candidate to avoid opening 100 threads to handle data arriving over UDP in parallel.
I wrote the main part with Boost and tried to integrate poll() with it.
The problem is that when the client sends data to the server, I receive nothing.
According to Wireshark, the data arrives at the right host (localhost, port 1234).
Did I miss something, or did I get something wrong?
The test code (server):
#include <deque>
#include <iostream>
#include <chrono>
#include <thread>
#include <sys/poll.h>
#include <boost/optional.hpp>
#include <boost/asio.hpp>
#include <boost/bind/bind.hpp>
using boost::asio::ip::udp;
using namespace boost::asio;
using namespace std::chrono_literals;
std::string ip_address = "127.0.0.1";
template<typename T, size_t N>
size_t arraySize( T(&)[N] )
{
return(N);
}
class UdpReceiver
{
using Resolver = udp::resolver;
using Sockets = std::deque<udp::socket>;
using EndPoint = udp::endpoint;
using Buffer = std::array<char, 100>; // receiver buffer
public:
explicit UdpReceiver()
: work_(std::ref(resolver_context)), thread_( [this]{ resolver_context.run(); })
{ }
~UdpReceiver()
{
work_ = boost::none; // using work to keep run active always !
thread_.join();
}
void async_resolve(udp::resolver::query const& query_) {
resolver_context.post([this, query_] { do_resolve(query_); });
}
// callback for event-loop in main thread
void run_handler(int fd_idx) {
// start reading
auto result = read(fd_idx, receive_buf.data(), sizeof(Buffer));
// increment number of received packets
received_packets = received_packets + 1;
std::cout << "Received bytes " << result << " current recorded packets " << received_packets <<'\n';
// run handler posted from resolver threads
handler_context.poll();
handler_context.reset();
}
static void handle_receive(boost::system::error_code error, udp::resolver::iterator const& iterator) {
std::cout << "handle_resolve:\n"
" " << error.message() << "\n";
if (!error)
std::cout << " " << iterator->endpoint() << "\n";
}
// get current file descriptor
int fd(size_t idx)
{
return sockets[idx].native_handle();
}
private:
void do_resolve(boost::asio::ip::udp::resolver::query const& query_) {
boost::system::error_code error;
Resolver resolver(resolver_context);
Resolver::iterator result = resolver.resolve(query_, error);
sockets.emplace_back(udp::socket(resolver_context, result->endpoint()));
// post handler callback to service running in main thread
resolver_context.post(boost::bind(&UdpReceiver::handle_receive, error, result));
}
private:
Sockets sockets;
size_t received_packets = 0;
EndPoint remote_receiver;
Buffer receive_buf {};
io_context resolver_context;
io_context handler_context;
boost::optional<boost::asio::io_context::work> work_;
std::thread thread_;
};
int main (int argc, char** argv)
{
UdpReceiver udpReceiver;
udpReceiver.async_resolve(udp::resolver::query(ip_address, std::to_string(1234)));
//logic
pollfd fds[2] { };
for(int i = 0; i < arraySize(fds); ++i)
{
fds[i].fd = udpReceiver.fd(0);
fds[i].events = 0;
fds[i].events |= POLLIN;
fcntl(fds[i].fd, F_SETFL, O_NONBLOCK);
}
// simple event-loop
while (true) {
if (poll(fds, arraySize(fds), -1)) // waiting for wakeup call. Timeout - inf
{
for(auto &fd : fds)
{
if(fd.revents & POLLIN) // checking if we have something to read
{
fd.revents = 0; // reset kernel message
udpReceiver.run_handler(fd.fd); // call resolve handler. Do read !
}
}
}
}
return 0;
}
This looks like a confused mix of C-style poll code and Asio code. The point is:
you don't need poll(): Asio does it internally (using epoll/select/kqueue/IOCP, whatever is available)
UDP is connectionless, so you don't need more than one socket to receive all "connections" (senders)
I'd replace it all with a single udp::socket on a single thread. You don't even have to manage the thread/work:
net::thread_pool io(1); // single threaded
udp::socket s{io, {{}, 1234}};
Let's run an asynchronous receive loop for 5s:
std::array<char, 100> receive_buffer;
udp::endpoint sender;
std::function<void(error_code, size_t)> read_loop;
read_loop = [&](error_code ec, size_t bytes) {
if (bytes != size_t(-1)) {
//std::cout << "read_loop (" << ec.message() << ")\n";
if (ec)
return;
received_packets += 1;
unique_senders.insert(sender);
//std::cout << "Received:" << bytes << " sender:" << sender << " recorded:" << received_packets << "\n";
//std::cout << std::string_view(receive_buffer.data(), bytes) << "\n";
}
s.async_receive_from(net::buffer(receive_buffer), sender, read_loop);
};
read_loop(error_code{}, -1); // prime the async pump
// after 5s stop
std::this_thread::sleep_for(5s);
post(io, [&s] { s.cancel(); });
io.join();
At the end, we can report the statistics:
std::cout << "A total of " << received_packets << " were received from "
<< unique_senders.size() << " unique senders\n";
With a simulated load in bash:
function client() { while read a; do echo "$a" > /dev/udp/localhost/1234 ; done < /etc/dictionaries-common/words; }
for a in {1..20}; do client& done; time wait
We get:
A total of 294808 were received from 28215 unique senders
real 0m5,007s
user 0m0,801s
sys 0m0,830s
This is obviously not optimized; the bottleneck here is likely the many, many bash subshells being launched for the clients.
Full Listing
#include <boost/asio.hpp>
#include <boost/bind/bind.hpp>
#include <iostream>
#include <set>
namespace net = boost::asio;
using boost::asio::ip::udp;
using boost::system::error_code;
using namespace std::chrono_literals;
int main ()
{
net::thread_pool io(1); // single threaded
udp::socket s{io, {{}, 1234}};
std::set<udp::endpoint> unique_senders;
size_t received_packets = 0;
{
std::array<char, 100> receive_buffer;
udp::endpoint sender;
std::function<void(error_code, size_t)> read_loop;
read_loop = [&](error_code ec, size_t bytes) {
if (bytes != size_t(-1)) {
//std::cout << "read_loop (" << ec.message() << ")\n";
if (ec)
return;
received_packets += 1;
unique_senders.insert(sender);
//std::cout << "Received:" << bytes << " sender:" << sender << " recorded:" << received_packets << "\n";
//std::cout << std::string_view(receive_buffer.data(), bytes) << "\n";
}
s.async_receive_from(net::buffer(receive_buffer), sender, read_loop);
};
read_loop(error_code{}, -1); // prime the async pump
// after 5s stop
std::this_thread::sleep_for(5s);
post(io, [&s] { s.cancel(); });
io.join();
}
std::cout << "A total of " << received_packets << " were received from "
<< unique_senders.size() << " unique senders\n";
}
I am trying to build a simple IPC protocol using Boost Asio where the server side will be sending a struct that contains a vector<uint8_t> to the client. It was suggested that I use a scatter/gather I/O approach, but I can't get it working: the client seems to receive only part of the data it is expecting, and it keeps waiting indefinitely for the rest of the data to arrive even though it should already be there.
This is what I have right now:
// File: client.cpp
#include <iostream>
#include <vector>
#include <boost/asio.hpp>
#include "ipc_common.hpp"
namespace ba = boost::asio;
using boost::asio::ip::tcp;
int main(int argc, char *argv[])
{
ba::io_context io;
std::vector<std::string> args(argv, argv + argc);
switch (args.size()) {
case 1:
args = {args.at(0), "localhost", "6869"};
break;
case 2:
args = {args.at(0), args.at(1), "6869"};
break;
case 3:
args = {args.at(0), args.at(1), args.at(2)};
break;
default:
std::clog << "usage: " << args.at(0) << " [host = localhost] [port = 6869]" << std::endl;
return 1;
}
try {
propertiesPacket properties;
properties.val1 = 9;
properties.val2 = 45;
tcp::socket socket(io);
tcp::resolver resolver(io);
connect(socket, resolver.resolve(args.at(1), args.at(2)));
write(socket, ba::buffer(&properties, sizeof(properties)));
uint16_t responseSize {};
ba::read(socket, ba::buffer(&responseSize, sizeof(uint16_t)));
std::clog << "client responseSize: " << responseSize << std::endl;
processedData response {};
std::vector<ba::mutable_buffer> responseBuffers {
ba::buffer(&response.size, sizeof(uint16_t)),
ba::buffer(&response.values, responseSize - sizeof(uint8_t))
};
ba::read(socket, responseBuffers);
std::clog << response.serialize();
return 0;
} catch (std::exception &e) {
std::clog << e.what() << std::endl;
return 1;
}
}
// File: server.cpp
#include <vector>
#include <boost/asio.hpp>
#include "ipc_common.hpp"
namespace ba = boost::asio;
using boost::asio::ip::tcp;
using boost::system::error_code;
using TCPSocket = tcp::socket;
class ServerConnection
: public std::enable_shared_from_this<ServerConnection>
{
public:
ServerConnection(TCPSocket socket)
: socket_(std::move(socket))
{ }
void start()
{
std::clog << __PRETTY_FUNCTION__ << std::endl;
doRead();
}
private:
void doRead()
{
std::clog << __PRETTY_FUNCTION__ << std::endl;
auto self(shared_from_this());
socket_.async_read_some(ba::buffer(&properties_, sizeof(properties_)),
[this, self](error_code ec, std::size_t length)
{
std::clog << "received " << length << std::endl;
if (!ec) {
processData();
std::vector<ba::const_buffer> msg {
ba::buffer(&filePacketSize_, sizeof(uint16_t)),
ba::buffer(&filePacket_.val, sizeof(filePacket_.val)),
ba::buffer(&filePacket_.values, sizeof(filePacket_.values))};
std::clog << "filePacketSize_: " << filePacketSize_ << std::endl;
ba::async_write(socket_, msg,
[this, self = shared_from_this()](error_code ec, std::size_t length)
{
std::clog << "written " << length << std::endl;
if (!ec) doRead();
});
}
});
}
void processData()
{
filePacket_.val = properties_.val1;
// Just for demonstration, we fill the vector with random values
std::random_device rd;
std::mt19937 re(rd()) ;
std::uniform_int_distribution<uint8_t> dist(0, 255);
for (size_t i {}; i < filePacket_.val; ++i) {
processedData.values.push_back(dist(re));
}
}
TCPSocket socket_;
propertiesPacket properties_;
processedData filePacket_;
uint16_t filePacketSize_;
};
class Server
{
public:
using IOContext = ba::io_context;
using TCPAcceptor = tcp::acceptor;
Server(IOContext& io, uint16_t port)
: socket_(io),
acceptor_(io, {tcp::v4(), port})
{
doAccept();
}
private:
void doAccept()
{
std::clog << __PRETTY_FUNCTION__ << std::endl;
acceptor_.async_accept(socket_,
[this](error_code ec)
{
if (!ec) {
std::clog << "Accepted " << socket_.remote_endpoint() << std::endl;
std::make_shared<ServerConnection>(std::move(socket_))->start();
doAccept();
}
else {
std::clog << "Accept " << ec.message() << std::endl;
}
});
}
TCPSocket socket_;
TCPAcceptor acceptor_;
};
int main(int argc, char* argv[])
{
std::vector<std::string> args(argv, argv + argc);
switch (args.size()) {
case 1:
args = {args.at(0), "6869"};
break;
case 2:
args = {args.at(0), args.at(1)};
break;
default:
std::clog << "usage: " << args.at(0) << " [port = 6869]" << std::endl;
return 1;
}
try {
ba::io_context io;
Server server(io, std::stoi(args.at(1)));
io.run();
} catch (std::exception &e) {
std::clog << e.what() << std::endl;
return 1;
}
return 0;
}
// File: ipc_common.hpp
#include <cstdint>
#include <vector>
#include <sstream>
#include <string>
struct propertiesPacket
{
uint8_t val1;
uint8_t val2;
};
struct processedData
{
uint8_t val;
std::vector<uint8_t> values;
std::string serialize()
{
std::stringstream sstream;
sstream << "val: " << (unsigned int)val << std::endl;
for (const auto &i : values)
{
sstream << i << " ";
}
sstream << std::endl;
return sstream.str();
}
};
What am I doing wrong?
The sample seems corrupted.
For one, args.at(3) and args.at(4) will always throw, because the switch statement earlier exits the client whenever there are more than 2 command-line arguments (default:).
Secondly, the client read uses &response.size but no such member exists at all.
Thirdly, server processData uses a .val property of processedData which isn't even a member (it's a type; likely should be filePacket_.val instead).
Fourthly, it assigns that from properties_.val; which ALSO doesn't exist at all (there's only val1 and val2).
Next up, rd isn't used to initialize the URBG (random engine, re). Instead it calls an unknown identifier named random_device(). Likely ought to be rd() instead.
Again, where it says processedData.val you probably meant filePacket_.val
And where you write processedData.push_back(...) you probably meant to say filePacket_.values.push_back(...)...
There's a spurious ; behind void doAccept() in the Server
By contrast, the ; is missing after each struct definition in ipc_common.hpp
The processedData struct defines a serialize() method that is never used. It also uses a C-style cast where static_cast<unsigned>(val) would be safe.
Weirdly, the server "parses" args, and provides an optional default value BUT it never uses that. Instead, it uses argv[1] without checking argc at all. Oops.
That all aside, now comes the confusing part: how did you want the values to be written? This is not correct:
std::vector<ba::const_buffer> msg{
ba::buffer(&filePacketSize_, sizeof(uint16_t)),
ba::buffer(&filePacket_.val, sizeof(filePacket_.val)),
ba::buffer(&filePacket_.values, sizeof(filePacket_.values))};
values is a std::vector<> so you cannot hope to use it in a bitwise way. It'll just invoke Undefined Behaviour.
Besides, it's pretty unclear why filePacketSize_ is being written (it's never even assigned, or even initialized to a determinate value).
On the client side you read a responseSize as if one would be sent... Maybe you want to keep those two in sync.
Suggested Approach
I'd do away with the separate size value(s), since a vector already keeps track of that. I'd also make sure your processData doesn't always push_back because the vector would always keep growing.
I'd make a protocol that actually sends the message size before the message itself, and makes sure it's correct.
Let's also make the random data naturally printable (a..z) for simplicity:
void processData()
{
// Just for demonstration, we fill the vector with random characters
std::mt19937 re(std::random_device{}());
std::uniform_int_distribution<uint8_t> dist('a', 'z');
filePacket_.values.clear();
std::generate_n(back_inserter(filePacket_.values), properties_.val1,
[&] { return dist(re); });
}
Then in writing, let's do:
processData();
size_t length[] { filePacket_.values.size() };
std::vector<ba::const_buffer> msg{
ba::buffer(length),
ba::buffer(filePacket_.values)};
Note how, again, we avoid manually specifying any buffer sizes. Also, we let the library figure out that values is a vector of POD elements and do the math to calculate the correct start address and buffer size for the element data.
On the client side, we do the inverse:
size_t length = 0;
ba::read(socket, ba::buffer(&length, sizeof(length)));
response.values.resize(length);
ba::read(socket, ba::buffer(response.values));
(Here we can't avoid writing sizeof(length) without getting more clumsy than I'd like).
Full Demo
File ipc_common.hpp
// File: ipc_common.hpp
#include <cstdint>
#include <sstream>
#include <string>
#include <vector>
struct propertiesPacket {
uint8_t val1;
uint8_t val2;
};
struct processedData {
std::vector<uint8_t> values;
};
File server.cpp
#include <boost/asio.hpp>
#include <vector>
#include <iostream>
#include <random>
#include "ipc_common.hpp"
namespace ba = boost::asio;
using boost::asio::ip::tcp;
using boost::system::error_code;
using TCPSocket = tcp::socket;
class ServerConnection : public std::enable_shared_from_this<ServerConnection> {
public:
ServerConnection(TCPSocket socket) : socket_(std::move(socket))
{
}
void start()
{
std::clog << __PRETTY_FUNCTION__ << std::endl;
doRead();
}
private:
void doRead()
{
std::clog << __PRETTY_FUNCTION__ << std::endl;
auto self(shared_from_this());
socket_.async_read_some(
ba::buffer(&properties_, sizeof(properties_)),
[this, self](error_code ec, std::size_t length) {
std::clog << "received " << length << std::endl;
if (!ec) {
processData();
size_t length[] { filePacket_.values.size() };
std::vector<ba::const_buffer> msg{
ba::buffer(length), ba::buffer(filePacket_.values)};
ba::async_write(socket_, msg,
[this, self = shared_from_this()](
error_code ec, std::size_t length) {
std::clog << "written " << length
<< std::endl;
if (!ec)
doRead();
});
}
});
}
void processData()
{
// Just for demonstration, we fill the vector with random characters
std::mt19937 re(std::random_device{}());
std::uniform_int_distribution<uint8_t> dist('a', 'z');
filePacket_.values.clear();
std::generate_n(back_inserter(filePacket_.values), properties_.val1,
[&] { return dist(re); });
}
TCPSocket socket_;
propertiesPacket properties_;
processedData filePacket_;
};
class Server {
public:
using IOContext = ba::io_context;
using TCPAcceptor = tcp::acceptor;
Server(IOContext& io, uint16_t port)
: socket_(io)
, acceptor_(io, {tcp::v4(), port})
{
doAccept();
}
private:
void doAccept()
{
std::clog << __PRETTY_FUNCTION__ << std::endl;
acceptor_.async_accept(socket_, [this](error_code ec) {
if (!ec) {
std::clog << "Accepted " << socket_.remote_endpoint() << std::endl;
std::make_shared<ServerConnection>(std::move(socket_))->start();
doAccept();
} else {
std::clog << "Accept " << ec.message() << std::endl;
}
});
}
TCPSocket socket_;
TCPAcceptor acceptor_;
};
int main(int argc, char* argv[])
{
std::vector<std::string> args(argv, argv + argc);
switch (args.size()) {
case 1: args.push_back("6869"); break;
case 2: break;
default:
std::clog << "usage: " << args.at(0) << " [port = 6869]" << std::endl;
return 1;
}
try {
ba::io_context io;
Server server(io, std::stoi(args.at(1)));
io.run();
} catch (std::exception const &e) {
std::clog << e.what() << std::endl;
return 1;
}
}
File client.cpp
#include <iostream>
#include <vector>
#include <boost/asio.hpp>
#include "ipc_common.hpp"
namespace ba = boost::asio;
using boost::asio::ip::tcp;
int main(int argc, char *argv[])
{
ba::io_context io;
std::vector<std::string> args(argv, argv + argc);
switch (args.size()) {
case 1: args.push_back("localhost"); [[fallthrough]];
case 2: args.push_back("6869"); [[fallthrough]];
case 3: args.push_back("42"); [[fallthrough]];
case 4: args.push_back("99"); [[fallthrough]];
case 5: break;
default:
std::clog << "usage: " << args.at(0)
<< " [host = localhost] [port = 6869] [val1=42] [val2=99]"
<< std::endl;
return 1;
}
try {
propertiesPacket properties;
properties.val1 = std::stoul(args.at(3));
properties.val2 = std::stoul(args.at(4));
tcp::socket socket(io);
tcp::resolver resolver(io);
connect(socket, resolver.resolve({args.at(1), args.at(2)}));
write(socket, ba::buffer(&properties, sizeof(properties)));
processedData response{};
{
size_t length = 0;
ba::read(socket, ba::buffer(&length, sizeof(length)));
response.values.resize(length);
}
std::clog << "client response size: " << response.values.size() << std::endl;
ba::read(socket, ba::buffer(response.values));
std::clog.write(reinterpret_cast<char const*>(response.values.data()),
response.values.size()) << "\n";
// return 0;
} catch (std::exception &e) {
std::clog << e.what() << std::endl;
return 1;
}
}
Portability
You should probably keep byte ordering in mind as well. You could consider using JSON or another well-known serialization format.
I need a parallel synchronous TCP solution using ASIO. I'm trying to get the example code from these examples working: https://github.com/jvillasante/asio-network-programming-cookbook/tree/master/src (using the server in ch04: 02_Sync_parallel_tcp_server.cpp and the client in ch03: 01_Sync_tcp_client.cpp).
The only thing I changed is the logging to append to text files.
The problem is that while the server runs fine, the client dies after receiving a single response from the server:
libc++abi.dylib: terminating with uncaught exception of type boost::exception_detail::clone_impl<boost::exception_detail::error_info_injector<boost::system::system_error> >: shutdown: Socket is not connected
Code for the server:
#include <boost/asio.hpp>
#include <atomic>
#include <memory>
#include <thread>
#include <iostream>
#include <fstream>
using namespace boost;
class Service {
public:
Service() = default;
void StartHandlingClient(std::shared_ptr<asio::ip::tcp::socket> sock) {
std::thread th{[this, sock]() { HandleClient(sock); }};
th.detach();
}
private:
void HandleClient(std::shared_ptr<asio::ip::tcp::socket> sock) {
try {
asio::streambuf request;
asio::read_until(*sock.get(), request, '\n');
std::istream is(&request);
std::string line;
std::getline(is, line);
std::ofstream log("logfile2.txt", std::ios_base::app | std::ios_base::out);
log << "Request: " << line << "\n" << std::flush;
// Emulate request processing.
int i = 0;
while (i != 1000000) i++;
std::this_thread::sleep_for(std::chrono::milliseconds(500));
// Sending response.
std::string response = "Response\n";
asio::write(*sock.get(), asio::buffer(response));
} catch (std::system_error& e) {
std::ofstream log("logfile1.txt", std::ios_base::app | std::ios_base::out);
log << "Error occurred! Error code = " << e.code().value() << ". Message: " << e.what() << "\n" << std::flush;
}
// Clean up
delete this;
}
};
class Acceptor {
public:
Acceptor(asio::io_service& ios, unsigned short port_num)
: m_ios{ios}, m_acceptor{m_ios, asio::ip::tcp::endpoint{asio::ip::address_v4::any(), port_num}} {
m_acceptor.listen();
}
void Accept() {
auto sock = std::make_shared<asio::ip::tcp::socket>(m_ios);
m_acceptor.accept(*sock.get());
(new Service)->StartHandlingClient(sock);
}
private:
asio::io_service& m_ios;
asio::ip::tcp::acceptor m_acceptor;
};
class Server {
public:
Server() : m_stop{false} {}
void Start(unsigned short port_num) {
m_thread.reset(new std::thread([this, port_num]() { Run(port_num); }));
}
void Stop() {
m_stop.store(true);
m_thread->join();
}
private:
void Run(unsigned short port_num) {
Acceptor acc{m_ios, port_num};
while (!m_stop.load()) {
acc.Accept();
}
}
private:
std::unique_ptr<std::thread> m_thread;
std::atomic<bool> m_stop;
asio::io_service m_ios;
};
int main() {
unsigned short port_num = 3333;
try {
Server srv;
srv.Start(port_num);
std::this_thread::sleep_for(std::chrono::seconds(60));
srv.Stop();
} catch (std::system_error& e) {
std::ofstream log("logfile1.txt", std::ios_base::app | std::ios_base::out);
log << "Error occurred! Error code = " << e.code().value() << ". Message: " << e.what() << "\n" << std::flush;
}
return 0;
}
Code for the client:
#include <boost/asio.hpp>
#include <iostream>
#include <fstream>
using namespace boost;
class SyncTCPClient {
public:
SyncTCPClient(const std::string& raw_ip_address, unsigned short port_num)
: m_ep(asio::ip::address::from_string(raw_ip_address), port_num), m_sock(m_ios) {
m_sock.open(m_ep.protocol());
}
~SyncTCPClient() { close(); }
void connect() { m_sock.connect(m_ep); }
std::string emulateLongComputationOp(unsigned int duration_sec) {
std::string request = "EMULATE_LONG_COMP_OP " + std::to_string(duration_sec) + "\n";
sendRequest(request);
return receiveResponse();
}
private:
void close() {
if (m_sock.is_open()) {
std::ofstream log("logfile1.txt", std::ios_base::app | std::ios_base::out);
log << "shutting down\n" << std::flush;
m_sock.shutdown(asio::ip::tcp::socket::shutdown_both);
log << "closing the socket\n" << std::flush;
m_sock.close();
log << "socket closed\n" << std::flush;
}
}
void sendRequest(const std::string& request) { asio::write(m_sock, asio::buffer(request)); }
std::string receiveResponse() {
asio::streambuf buf;
asio::read_until(m_sock, buf, '\n');
std::istream input(&buf);
std::string response;
std::getline(input, response);
return response;
}
private:
asio::io_service m_ios;
asio::ip::tcp::endpoint m_ep;
asio::ip::tcp::socket m_sock;
};
int main() {
const std::string raw_ip_address = "127.0.0.1";
const unsigned short port_num = 3333;
try {
SyncTCPClient client{raw_ip_address, port_num};
// Sync connect.
client.connect();
std::cout << "Sending request to the server...\n";
std::string response = client.emulateLongComputationOp(10);
std::cout << "Response received: " << response << "\n";
} catch (std::system_error& e) {
std::ofstream log("logfile1.txt", std::ios_base::app | std::ios_base::out);
log << "Error occurred! Error code = " << e.code().value() << ". Message: " << e.what() << "\n" << std::flush;
return e.code().value();
}
return 0;
}
I don't see a lot wrong, and I cannot reproduce the problem with the code shown.
Things I do see:
the thread procedure could be static because it's stateless (delete this is a code smell)
the thread needn't be detached (using boost::thread_group::join_all would be much better; see the sketch after this list)
you were writing to the same logfile from server as well as client; results are undefined
spelling .store() and .load() on an atomic<bool> is un-idiomatic
spelling out *sock.get() on any kind of smart pointer is unforgivably un-idiomatic
writing code().value() - swallowing the category - is a BAD thing to do, and e.what() is NOT the way to get the message (use e.code().message()).
If you need flush, you might as well use std::endl
There's really no reason to use a shared_ptr in c++14:
asio::ip::tcp::socket sock(m_ios);
m_acceptor.accept(sock);
std::thread([sock=std::move(sock)]() mutable { HandleClient(sock); }).detach();
In C++11 stick to:
auto sock = std::make_shared<asio::ip::tcp::socket>(m_ios);
m_acceptor.accept(*sock);
std::thread([sock] { HandleClient(*sock); }).detach();
This means HandleClient can just take a ip::tcp::socket& instead of a smart pointer.
INTEGRATING
Server.cpp
#include <atomic>
#include <boost/asio.hpp>
#include <fstream>
#include <iostream>
#include <memory>
#include <thread>
using namespace boost;
static void HandleClient(asio::ip::tcp::socket& sock) {
try {
asio::streambuf buf;
asio::read_until(sock, buf, '\n');
std::string request;
getline(std::istream(&buf), request);
std::ofstream log("server.log", std::ios_base::app | std::ios_base::out);
log << "Request: " << request << std::endl;
// Emulate request processing.
int i = 0;
while (i != 1000000)
i++;
std::this_thread::sleep_for(std::chrono::milliseconds(500));
// Sending response.
std::string response = "Response\n";
asio::write(sock, asio::buffer(response));
} catch (std::system_error &e) {
std::ofstream log("server.log", std::ios_base::app | std::ios_base::out);
log << e.what() << " " << e.code() << ": " << e.code().message() << std::endl;
}
}
class Acceptor {
public:
Acceptor(asio::io_service &ios, unsigned short port_num)
: m_ios{ ios }, m_acceptor{ m_ios, asio::ip::tcp::endpoint{ asio::ip::address_v4::any(), port_num } } {
m_acceptor.listen();
}
void Accept() {
auto sock = std::make_shared<asio::ip::tcp::socket>(m_ios);
m_acceptor.accept(*sock);
std::thread([sock] { HandleClient(*sock); }).detach();
}
private:
asio::io_service &m_ios;
asio::ip::tcp::acceptor m_acceptor;
};
class Server {
public:
Server() : m_stop{ false } {}
void Start(unsigned short port_num) {
m_thread.reset(new std::thread([this, port_num]() { Run(port_num); }));
}
void Stop() {
m_stop = true;
m_thread->join();
}
private:
void Run(unsigned short port_num) {
Acceptor acc{ m_ios, port_num };
while (!m_stop) {
acc.Accept();
}
}
private:
std::unique_ptr<std::thread> m_thread;
std::atomic<bool> m_stop;
asio::io_service m_ios;
};
int main() {
unsigned short port_num = 3333;
try {
Server srv;
srv.Start(port_num);
std::this_thread::sleep_for(std::chrono::seconds(60));
srv.Stop();
} catch (std::system_error &e) {
std::ofstream log("server.log", std::ios_base::app | std::ios_base::out);
log << e.what() << " " << e.code() << ": " << e.code().message() << std::endl;
}
}
Client.cpp
#include <boost/asio.hpp>
#include <fstream>
#include <iostream>
using namespace boost;
class SyncTCPClient {
public:
SyncTCPClient(const std::string &raw_ip_address, unsigned short port_num)
: m_ep(asio::ip::address::from_string(raw_ip_address), port_num), m_sock(m_ios) {
m_sock.open(m_ep.protocol());
}
~SyncTCPClient() { close(); }
void connect() { m_sock.connect(m_ep); }
std::string emulateLongComputationOp(unsigned int duration_sec) {
std::string request = "EMULATE_LONG_COMP_OP " + std::to_string(duration_sec) + "\n";
sendRequest(request);
return receiveResponse();
}
private:
void close() {
if (m_sock.is_open()) {
std::ofstream log("client.log", std::ios_base::app | std::ios_base::out);
log << "shutting down" << std::endl;
m_sock.shutdown(asio::ip::tcp::socket::shutdown_both);
log << "closing the socket" << std::endl;
m_sock.close();
log << "socket closed" << std::endl;
}
}
void sendRequest(const std::string &request) { asio::write(m_sock, asio::buffer(request)); }
std::string receiveResponse() {
asio::streambuf buf;
asio::read_until(m_sock, buf, '\n');
std::string response;
getline(std::istream(&buf), response);
return response;
}
private:
asio::io_service m_ios;
asio::ip::tcp::endpoint m_ep;
asio::ip::tcp::socket m_sock;
};
int main() {
const std::string raw_ip_address = "127.0.0.1";
const unsigned short port_num = 3333;
try {
SyncTCPClient client{ raw_ip_address, port_num };
// Sync connect.
client.connect();
std::cout << "Sending request to the server...\n";
std::string response = client.emulateLongComputationOp(10);
std::cout << "Response received: " << response << std::endl;
} catch (std::system_error &e) {
std::ofstream log("client.log", std::ios_base::app | std::ios_base::out);
log << e.what() << " " << e.code() << ": " << e.code().message() << std::endl;
return e.code().value();
}
}
I know that the thread which runs io_service.run() is responsible for executing the completion handlers of asynchronous operations, but I have problems assigning a thread to an asynchronous operation that fires in the callback function of a parent async operation.
For example, consider the program below:
#ifdef WIN32
#define _WIN32_WINNT 0x0501
#include <stdio.h>
#endif
#include <fstream> // for writting to file
#include <iostream> // for writting to file
#include <stdlib.h> // atoi (string to integer)
#include <chrono>
#include <boost/thread.hpp> // for multi threading
#include <boost/bind.hpp>
#include <boost/asio.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/enable_shared_from_this.hpp>
#include <signal.h> // For Interrupt Handling (Signal Handling Event)
#include <vector>
#define max_length 46
#define server_ip1 "127.0.0.1"
//#define server_ip2 "127.0.0.1"
#define server_port 4000
#define MEM_FN(x) boost::bind(&self_type::x, shared_from_this())
#define MEM_FN1(x,y) boost::bind(&self_type::x, shared_from_this(),y)
#define MEM_FN2(x,y,z) boost::bind(&self_type::x, shared_from_this(),y,z)
void talk1();
using namespace boost::asio;
io_service service, service2;
std::chrono::time_point<std::chrono::high_resolution_clock> t_start;
ip::udp::socket sock1(service);
ip::udp::endpoint ep1( ip::address::from_string(server_ip1), 4000);
//ip::udp::socket sock2(service);
//ip::udp::endpoint ep2( ip::address::from_string(server_ip2), 4000);
std::chrono::time_point<std::chrono::high_resolution_clock> tc;
int OnCon[2];
class talk_to_svr1 : public boost::enable_shared_from_this<talk_to_svr1>, boost::noncopyable {
typedef talk_to_svr1 self_type;
talk_to_svr1(const std::string & message, ip::udp::endpoint ep) : started_(true), message_(message) {}
void start(ip::udp::endpoint ep) {
do_write(message_);
}
public:
typedef boost::system::error_code error_code;
typedef boost::shared_ptr<talk_to_svr1> ptr;
static ptr start(ip::udp::endpoint ep, const std::string & message) {
ptr new_(new talk_to_svr1(message, ep));
new_->start(ep);
return new_;
}
bool started() { return started_; }
private:
void on_read(const error_code & err, size_t bytes) {
this->t2 = std::chrono::high_resolution_clock::now(); // Time of finished reading
if ( !err) {
auto t0_rel = 1.e-9*std::chrono::duration_cast<std::chrono::nanoseconds>(t0-t_start).count();
auto t1_rel = 1.e-9*std::chrono::duration_cast<std::chrono::nanoseconds>(t1-t_start).count();
auto t2_rel = 1.e-9*std::chrono::duration_cast<std::chrono::nanoseconds>(t2-t_start).count();
std::cout << "Sock1: " << t0_rel << ", " << t1_rel << ", " << t2_rel << std::endl;
std::string msg(read_buffer_, bytes);
std::cout << msg << std::endl;
}
else {
std::cout << "Error occured in reading data from server (Sock1)" << std::endl;
}
}
void on_write(const error_code & err, size_t bytes) {
this->t1 = std::chrono::high_resolution_clock::now(); // Time of finished writing
std::cout << "Sock1 successfully sent " << bytes << " bytes of data" << std::endl;
do_read();
}
void do_read() {
sock1.async_receive_from(buffer(read_buffer_),ep1 ,MEM_FN2(on_read,_1,_2));
}
void do_write(const std::string & msg) {
if ( !started() ) return;
std::copy(msg.begin(), msg.end(), write_buffer_);
this->t0 = std::chrono::high_resolution_clock::now(); // Time of starting to write
sock1.async_send_to( buffer(write_buffer_, msg.size()), ep1, MEM_FN2(on_write,_1,_2) );
}
public:
std::chrono::time_point<std::chrono::high_resolution_clock> t0; // Time of starting to write
std::chrono::time_point<std::chrono::high_resolution_clock> t1; // Time of finished writing
std::chrono::time_point<std::chrono::high_resolution_clock> t2; // Time of finished reading
private:
int indx;
char read_buffer_[max_length];
char write_buffer_[max_length];
bool started_;
std::string message_;
};
void wait_s(int seconds)
{
boost::this_thread::sleep_for(boost::chrono::seconds{seconds});
}
void wait_ms(int msecs) {
boost::this_thread::sleep( boost::posix_time::millisec(msecs));
}
void async_thread() {
service.run();
}
void async_thread2() {
service2.run();
}
void GoOperational(int indx) {
if (indx == 0) {
talk_to_svr1::start(ep1, "Message01");
wait_s(1);
talk_to_svr1::start(ep1, "Message02");
wait_s(2);
}
else if (indx == 1) {
//talk_to_svr2::start(ep2, "Masoud");
wait_s(1);
//talk_to_svr2::start(ep2, "Ahmad");
wait_s(2);
}
else {
std::cout << "Wrong index!." << std::endl;
}
}
void on_connect(const boost::system::error_code & err, int ii) {
std::cout << "Socket "<< ii << " is connected."<< std::endl;
OnCon[ii] = 1;
if ( !err) {
tc = std::chrono::high_resolution_clock::now();
auto ty = 1.e-9*std::chrono::duration_cast<std::chrono::nanoseconds>(tc-t_start).count();
std::cout << "Sock " << ii << " connected at time: " << ty << " seconds" << std::endl;
if ( (OnCon[0] /*+ OnCon[1]*/ ) == 1) {
GoOperational(0);
//GoOperational(1);
}
}
else {
std::cout << "Socket " << ii << "had a problem for connecting to server.";
}
}
int main(int argc, char* argv[]) {
OnCon[0] = 0;
OnCon[1] = 0;
ep1 = ep1;
//ep2 = ep2;
std::cout.precision(9);
std::cout << "///////////////////////" << std::endl;
std::cout << "Socket Number, Time of starting to write, Time of finished writting, time of finished reading" << std::endl;
t_start = std::chrono::high_resolution_clock::now();
sock1.async_connect(ep1, boost::bind(on_connect, boost::asio::placeholders::error, 0));
//sock2.async_connect(ep2, boost::bind(on_connect, boost::asio::placeholders::error, 1));
boost::thread b{boost::bind(async_thread)};
b.join();
}
In this program I have a global UDP socket named sock1 which connects by running sock1.async_connect() at line #9 of the main function. In the callback of this asynchronous operation, I make two instances of the talk_to_svr1 class, each of which is responsible for sending a message to the server and then receiving the response asynchronously.
I need to wait 3 seconds before sending the second message, and that is why I called wait_s(1) before making the second instance of talk_to_svr1. The problem is that calling wait_s(1), in addition to pausing the main thread, also pauses the asynchronous sending operation, which is not desired.
I would be grateful if anybody could change the above code so that another thread becomes responsible for asynchronously sending the message to the server, so that calling wait_s(1) does not pause the sending operation.
Note: I posted an alternative using coroutines as well
Asynchronous coding by definition doesn't require you to "control" threads. In fact, you shouldn't need threads. Of course, you can't block inside completion handlers because that would hinder progress.
You can simply use a timer expiring in 3s, async_wait for it, and send the second request in its completion handler.
Here's a big cleanup of your code. Note that I removed all use of global variables. They were making things very error-prone and leading to a lot of duplication (in fact, talk_to_svr1 hardcoded ep1 and sock1, so it was useless for your second channel, which was largely commented out).
The crux of the change is to have message_operation take a continuation:
template <typename F_>
void async_message(udp::socket& s, std::string const& message, F_&& handler) {
using Op = message_operation<F_>;
boost::shared_ptr<Op> new_(new Op(s, message, std::forward<F_>(handler)));
new_->do_write();
}
When the message/response is completed, handler is called. Now, we can implement the application protocol (basically what you tried to capture in on_connect/GoOperational):
////////////////////////////////////////////////////
// basic protocol (2 messages, 1 delay)
struct ApplicationProtocol {
ApplicationProtocol(ba::io_service& service, udp::endpoint ep, std::string m1, std::string m2, std::chrono::seconds delay = 3s)
: _service(service),
_endpoint(ep),
message1(std::move(m1)), message2(std::move(m2)),
delay(delay), timer(service)
{ }
void go() {
_socket.async_connect(_endpoint, boost::bind(&ApplicationProtocol::on_connect, this, _1));
}
private:
ba::io_service& _service;
udp::socket _socket{_service};
udp::endpoint _endpoint;
std::string message1, message2;
std::chrono::seconds delay;
ba::high_resolution_timer timer;
void on_connect(error_code ec) {
std::cout << _endpoint << " connected at " << relatime() << " ms\n";
if (!ec) {
async_message(_socket, message1, boost::bind(&ApplicationProtocol::on_message1_sent, this, _1, _2));
} else {
std::cout << "Socket had a problem for connecting to server.";
}
}
void on_message1_sent(error_code ec, std::string response) {
if (ec)
std::cout << "Message 1 failed: " << ec.message() << "\n";
else {
std::cout << "Message 1 returned: '" << response << "'\n";
timer.expires_from_now(delay);
timer.async_wait(boost::bind(&ApplicationProtocol::on_delay_complete, this, _1));
}
}
void on_delay_complete(error_code ec) {
if (ec)
std::cout << "Delay faile: " << ec.message() << "\n";
else {
std::cout << "Delay completed\n";
async_message(_socket, message2, boost::bind(&ApplicationProtocol::on_message2_sent, this, _1, _2));
}
}
void on_message2_sent(error_code ec, std::string response) {
if (ec)
std::cout << "Message 2 failed: " << ec.message() << "\n";
else {
std::cout << "Message 2 returned: '" << response << "'\n";
}
}
};
Note how much simpler it becomes to use it:
int main() {
ba::io_service service;
std::cout.precision(2);
std::cout << std::fixed;
ApplicationProtocol
channel1(service, {{}, 4000}, "Message01\n", "Message02\n", 3s),
channel2(service, {{}, 4001}, "Masoud\n", "Ahmad\n", 2s);
channel1.go();
channel2.go();
service.run();
}
When running two udp services like so:
yes first|nl|netcat -ulp 4000& yes second|nl|netcat -ulp 4001& time wait
We get the following output (Live On Coliru):
0.0.0.0:4000 connected at 1.87 ms
0.0.0.0:4001 connected at 1.99 ms
127.0.0.1:4000 successfully sent 10 bytes of data
127.0.0.1:4001 successfully sent 7 bytes of data
127.0.0.1:4000: start 1.91, written 2.03, finished 2.25 ms
Message 1 returned: ' 1 first
2 first
3 first
4 '
127.0.0.1:4001: start 2.00, written 2.06, finished 2.34 ms
Message 1 returned: ' 1 second
2 second
3 second
'
Delay completed
127.0.0.1:4001 successfully sent 6 bytes of data
127.0.0.1:4001: start 2002.46, written 2002.49, finished 2002.53 ms
Message 2 returned: '47 second
148 second
149 second
150 s'
Delay completed
127.0.0.1:4000 successfully sent 10 bytes of data
127.0.0.1:4000: start 3002.36, written 3002.39, finished 3002.41 ms
Message 2 returned: 'first
159 first
160 first
161 first
'
And the server side receives the following messages in sequence:
Full Code
Live On Coliru
#include <boost/asio.hpp>
#include <boost/asio/high_resolution_timer.hpp>
#include <boost/bind.hpp>
#include <boost/enable_shared_from_this.hpp>
#include <boost/shared_ptr.hpp>
#include <chrono>
#include <iostream>
#define MEM_FN2(x, y, z) boost::bind(&self_type::x, shared_from_this(), y, z)
namespace ba = boost::asio;
using ba::ip::udp;
using boost::system::error_code;
using ba::asio_handler_invoke;
////////////////////////////////////////////////////
// timing stuff
using namespace std::chrono_literals;
using hrclock = std::chrono::high_resolution_clock;
using time_point = hrclock::time_point;
static double relatime(time_point tp = hrclock::now()) {
static const time_point t_start = hrclock::now();
return (tp - t_start)/1.0ms;
}
////////////////////////////////////////////////////
// message operation - with F continuation
template <typename F>
class message_operation : public boost::enable_shared_from_this<message_operation<F> >, boost::noncopyable {
typedef message_operation self_type;
template <typename F_>
friend void async_message(udp::socket&, std::string const&, F_&&);
private:
template <typename F_>
message_operation(udp::socket& s, std::string message, F_&& handler)
: _socket(s), _endpoint(s.remote_endpoint()), handler_(std::forward<F_>(handler)), message_(std::move(message)) {}
using boost::enable_shared_from_this<message_operation>::shared_from_this;
void do_write() {
t0 = hrclock::now(); // Time of starting to write
_socket.async_send_to(ba::buffer(message_), _endpoint, MEM_FN2(on_write, _1, _2));
}
void on_write(const error_code & err, size_t bytes) {
t1 = hrclock::now(); // Time of finished writing
if (err)
handler_(err, "");
else
{
std::cout << _endpoint << " successfully sent " << bytes << " bytes of data\n";
do_read();
}
}
void do_read() {
_socket.async_receive_from(ba::buffer(read_buffer_), _sender, MEM_FN2(on_read, _1, _2));
}
void on_read(const error_code &err, size_t bytes) {
t2 = hrclock::now(); // Time of finished reading
if (!err) {
std::cout << _endpoint
<< ": start " << relatime(t0)
<< ", written " << relatime(t1)
<< ", finished " << relatime(t2)
<< " ms\n";
handler_(err, std::string(read_buffer_, bytes));
} else {
std::cout << "Error occured in reading data from server\n";
}
}
time_point t0, t1, t2; // Time of starting to write, finished writing, finished reading
// params
udp::socket& _socket;
udp::endpoint _endpoint;
F handler_;
// sending
std::string message_;
// receiving
udp::endpoint _sender;
char read_buffer_[46];
};
template <typename F_>
void async_message(udp::socket& s, std::string const& message, F_&& handler) {
using Op = message_operation<F_>;
boost::shared_ptr<Op> new_(new Op(s, message, std::forward<F_>(handler)));
new_->do_write();
}
////////////////////////////////////////////////////
// basic protocol (2 messages, 1 delay)
struct ApplicationProtocol {
ApplicationProtocol(ba::io_service& service, udp::endpoint ep, std::string m1, std::string m2, std::chrono::seconds delay = 3s)
: _service(service),
_endpoint(ep),
message1(std::move(m1)), message2(std::move(m2)),
delay(delay), timer(service)
{ }
void go() {
_socket.async_connect(_endpoint, boost::bind(&ApplicationProtocol::on_connect, this, _1));
}
private:
ba::io_service& _service;
udp::socket _socket{_service};
udp::endpoint _endpoint;
std::string message1, message2;
std::chrono::seconds delay;
ba::high_resolution_timer timer;
void on_connect(error_code ec) {
std::cout << _endpoint << " connected at " << relatime() << " ms\n";
if (!ec) {
async_message(_socket, message1, boost::bind(&ApplicationProtocol::on_message1_sent, this, _1, _2));
} else {
std::cout << "Socket had a problem for connecting to server.";
}
}
void on_message1_sent(error_code ec, std::string response) {
if (ec)
std::cout << "Message 1 failed: " << ec.message() << "\n";
else {
std::cout << "Message 1 returned: '" << response << "'\n";
timer.expires_from_now(delay);
timer.async_wait(boost::bind(&ApplicationProtocol::on_delay_complete, this, _1));
}
}
void on_delay_complete(error_code ec) {
if (ec)
std::cout << "Delay faile: " << ec.message() << "\n";
else {
std::cout << "Delay completed\n";
async_message(_socket, message2, boost::bind(&ApplicationProtocol::on_message2_sent, this, _1, _2));
}
}
void on_message2_sent(error_code ec, std::string response) {
if (ec)
std::cout << "Message 2 failed: " << ec.message() << "\n";
else {
std::cout << "Message 2 returned: '" << response << "'\n";
}
}
};
int main() {
ba::io_service service;
relatime(); // start the clock
std::cout.precision(2);
std::cout << std::fixed;
ApplicationProtocol
channel1(service, {{}, 4000}, "Message01\n", "Message02\n", 3s),
channel2(service, {{}, 4001}, "Masoud\n", "Ahmad\n", 2s);
channel1.go();
channel2.go();
service.run();
}
In addition to the "normal" answer posted before, here's one that does exactly the same but using coroutines:
Live On Coliru
#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/high_resolution_timer.hpp>
#include <iostream>
namespace ba = boost::asio;
using ba::ip::udp;
using boost::system::error_code;
////////////////////////////////////////////////////
// timing stuff
using namespace std::chrono_literals;
using hrclock = std::chrono::high_resolution_clock;
using time_point = hrclock::time_point;
static double relatime(time_point tp = hrclock::now()) {
static const time_point t_start = hrclock::now();
return (tp - t_start)/1.0ms;
}
int main() {
ba::io_service service;
relatime(); // start the clock
std::cout.precision(2);
std::cout << std::fixed;
auto go = [&](udp::endpoint ep, std::string const& m1, std::string const& m2, hrclock::duration delay) {
ba::spawn(service, [=,&service](ba::yield_context yield) {
udp::socket sock(service);
time_point t0, t1, t2;
auto async_message = [&](std::string const& message) {
t0 = hrclock::now();
auto bytes = sock.async_send_to(ba::buffer(message), ep, yield);
t1 = hrclock::now();
char read_buffer_[46];
udp::endpoint _sender;
bytes = sock.async_receive_from(ba::buffer(read_buffer_), _sender, yield);
t2 = hrclock::now();
return std::string {read_buffer_, bytes};
};
try {
sock.async_connect(ep, yield);
std::cout << ep << " connected at " << relatime() << " ms\n";
std::cout << "Message 1 returned: '" << async_message(m1) << "'\n";
std::cout << ep << ": start " << relatime(t0) << ", written " << relatime(t1) << ", finished " << relatime(t2) << " ms\n";
ba::high_resolution_timer timer(service, delay);
timer.async_wait(yield);
std::cout << "Message 2 returned: '" << async_message(m2) << "'\n";
std::cout << ep << ": start " << relatime(t0) << ", written " << relatime(t1) << ", finished " << relatime(t2) << " ms\n";
} catch(std::exception const& e) {
std::cout << ep << " error: " << e.what() << "\n";
}
});
};
go({{}, 4000}, "Message01\n", "Message02\n", 3s),
go({{}, 4001}, "Masoud\n", "Ahmad\n", 2s);
service.run();
}
As you can see, using coroutines has the luxury of keeping all coro state "implicitly" on the coro stack. This means: no more ad hoc classes for async operations with state, and vastly reduced lifetime issues.
Output
0.0.0.0:4000 connected at 0.52 ms
Message 1 returned: '0.0.0.0:4001 connected at 0.64 ms
Message 1 returned: ' 1 first
2 first
3 first
4 '
0.0.0.0:4000: start 0.55, written 0.68, finished 0.86 ms
1 second
2 second
3 second
'
0.0.0.0:4001: start 0.65, written 0.70, finished 0.91 ms
Message 2 returned: '47 second
148 second
149 second
150 s'
0.0.0.0:4001: start 2001.03, written 2001.06, finished 2001.07 ms
Message 2 returned: 'first
159 first
160 first
161 first
'
0.0.0.0:4000: start 3001.10, written 3001.15, finished 3001.16 ms
I translated the example from Programming in Lua by Roberto Ierusalimschy for downloading several files via HTTP using coroutines to C++ using boost::asio and stackful coroutines. Here is the code:
#include <iostream>
#include <chrono>
#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
using namespace std;
using namespace boost::asio;
io_service ioService;
void download(const string& host, const string& file, yield_context& yield)
{
clog << "Downloading " << host << file << " ..." << endl;
size_t fileSize = 0;
boost::system::error_code ec;
ip::tcp::resolver resolver(ioService);
ip::tcp::resolver::query query(host, "80");
auto it = resolver.async_resolve(query, yield[ec]);
ip::tcp::socket socket(ioService);
socket.async_connect(*it, yield[ec]);
ostringstream req;
req << "GET " << file << " HTTP/1.0\r\n\r\n";
write(socket, buffer(req.str()));
while (true)
{
char data[8192];
size_t bytesRead = socket.async_read_some(buffer(data), yield[ec]);
if (0 == bytesRead) break;
fileSize += bytesRead;
}
socket.shutdown(ip::tcp::socket::shutdown_both);
socket.close();
clog << file << " size: " << fileSize << endl;
}
int main()
{
auto timeBegin = chrono::high_resolution_clock::now();
vector<pair<string, string>> resources =
{
{"www.w3.org", "/TR/html401/html40.txt"},
{"www.w3.org", "/TR/2002/REC-xhtml1-20020801/xhtml1.pdf"},
{"www.w3.org", "/TR/REC-html32.html"},
{"www.w3.org", "/TR/2000/REC-DOM-Level-2-Core-20001113/DOM2-Core.txt"},
};
for(const auto& res : resources)
{
spawn(ioService, [&res](yield_context yield)
{
download(res.first, res.second, yield);
});
}
ioService.run();
auto timeEnd = chrono::high_resolution_clock::now();
clog << "Time: " << chrono::duration_cast<chrono::milliseconds>(
timeEnd - timeBegin).count() << endl;
return 0;
}
Now I'm trying to translate the code to use stackless coroutines from boost::asio, but the documentation is not enough for me to grok how to organize the code in such a way as to be able to do it. Can someone provide a solution for this?
Here is a solution based on stackless coroutines as provided by Boost. Given that they are essentially a hack, I would not consider the solution particularly elegant. It could probably be done better with C++20, but I think that would be outside the scope of this question.
#include <functional>
#include <iostream>
#include <boost/asio.hpp>
#include <boost/asio/yield.hpp>
using boost::asio::async_write;
using boost::asio::buffer;
using boost::asio::error::eof;
using boost::system::error_code;
using std::placeholders::_1;
using std::placeholders::_2;
/**
* Stackless coroutine for downloading file from host.
*
* The lifetime of the object is limited to one () call. After that,
* the object will be copied and the old object is discarded. For this
* reason, the socket_ and resolver_ member are stored as shared_ptrs,
* so that they can live as long as there is a live copy. An alternative
* solution would be to manager these objects outside of the coroutine
* and to pass them here by reference.
*/
class downloader : boost::asio::coroutine {
using socket_t = boost::asio::ip::tcp::socket;
using resolver_t = boost::asio::ip::tcp::resolver;
public:
downloader(boost::asio::io_service &service, const std::string &host,
const std::string &file)
: socket_{std::make_shared<socket_t>(service)},
resolver_{std::make_shared<resolver_t>(service)}, file_{file},
host_{host} {}
void operator()(error_code ec = error_code(), std::size_t length = 0,
const resolver_t::results_type &results = {}) {
// Check if the last yield resulted in an error.
if (ec) {
if (ec != eof) {
throw boost::system::system_error{ec};
}
}
// Jump to after the previous yield.
reenter(this) {
yield {
resolver_t::query query{host_, "80"};
// Use bind to skip the length parameter not provided by async_resolve
auto result_func = std::bind(&downloader::operator(), this, _1, 0, _2);
resolver_->async_resolve(query, result_func);
}
yield socket_->async_connect(*results, *this);
yield {
std::ostringstream req;
req << "GET " << file_ << " HTTP/1.0\r\n\r\n";
async_write(*socket_, buffer(req.str()), *this);
}
while (true) {
yield {
char data[8192];
socket_->async_read_some(buffer(data), *this);
}
if (length == 0) {
break;
}
fileSize_ += length;
}
std::cout << file_ << " size: " << fileSize_ << std::endl;
socket_->shutdown(socket_t::shutdown_both);
socket_->close();
}
// Uncomment this to show progress and to demonstrate interleaving
// std::cout << file_ << " size: " << fileSize_ << std::endl;
}
private:
std::shared_ptr<socket_t> socket_;
std::shared_ptr<resolver_t> resolver_;
const std::string file_;
const std::string host_;
size_t fileSize_{};
};
int main() {
auto timeBegin = std::chrono::high_resolution_clock::now();
try {
boost::asio::io_service service;
std::vector<std::pair<std::string, std::string>> resources = {
{"www.w3.org", "/TR/html401/html40.txt"},
{"www.w3.org", "/TR/2002/REC-xhtml1-20020801/xhtml1.pdf"},
{"www.w3.org", "/TR/REC-html32.html"},
{"www.w3.org", "/TR/2000/REC-DOM-Level-2-Core-20001113/DOM2-Core.txt"},
};
std::vector<downloader> downloaders{};
std::transform(resources.begin(), resources.end(),
std::back_inserter(downloaders), [&](auto &x) {
return downloader{service, x.first, x.second};
});
std::for_each(downloaders.begin(), downloaders.end(),
[](auto &dl) { dl(); });
service.run();
} catch (std::exception &e) {
std::cerr << "exception: " << e.what() << "\n";
}
auto timeEnd = std::chrono::high_resolution_clock::now();
std::cout << "Time: "
<< std::chrono::duration_cast<std::chrono::milliseconds>(timeEnd -
timeBegin)
.count()
<< std::endl;
return 0;
}
Compiled with Boost 1.72 and g++ -lboost_coroutine -lpthread test.cpp. Example output:
$ ./a.out
/TR/REC-html32.html size: 606
/TR/html401/html40.txt size: 629
/TR/2002/REC-xhtml1-20020801/xhtml1.pdf size: 115777
/TR/2000/REC-DOM-Level-2-Core-20001113/DOM2-Core.txt size: 229699
Time: 1644
The log line at the end of the () function can be uncommented to demonstrate the interleaving of the downloads.