I'm trying to understand how zmq::proxy works, but I'm running into problems: I'd like to have messages routed to the right worker, but it seems the identity and the envelopes are ignored. In the example I would like to route messages from client1 to worker2 and messages from client2 to worker1, but it looks like messages are served on a "first available worker" basis.
Am I doing something wrong, or did I misunderstand how the identity works?
#include <atomic>
#include <cassert>
#include <chrono>
#include <iostream>
#include <thread>
#include <mutex>
#include <zmq.hpp>
#include <zmq_addon.hpp>
using namespace zmq;
std::atomic_bool running;
context_t context(4);
std::mutex mtx;
void client_func(std::string name, std::string target, std::string message)
{
std::this_thread::sleep_for(std::chrono::seconds(1));
socket_t request_socket(context, socket_type::req);
request_socket.connect("inproc://router");
request_socket.setsockopt( ZMQ_IDENTITY, name.c_str(), name.size());
while(running)
{
multipart_t msg;
msg.addstr(target);
msg.addstr("");
msg.addstr(message);
std::cout << name << "sent a message: " << message << std::endl;
msg.send(request_socket);
multipart_t reply;
if(reply.recv(request_socket))
{
std::unique_lock<std::mutex> lock(mtx); // guard console output while printing the reply
std::cout << name << " received a reply!" << std::endl;
for(size_t i = 0 ; i < reply.size() ; i++)
{
std::string theData(static_cast<char*>(reply[i].data()),reply[i].size());
std::cout << "Part " << i << ": " << theData << std::endl;
}
}
std::this_thread::sleep_for(std::chrono::seconds(1));
}
request_socket.close();
}
void worker_func(std::string name, std::string answer)
{
std::this_thread::sleep_for(std::chrono::seconds(1));
socket_t response_socket(context, socket_type::rep);
response_socket.connect("inproc://dealer");
response_socket.setsockopt( ZMQ_IDENTITY, "resp", 4);
while(running)
{
multipart_t request;
if(request.recv(response_socket))
{
std::unique_lock<std::mutex> lock(mtx); // guard console output while printing the request
std::cout << name << " received a request:" << std::endl;
for(size_t i = 0 ; i < request.size() ; i++)
{
std::string theData(static_cast<char*>(request[i].data()),request[i].size());
std::cout << "Part " << i << ": " << theData << std::endl;
}
std::string questioner(static_cast<char*>(request[0].data()),request[0].size());
multipart_t msg;
msg.addstr(questioner);
msg.addstr("");
msg.addstr(answer);
msg.send(response_socket);
}
}
response_socket.close();
}
int main(int argc, char* argv[])
{
running = true;
zmq::socket_t dealer(context, zmq::socket_type::dealer);
zmq::socket_t router(context, zmq::socket_type::router);
dealer.bind("inproc://dealer");
router.bind("inproc://router");
std::thread client1(client_func, "Client1", "Worker2", "Ciao");
std::thread client2(client_func, "Client2", "Worker1", "Hello");
std::thread worker1(worker_func, "Worker1","World");
std::thread worker2(worker_func, "Worker2","Mondo");
zmq::proxy(dealer,router);
dealer.close();
router.close();
if(client1.joinable())
client1.join();
if(client2.joinable())
client2.join();
if(worker1.joinable())
worker1.join();
if(worker2.joinable())
worker2.join();
return 0;
}
From the docs:
When the frontend is a ZMQ_ROUTER socket, and the backend is a ZMQ_DEALER socket, the proxy shall act as a shared queue that collects requests from a set of clients, and distributes these fairly among a set of services. Requests shall be fair-queued from frontend connections and distributed evenly across backend connections. Replies shall automatically return to the client that made the original request.
The proxy handles multiple clients and uses multiple workers to process the requests. The identity is used to route the response back to the right client. You cannot use the identity to "select" a specific worker.
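If the goal really is to let a client address a specific worker, the proxy is the wrong tool: you would give every worker a unique identity, make the backend a ROUTER as well, and move the frames yourself. Below is a rough sketch of that idea (my code, not a drop-in fix; it assumes REQ clients and REP workers exactly as in the question, and a cppzmq recent enough to provide socket_t::handle()):
// Sketch only: a manual forwarding loop instead of zmq::proxy, with a ROUTER
// backend so the first application frame selects the worker by identity.
// Assumes each worker sets a unique ZMQ_IDENTITY ("Worker1", "Worker2")
// before connecting its REP socket to "inproc://dealer".
zmq::socket_t frontend(context, zmq::socket_type::router);
zmq::socket_t backend(context, zmq::socket_type::router);
frontend.bind("inproc://router");
backend.bind("inproc://dealer");

zmq::pollitem_t items[] = {
    { frontend.handle(), 0, ZMQ_POLLIN, 0 },
    { backend.handle(),  0, ZMQ_POLLIN, 0 },
};
while (running)
{
    zmq::poll(items, 2, std::chrono::milliseconds(100));
    if (items[0].revents & ZMQ_POLLIN)          // client -> worker
    {
        zmq::multipart_t msg;
        msg.recv(frontend);                     // [client][""][target][""][payload]
        std::string client = msg.popstr();      // identity prepended by the ROUTER
        msg.pop();                              // empty delimiter added by REQ
        std::string target = msg.popstr();      // the worker the client asked for
        msg.pushstr(client);                    // keep the client id for the reply
        msg.pushstr("");
        msg.pushstr(target);                    // backend ROUTER routes on this frame
        msg.send(backend);
    }
    if (items[1].revents & ZMQ_POLLIN)          // worker -> client
    {
        zmq::multipart_t msg;
        msg.recv(backend);                      // [worker][""][client][""][answer]
        msg.popstr();                           // drop the worker id
        msg.pop();                              // drop the empty delimiter
        msg.send(frontend);                     // first frame (client id) routes it back
    }
}
With this frame layout the worker's REP socket still sees the client identity as its first application frame, so the worker code above keeps working; only the workers' identities need to become unique ("Worker1", "Worker2") instead of the shared "resp".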
I'm trying to write a very simple client/server app with Boost.Asio sockets. I need a server to run and a single client to connect, send data, disconnect and possibly reconnect later and repeat.
The code reduced to the minimum is here:
Server app:
#include <iostream>
#include <boost/asio.hpp>
#include <boost/thread.hpp>
using boost::asio::ip::tcp;
class TheServer
{
public:
TheServer(int port) : m_port(port)
{
m_pIOService = new boost::asio::io_service;
m_pThread = new boost::thread(boost::bind<void>(&TheServer::run, this));
listenForNewConnection();
}
~TheServer()
{
m_bContinueReading = false;
m_pIOService->stop();
m_pThread->join();
delete m_pThread;
delete m_pSocket;
delete m_pAcceptor;
delete m_pIOService;
}
void listenForNewConnection()
{
if (m_pSocket)
delete m_pSocket;
if (m_pAcceptor)
delete m_pAcceptor;
// start new acceptor operation
m_pSocket = new tcp::socket(*m_pIOService);
m_pAcceptor = new tcp::acceptor(*m_pIOService, tcp::endpoint(tcp::v4(), m_port));
std::cout << "Starting async_accept" << std::endl;
m_pAcceptor->async_accept(*m_pSocket,
boost::bind<void>(&TheServer::readSession, this, boost::asio::placeholders::error));
}
void readSession(boost::system::error_code error)
{
if (!error)
{
std::cout << "Connection established" << std::endl;
while ( m_bContinueReading )
{
static unsigned char buffer[1000];
boost::system::error_code error;
size_t length = m_pSocket->read_some(boost::asio::buffer(&buffer, 1000), error);
if (!error && length != 0)
{
std::cout << "Received " << buffer << std::endl;
}
else
{
std::cout << "Received error, connection likely closed by peer" << std::endl;
break;
}
}
std::cout << "Connection closed" << std::endl;
listenForNewConnection();
}
else
{
std::cout << "Connection error" << std::endl;
}
std::cout << "Ending readSession" << std::endl;
}
void run()
{
while (m_bContinueReading)
m_pIOService->run_one();
std::cout << "Exiting run thread" << std::endl;
}
bool m_bContinueReading = true;
boost::asio::io_service* m_pIOService = NULL;
tcp::socket* m_pSocket = NULL;
tcp::acceptor* m_pAcceptor = NULL;
boost::thread* m_pThread = NULL;
int m_port;
};
int main(int argc, char* argv[])
{
TheServer* server = new TheServer(1900);
std::cout << "Press Enter to quit" << std::endl;
std::string sGot;
getline(std::cin, sGot);
delete server;
return 0;
}
Client app:
#include <iostream>
#include <boost/asio.hpp>
#include <boost/thread.hpp>
int main(int argc, char* argv[])
{
std::cout << std::endl;
std::cout << "Starting client" << std::endl;
using boost::asio::ip::tcp;
boost::asio::io_service* m_pIOService = NULL;
tcp::socket* m_pSocket = NULL;
try
{
m_pIOService = new boost::asio::io_service;
std::stringstream sPort;
sPort << 1900;
tcp::resolver resolver(*m_pIOService);
tcp::resolver::query query(tcp::v4(), "localhost", sPort.str());
tcp::resolver::iterator iterator = resolver.resolve(query);
m_pSocket = new tcp::socket(*m_pIOService);
m_pSocket->connect(*iterator);
std::cout << "Client conected" << std::endl;
std::string hello = "Hello World";
boost::asio::write( *m_pSocket, boost::asio::buffer(hello.data(), hello.size()) );
boost::this_thread::sleep(boost::posix_time::milliseconds(100));
hello += "(2)";
boost::asio::write(*m_pSocket, boost::asio::buffer(hello.data(), hello.size()));
}
catch (std::exception& e)
{
delete m_pSocket;
m_pSocket = NULL;
std::cerr << "Exception: " << e.what() << "\n";
}
return 0;
}
Note that I use non-blocking async_accept to be able to cleanly stop the server when Enter is pressed.
Under Windows, it works perfectly fine, I run the server, it outputs:
Starting async_accept
Press Enter to quit
For each client app run, it outputs:
Starting client
Client conected
and server app outputs:
Connection established
Received Hello World
Received Hello World(2)
Received error, connection likely closed by peer
Connection closed
Starting async_accept
Ending readSession
Then when I press Enter in server app console, it outputs Exiting run thread and cleanly stops.
Now, when I compile this same code under Linux, the client outputs the same as under Windows, but nothing happens on the server side...
Any idea what's wrong?
There are many questionable elements.
There is a classic data race on m_bContinueReading: you write it from one thread, but the reading thread may never observe the change because of that race.
The second race condition is likely your problem:
m_pThread = new boost::thread(boost::bind<void>(&TheServer::run, this));
listenForNewConnection();
Here the run thread may complete before you ever post the first work. You can use a work-guard to prevent this. In your specific code you would already fix it by reordering the lines:
listenForNewConnection();
m_pThread = new boost::thread(boost::bind<void>(&TheServer::run, this));
I would not do this, though, because I would not have those statements in my constructor body at all. See below for the work-guard solution.
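For reference, here is a minimal, self-contained sketch of the work-guard idea (using the classic io_service::work, as in the full listing further down; the names are mine, not from the question):
#include <boost/asio.hpp>
#include <boost/optional.hpp>
#include <iostream>
#include <thread>

int main() {
    boost::asio::io_service io;
    // keep run() from returning while there is no pending work yet
    boost::optional<boost::asio::io_service::work> work{io};
    std::thread t([&io] { io.run(); }); // safe even before any handler is posted

    io.post([] { std::cout << "first real work item\n"; });

    work.reset(); // release the guard: run() returns once queued work is done
    t.join();
}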
There is a lot of raw pointer handling and new/delete going on, which merely invites errors.
You use the buffer assuming that it is NUL-terminated. This is especially unwarranted because you use read_some which will read partial messages as they arrive on the wire.
You use a static buffer while the code may have different instances of the class. This is a false optimization. Instead, prevent all the allocations! Combining with the previous item:
char buffer[1000];
while (m_bContinueReading) {
size_t length = m_Socket.read_some(asio::buffer(&buffer, 1000), ec);
std::cout << "Received " << length << " (" << quoted(std::string(buffer, length)) << "), "
<< ec.message() << std::endl;
if (ec.failed())
break;
}
You always start a new acceptor, although there is no need: a single acceptor can accept as many connections as you wish. In fact, the method shown runs into two problems:
lingering connections can prevent the new acceptor from binding to the same port; you could alleviate that with
m_Acceptor.set_option(tcp::acceptor::reuse_address(true));
the destroyed acceptor may have a backlog of connections, which are discarded
Typically you want to support concurrent connections, so you can split off a "readSession" and immediately accept the next connection. Now, strangely, your code seems to expect clients to stay connected until the server is prompted to shut down (from the console), but after that you somehow start listening for new connections (even though you know the service will be stopping, and m_bContinueReading will remain false).
In the grand scheme of things, you don't want to destroy the acceptor unless something invalidated it. In practice this is rare (e.g. on Linux the acceptor will happily survive disabling/re-enabling the network adaptor).
You have spurious explicit template arguments (bind<void>). This is an anti-pattern and may lead to subtle problems.
The same goes for the buffer: just say asio::buffer(buffer) and the correctness concerns disappear. In fact, don't use C-style arrays:
std::array<char, 1000> buffer;
size_t n = m_Socket.read_some(asio::buffer(buffer), ec);
std::cout << "Received " << n << " " << quoted(std::string(buffer.data(), n))
<< " (" << ec.message() << ")" << std::endl;
Instead of running a manual run_one() loop (where you forget to handle exceptions), consider "just" letting the service run(). Then you can .cancel() the acceptor to let the service run out of work.
In fact, this subtlety isn't required in your code, since your code already forces "ungraceful" shutdown anyway:
m_IOService.stop(); // nuclear option
m_Thread.join();
More gentle would be e.g.
m_Acceptor.cancel();
m_Socket.cancel();
m_Thread.join();
In which case you can respond to the completion error_code == error::operation_aborted to stop the session/accept loop.
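A compact, standalone sketch of that pattern, reacting to error::operation_aborted in a self-restarting accept loop (port and delay picked arbitrarily; the accepted connection is simply dropped here):
#include <boost/asio.hpp>
#include <chrono>
#include <functional>
#include <iostream>
namespace asio = boost::asio;
using asio::ip::tcp;
using boost::system::error_code;

int main() {
    asio::io_service io;
    tcp::acceptor acceptor(io, {tcp::v4(), 1900});
    tcp::socket socket(io);

    std::function<void(error_code)> accept_loop = [&](error_code ec) {
        if (ec == asio::error::operation_aborted) {
            std::cout << "acceptor cancelled, stopping accept loop\n";
            return;                       // let run() run out of work naturally
        }
        if (!ec)
            std::cout << "accepted " << socket.remote_endpoint() << "\n";
        socket = tcp::socket(io);         // drop the old connection, reuse the acceptor
        acceptor.async_accept(socket, accept_loop);
    };
    acceptor.async_accept(socket, accept_loop);

    // stand-in for the "Enter pressed" shutdown path: cancel after 2 seconds
    asio::steady_timer quit(io, std::chrono::seconds(2));
    quit.async_wait([&](error_code) { acceptor.cancel(); });

    io.run();                             // returns once all work has completed
}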
Technically, you may be able to do away with the boolean flag altogether. I keep it because it allows us to handle multiple sessions per thread in a "fire-and-forget" manner.
In the client you have many of the same problems, and also a gotcha where you only look at the first resolver result (assuming there was one), ignoring the rest. You can use asio::connect instead of m_Socket.connect to try all resolved entries.
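As a standalone illustration of just that change (the full listing below does the same thing in run_client), with hypothetical host/port values:
#include <boost/asio.hpp>
#include <iostream>
namespace asio = boost::asio;
using asio::ip::tcp;

int main() {
    asio::io_service io;
    tcp::resolver resolver(io);
    tcp::socket socket(io);
    // tries every resolved endpoint (IPv4/IPv6, multiple records) until one succeeds;
    // requires a listener on localhost:1900, e.g. the server above
    asio::connect(socket, resolver.resolve("localhost", "1900"));
    std::cout << "connected to " << socket.remote_endpoint() << "\n";
}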
Addressing the majority of these issues, simplifying the code:
Live On Coliru
#include <boost/asio.hpp>
#include <boost/bind/bind.hpp>
#include <boost/optional.hpp>
#include <iomanip>
#include <iostream>
namespace asio = boost::asio;
using asio::ip::tcp;
using namespace std::chrono_literals;
using boost::system::error_code;
class TheServer {
public:
TheServer(int port) : m_port(port) {
m_Acceptor.set_option(tcp::acceptor::reuse_address(true));
do_accept();
}
~TheServer() {
m_shutdownRequested = true;
m_Work.reset(); // release the work-guard
m_Acceptor.cancel();
m_Thread.join();
}
private:
void do_accept() {
std::cout << "Starting async_accept" << std::endl;
m_Acceptor.async_accept( //
m_Socket, boost::bind(&TheServer::on_accept, this, asio::placeholders::error));
}
void on_accept(error_code ec) {
if (!ec) {
std::cout << "Connection established " << m_Socket.remote_endpoint() << std::endl;
// leave session running in the background:
std::thread(&TheServer::read_session_thread, this, std::move(m_Socket)).detach();
do_accept(); // and immediately accept new connection(s)
} else {
std::cout << "Connection error (" << ec.message() << ")" << std::endl;
std::cout << "Ending readSession" << std::endl;
}
}
void read_session_thread(tcp::socket sock) {
std::array<char, 1000> buffer;
for (error_code ec;;) {
size_t n = sock.read_some(asio::buffer(buffer), ec);
std::cout << "Received " << n << " " << quoted(std::string(buffer.data(), n)) << " ("
<< ec.message() << ")" << std::endl;
if (ec.failed() || m_shutdownRequested)
break;
}
std::cout << "Connection closed" << std::endl;
}
void thread_func() {
// http://www.boost.org/doc/libs/1_61_0/doc/html/boost_asio/reference/io_service.html#boost_asio.reference.io_service.effect_of_exceptions_thrown_from_handlers
for (;;) {
try {
m_IOService.run();
break; // exited normally
} catch (std::exception const& e) {
std::cerr << "[eventloop] error: " << e.what();
} catch (...) {
std::cerr << "[eventloop] unexpected error";
}
}
std::cout << "Exiting service thread" << std::endl;
}
std::atomic_bool m_shutdownRequested{false};
uint16_t m_port;
asio::io_service m_IOService;
boost::optional<asio::io_service::work> m_Work{m_IOService};
tcp::socket m_Socket{m_IOService};
tcp::acceptor m_Acceptor{m_IOService, tcp::endpoint{tcp::v4(), m_port}};
std::thread m_Thread{boost::bind(&TheServer::thread_func, this)};
};
constexpr uint16_t s_port = 1900;
void run_server() {
TheServer server(s_port);
std::cout << "Press Enter to quit" << std::endl;
std::cin.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
}
void run_client() {
std::cout << std::endl;
std::cout << "Starting client" << std::endl;
using asio::ip::tcp;
try {
asio::io_service m_IOService;
tcp::resolver resolver(m_IOService);
auto iterator = resolver.resolve("localhost", std::to_string(s_port));
tcp::socket m_Socket(m_IOService);
connect(m_Socket, iterator);
std::cout << "Client connected" << std::endl;
std::string hello = "Hello World";
write(m_Socket, asio::buffer(hello));
std::this_thread::sleep_for(100ms);
hello += "(2)";
write(m_Socket, asio::buffer(hello));
} catch (std::exception& e) {
std::cerr << "Exception: " << e.what() << "\n";
}
}
int main(int argc, char**) {
if (argc>1)
run_server();
else
run_client();
}
I am able to send a UDP message to a particular IP and port using the Poco socket classes, but I am unable to receive the UDP message: it gets stuck at the receiveFrom API of DatagramSocket, as in the code below.
I am sending a message every second and also have to receive an acknowledgement every second; for that I have a timer, with Client and Server threads running in parallel. The problem is that I am unable to receive the UDP packets even though they are being captured in Wireshark. It gets stuck at receiveFrom.
Please find the Client, Server and main files below.
Server.hpp
#pragma once
#include "Poco/Net/StreamSocket.h"
#include "Poco/Net/DatagramSocket.h"
#include "Poco/Net/SocketAddress.h"
#include "Poco/Net/MulticastSocket.h"
#include "Poco/RunnableAdapter.h"
#include "Poco/Thread.h"
#include <cstring>
#include <iostream>
using namespace std;
using namespace Poco;
using namespace Poco::Net;
struct Server
{
int bufferSize;
SocketAddress sockets;
static bool debugModeEnabled;
Server() :
bufferSize(1024) { //sockets = SocketAddress(10000);
}
Server(const UInt16& port, const int& bufferSize)
{
sockets = SocketAddress(port);
this->bufferSize = bufferSize;
}
void receiveMessages()
{
char buffer[bufferSize];
try
{
Poco::Net::DatagramSocket datagram(sockets);//(socket);
datagram.bind(sockets);
cout << "Server started socket" << endl;
while (!datagram.available())
{
SocketAddress sender;
cout << "Server started socket 2" << endl;
int size = datagram.receiveFrom(buffer, bufferSize, sender);
//int size = datagram.receiveBytes(buffer, bufferSize);
cout << "received bytes size" << size << endl;
buffer[size] = '\0';
//std::string str(buffer);
//cout << (debugModeEnabled ? (sender.toString() + ": ") : "- ") << buffer << endl;
cout << "received: " << size << buffer << endl;
//cout << buffer << "Server adasdasd" << endl;
if (string(buffer) == "\\end")
{
//cerr << "\nUser: " << sender.toString() << " ended connection" << endl;
datagram.close(); // Closes the server
}
}
}
catch (const Poco::Exception& exc)
{
std::cerr << exc.displayText() << std::endl;
}
}
};
bool Server::debugModeEnabled = false;
Client.hpp
#pragma once
#include "Poco/Net/DatagramSocket.h"
#include "Poco/Net/SocketAddress.h"
#include "Poco/RunnableAdapter.h"
#include "Poco/Thread.h"
#include <iostream>
#include <string>
using namespace std;
using namespace Poco;
using namespace Poco::Net;
struct Client
{
SocketAddress socket;
string str;
// By default the client connects to itself
Client() { socket = SocketAddress("127.0.0.1", 10000); }
Client(const Poco::Net::IPAddress& IP, const UInt16& port, const string& val) :
str(val)
{
socket = SocketAddress(IP, port);
}
void sendMessages()
{
DatagramSocket datagram;
datagram.connect(socket);
string message = str;
//cout << "sending: " << hex << hexify(message) << endl;
unsigned int bytes_sent = 0;
while (!datagram.available())
{
//getline(cin, message);
//bytes_sent = datagram.sendBytes(message.data(), static_cast<int>(message.size()));
bytes_sent = datagram.sendTo(message.data(), static_cast<int>(message.size()),socket);
cout << "number of bytes sent: " << std::dec << bytes_sent << endl;
if (bytes_sent >= message.size())
{
datagram.close();
}
}
}
string IP() { return socket.host().toString(); }
UInt16 port() { return socket.port(); }
static void sendMessage(const Poco::Net::IPAddress& IP, const UInt16& port, const string& message)
{
SocketAddress socket(IP, port);
DatagramSocket datagram;
datagram.connect(socket);
datagram.sendBytes(message.data(), int(message.size()));
}
};
main.cpp
int bufferSize = 1024;
int exit_status = 0;
Client client(IP, ciPort, str);
Server server(mdilPort, bufferSize);
RunnableAdapter<Client> clientRunnable(client, &Client::sendMessages);
RunnableAdapter<Server> serverRunnable(server, &Server::receiveMessages);
Thread clientThread, serverThread;
// Client::sendMessage(IP, ciPort, "hello!!");
try
{
Timer t = Timer();
t.setInterval([&]() {
cout << "client Tick" << endl;
// pApp->SendIndications();
clientThread.start(clientRunnable);
clientThread.join();
},
1000);
t.setInterval([&]() {
cout<< "server Tick" << endl;
serverThread.start(serverRunnable);
serverThread.join();
},
1000);
t.setTimeout([&]() {
std::cout << "Hey.. After 30s. But I will stop the timer!" << std::endl;
t.stop();
exit(exit_status);
},
30000);
std::cout << "I am Timer" << std::endl;
while (true); // Keep main thread active
}
catch (...)
{
std::cout << "catched exception" << std::endl;
//return -1;
}
I tried the conventional socket programming APIs to receive the UDP packets, but there it also gets stuck at the receiveFrom call. I also tried running the client and server in different processes to make sure there is no issue with multithreading synchronization, but neither approach helped. I am able to capture the response in Wireshark but unable to receive it on the application side using the Poco socket APIs. I also allowed Visual Studio Code through the firewall.
I have to handle information from 100 ports in parallel, for 100 ms every second.
I am using Ubuntu.
I did some research and saw that the poll() function is a good candidate for avoiding 100 threads to handle data coming in parallel over UDP.
I did the main part with Boost and tried to integrate poll() with it.
The problem is that when the client sends data to the server, I receive nothing.
According to Wireshark, the data arrives on the right host (localhost, port 1234).
Did I miss something, or did I get something wrong?
The test code (server):
#include <deque>
#include <iostream>
#include <chrono>
#include <thread>
#include <sys/poll.h>
#include <boost/optional.hpp>
#include <boost/asio.hpp>
#include <boost/bind/bind.hpp>
using boost::asio::ip::udp;
using namespace boost::asio;
using namespace std::chrono_literals;
std::string ip_address = "127.0.0.1";
template<typename T, size_t N>
size_t arraySize( T(&)[N] )
{
return(N);
}
class UdpReceiver
{
using Resolver = udp::resolver;
using Sockets = std::deque<udp::socket>;
using EndPoint = udp::endpoint;
using Buffer = std::array<char, 100>; // receiver buffer
public:
explicit UdpReceiver()
: work_(std::ref(resolver_context)), thread_( [this]{ resolver_context.run(); })
{ }
~UdpReceiver()
{
work_ = boost::none; // using work to keep run active always !
thread_.join();
}
void async_resolve(udp::resolver::query const& query_) {
resolver_context.post([this, query_] { do_resolve(query_); });
}
// callback for event-loop in main thread
void run_handler(int fd_idx) {
// start reading
auto result = read(fd_idx, receive_buf.data(), sizeof(Buffer));
// increment number of received packets
received_packets = received_packets + 1;
std::cout << "Received bytes " << result << " current recorded packets " << received_packets <<'\n';
// run handler posted from resolver threads
handler_context.poll();
handler_context.reset();
}
static void handle_receive(boost::system::error_code error, udp::resolver::iterator const& iterator) {
std::cout << "handle_resolve:\n"
" " << error.message() << "\n";
if (!error)
std::cout << " " << iterator->endpoint() << "\n";
}
// get current file descriptor
int fd(size_t idx)
{
return sockets[idx].native_handle();
}
private:
void do_resolve(boost::asio::ip::udp::resolver::query const& query_) {
boost::system::error_code error;
Resolver resolver(resolver_context);
Resolver::iterator result = resolver.resolve(query_, error);
sockets.emplace_back(udp::socket(resolver_context, result->endpoint()));
// post handler callback to service running in main thread
resolver_context.post(boost::bind(&UdpReceiver::handle_receive, error, result));
}
private:
Sockets sockets;
size_t received_packets = 0;
EndPoint remote_receiver;
Buffer receive_buf {};
io_context resolver_context;
io_context handler_context;
boost::optional<boost::asio::io_context::work> work_;
std::thread thread_;
};
int main (int argc, char** argv)
{
UdpReceiver udpReceiver;
udpReceiver.async_resolve(udp::resolver::query(ip_address, std::to_string(1234)));
//logic
pollfd fds[2] { };
for(int i = 0; i < arraySize(fds); ++i)
{
fds[i].fd = udpReceiver.fd(0);
fds[i].events = 0;
fds[i].events |= POLLIN;
fcntl(fds[i].fd, F_SETFL, O_NONBLOCK);
}
// simple event-loop
while (true) {
if (poll(fds, arraySize(fds), -1)) // waiting for wakeup call. Timeout - inf
{
for(auto &fd : fds)
{
if(fd.revents & POLLIN) // checking if we have something to read
{
fd.revents = 0; // reset kernel message
udpReceiver.run_handler(fd.fd); // call resolve handler. Do read !
}
}
}
}
return 0;
}
This looks like a confused mix of C-style poll code and Asio code. The point is:
you don't need poll(): Asio does it internally (using epoll/select/kqueue/IOCP, whatever is available)
UDP is connectionless, so you don't need more than one socket to receive all "connections" (senders)
I'd replace it all with a single udp::socket on a single thread. You don't even have to manage the thread/work:
net::thread_pool io(1); // single threaded
udp::socket s{io, {{}, 1234}};
Let's run an asynchronous receive loop for 5s:
std::array<char, 100> receive_buffer;
udp::endpoint sender;
std::function<void(error_code, size_t)> read_loop;
read_loop = [&](error_code ec, size_t bytes) {
if (bytes != size_t(-1)) {
//std::cout << "read_loop (" << ec.message() << ")\n";
if (ec)
return;
received_packets += 1;
unique_senders.insert(sender);
//std::cout << "Received:" << bytes << " sender:" << sender << " recorded:" << received_packets << "\n";
//std::cout << std::string_view(receive_buffer.data(), bytes) << "\n";
}
s.async_receive_from(net::buffer(receive_buffer), sender, read_loop);
};
read_loop(error_code{}, -1); // prime the async pump
// after 5s stop
std::this_thread::sleep_for(5s);
post(io, [&s] { s.cancel(); });
io.join();
At the end, we can report the statistics:
std::cout << "A total of " << received_packets << " were received from "
<< unique_senders.size() << " unique senders\n";
With a simulated load in bash:
function client() { while read a; do echo "$a" > /dev/udp/localhost/1234 ; done < /etc/dictionaries-common/words; }
for a in {1..20}; do client& done; time wait
We get:
A total of 294808 were received from 28215 unique senders
real 0m5,007s
user 0m0,801s
sys 0m0,830s
This is obviously not optimized; the bottleneck here is likely the many, many bash subshells being launched for the clients.
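For what it's worth, a quick sketch of a C++ load generator that avoids the subshell overhead (message count and thread count are arbitrary; note that each thread reuses one socket, so the server will only see 20 unique senders):
#include <boost/asio.hpp>
#include <string>
#include <thread>
#include <vector>
namespace net = boost::asio;
using net::ip::udp;

int main() {
    std::vector<std::thread> clients;
    for (int c = 0; c < 20; ++c) {
        clients.emplace_back([] {
            net::io_context io;
            udp::socket s(io, udp::endpoint(udp::v4(), 0)); // ephemeral source port
            udp::endpoint server(net::ip::make_address("127.0.0.1"), 1234);
            for (int i = 0; i < 10'000; ++i) {
                std::string msg = "message " + std::to_string(i);
                s.send_to(net::buffer(msg), server);
            }
        });
    }
    for (auto& t : clients) t.join();
}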
Full Listing
#include <boost/asio.hpp>
#include <boost/bind/bind.hpp>
#include <iostream>
#include <set>
namespace net = boost::asio;
using boost::asio::ip::udp;
using boost::system::error_code;
using namespace std::chrono_literals;
int main ()
{
net::thread_pool io(1); // single threaded
udp::socket s{io, {{}, 1234}};
std::set<udp::endpoint> unique_senders;
size_t received_packets = 0;
{
std::array<char, 100> receive_buffer;
udp::endpoint sender;
std::function<void(error_code, size_t)> read_loop;
read_loop = [&](error_code ec, size_t bytes) {
if (bytes != size_t(-1)) {
//std::cout << "read_loop (" << ec.message() << ")\n";
if (ec)
return;
received_packets += 1;
unique_senders.insert(sender);
//std::cout << "Received:" << bytes << " sender:" << sender << " recorded:" << received_packets << "\n";
//std::cout << std::string_view(receive_buffer.data(), bytes) << "\n";
}
s.async_receive_from(net::buffer(receive_buffer), sender, read_loop);
};
read_loop(error_code{}, -1); // prime the async pump
// after 5s stop
std::this_thread::sleep_for(5s);
post(io, [&s] { s.cancel(); });
io.join();
}
std::cout << "A total of " << received_packets << " were received from "
<< unique_senders.size() << " unique senders\n";
}
I'm implementing a TCP client which reads and sends files and strings, and I'm using Boost as my main library. I'd like to continue reading or sending files while I keep sending strings, which in this case are the commands to send to the server. For this purpose I thought about using a thread pool in order not to overload the client. My question is: can I use futures to run callbacks when one of the threads in the pool ends? If I can't, is there any other solution?
I was doing something like this, where pool_ is a boost::asio::thread_pool:
void send_file(std::string const& file_path){
boost::asio::post(pool_, [this, &file_path] {
handle_send_file(file_path);
});
// DO SOMETHING WHEN handle_send_file ENDS
}
void handle_send_file(std::string const& file_path) {
boost::array<char, 1024> buf{};
boost::system::error_code error;
std::ifstream source_file(file_path, std::ios_base::binary | std::ios_base::ate);
if(!source_file) {
std::cout << "[ERROR] Failed to open " << file_path << std::endl;
//TODO gestire errore
}
size_t file_size = source_file.tellg();
source_file.seekg(0);
std::string file_size_readable = file_size_to_readable(file_size);
// First send file name and file size in bytes to server
boost::asio::streambuf request;
std::ostream request_stream(&request);
request_stream << file_path << "\n"
<< file_size << "\n\n"; // Consider sending readable version, does it change anything?
// Send the request
boost::asio::write(*socket_, request, error);
if(error){
std::cout << "[ERROR] Send request error:" << error << std::endl;
//TODO lanciare un'eccezione? Qua dovrò controllare se il server funziona o no
}
if(DEBUG) {
std::cout << "[DEBUG] " << file_path << " size is: " << file_size_readable << std::endl;
std::cout << "[DEBUG] Start sending file content" << std::endl;
}
long bytes_sent = 0;
float percent = 0;
print_percentage(percent);
while(!source_file.eof()) {
source_file.read(buf.c_array(), (std::streamsize)buf.size());
int bytes_read_from_file = source_file.gcount(); //int is fine because i read at most buf's size, 1024 in this case
if(bytes_read_from_file<=0) {
std::cout << "[ERROR] Read file error" << std::endl;
break;
//TODO gestire questo errore
}
percent = std::ceil((100.0 * bytes_sent) / file_size);
print_percentage(percent);
boost::asio::write(*socket_, boost::asio::buffer(buf.c_array(), source_file.gcount()),
boost::asio::transfer_all(), error);
if(error) {
std::cout << "[ERROR] Send file error:" << error << std::endl;
//TODO lanciare un'eccezione?
}
bytes_sent += bytes_read_from_file;
}
std::cout << "\n" << "[INFO] File " << file_path << " sent successfully!" << std::endl;
}
The operations posted to the pool end without the threads ending. That's the whole purpose of pooling the threads.
void send_file(std::string const& file_path){
post(pool_, [this, &file_path] {
handle_send_file(file_path);
});
// DO SOMETHING WHEN handle_send_file ENDS
}
This has several issues. The largest one is that you should not capture file_path by reference, as the argument is soon out of scope, and the handle_send_file call will run at an unspecified time in another thread. That's a race condition and dangling reference. Undefined Behaviour results.
Then the
// DO SOMETHING WHEN handle_send_file ENDS
is on a line which has no sequence relation with handle_send_file. In fact, it will probably run before that operation ever has a chance to start.
Simplifying
Here's a simplified version:
#include <boost/array.hpp>
#include <boost/asio.hpp>
#include <fstream>
#include <iostream>
namespace asio = boost::asio;
using asio::ip::tcp;
static asio::thread_pool pool_;
struct X {
std::unique_ptr<tcp::socket> socket_;
explicit X(unsigned short port) : socket_(new tcp::socket{ pool_ }) {
socket_->connect({ {}, port });
}
void send_file(std::string file_path) {
post(pool_, [=, this] {
send_file_implementation(file_path);
// DO SOMETHING WHEN send_file_implementation ENDS
});
}
// throws system_error exception
void send_file_implementation(std::string file_path) {
std::ifstream source_file(file_path,
std::ios_base::binary | std::ios_base::ate);
size_t file_size = source_file.tellg();
source_file.seekg(0);
write(*socket_,
asio::buffer(file_path + "\n" + std::to_string(file_size) + "\n\n"));
boost::array<char, 1024> buf{};
while (source_file.read(buf.c_array(), buf.size()) ||
source_file.gcount() > 0)
{
int n = source_file.gcount();
if (n <= 0) {
using namespace boost::system;
throw system_error(errc::io_error, system_category());
}
write(*socket_, asio::buffer(buf), asio::transfer_exactly(n));
}
}
};
Now, you can indeed run several of these operations in parallel (assuming several instances of X, so you have separate socket_ connections).
To do something at the end, just put code where I moved the comment:
// DO SOMETHING WHEN send_file_implementation ENDS
If you don't know what to do there and you wish to make a future ready at that point, you can:
std::future<void> send_file(std::string file_path) {
std::packaged_task<void()> task([=, this] {
send_file_implementation(file_path);
});
return post(pool_, std::move(task));
}
This overload of post magically¹ returns the future from the packaged task. That packaged task will set the internal promise with either the (void) return value or the exception thrown.
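Here is the same mechanism reduced to a standalone toy, in case it helps to see it in isolation (the integer task is mine, not from the original code):
#include <boost/asio.hpp>
#include <future>
#include <iostream>

int main() {
    boost::asio::thread_pool pool(2);
    std::packaged_task<int()> task([] { return 6 * 7; });
    // posting a packaged_task yields the task's future as the return value
    std::future<int> fut = boost::asio::post(pool, std::move(task));
    std::cout << "answer: " << fut.get() << "\n"; // blocks until the pool ran the task
    pool.join();
}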
See it in action: Live On Coliru
int main() {
// send two files simultaneously to different connections
X clientA(6868);
X clientB(6969);
std::future<void> futures[] = {
clientA.send_file("main.cpp"),
clientB.send_file("main.cpp"),
};
for (auto& fut : futures) try {
fut.get();
std::cout << "Everything completed without error\n";
} catch(std::exception const& e) {
std::cout << "Error occurred: " << e.what() << "\n";
};
pool_.join();
}
I tested this while running two netcats to listen on 6868/6969:
nc -l -p 6868 | head& nc -l -p 6969 | md5sum&
./a.out
wait
The test program prints:
Everything completed without error
Everything completed without error
The netcats print their filtered output:
main.cpp
1907
#include <boost/array.hpp>
#include <boost/asio.hpp>
#include <fstream>
#include <iostream>
#include <future>
namespace asio = boost::asio;
using asio::ip::tcp;
7ecb71992bcbc22bda44d78ad3e2a5ef -
¹ not magic: see https://www.boost.org/doc/libs/1_66_0/doc/html/boost_asio/reference/async_result.html
I want to send a struct from client to server using boost::asio. I followed the Boost tutorial at https://www.boost.org/doc/libs/1_47_0/doc/html/boost_asio/examples.html#boost_asio.examples.serialization. I slightly modified the code in server.cpp and client.cpp: with the new code, after a connection is established, client.cpp writes the stock struct to the server and the server reads the stock information. (In the tutorial version, after a connection is established, the server writes the stock structs to the client and the client reads them; that version works for me.)
My problem is that after a connection is established, the async_write in client.cpp causes the error
Error in write: An existing connection was forcibly closed by the remote host
and the async_read in server.cpp causes the error
Error in read:The network connection was aborted by the local system.
As suggested by some forum answers, I changed the this pointers in the handlers of async_write and async_read to shared_from_this. The problem still exists.
I am not able to identify whether the client or the server side is causing the problem. Please help.
server.cpp
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/lexical_cast.hpp>
#include <iostream>
#include <vector>
#include "connection.h" // Must come before boost/serialization headers.
#include <boost/serialization/vector.hpp>
#include <boost/enable_shared_from_this.hpp>
#include "stock.h"
namespace s11n_example
{
/// Serves stock quote information to any client that connects to it.
class server : public boost::enable_shared_from_this<server>
{
private:
/// The acceptor object used to accept incoming socket connections.
boost::asio::ip::tcp::acceptor acceptor_;
/// The data to be sent to each client.
std::vector<stock> stocks_;
public:
/// Constructor opens the acceptor and starts waiting for the first incoming
/// connection.
server(boost::asio::io_service& io_service, unsigned short port):
acceptor_(io_service, boost::asio::ip::tcp::endpoint(boost::asio::ip::tcp::v4(), port))
{
// Start an accept operation for a new connection.
connection_ptr new_conn(new connection(acceptor_.get_io_service()));
acceptor_.async_accept(new_conn->socket(),
boost::bind(&server::handle_accept, this,boost::asio::placeholders::error, new_conn));
}
/// Handle completion of a accept operation.
void handle_accept(const boost::system::error_code& e, connection_ptr conn)
{
if (!e)
{
std::cout << "Received a connection" <<std::endl;
conn->async_read(stocks_,
boost::bind(&server::handle_read, shared_from_this(),boost::asio::placeholders::error));
}
// Start an accept operation for a new connection.
connection_ptr new_conn(new connection(acceptor_.get_io_service()));
acceptor_.async_accept(new_conn->socket(),
boost::bind(&server::handle_accept, this,boost::asio::placeholders::error, new_conn));
}
/// Handle completion of a read operation.
void handle_read(const boost::system::error_code& e)
{
if (!e)
{
// Print out the data that was received.
for (std::size_t i = 0; i < stocks_.size(); ++i)
{
std::cout << "Stock number " << i << "\n";
std::cout << " code: " << stocks_[i].code << "\n";
std::cout << " name: " << stocks_[i].name << "\n";
std::cout << " open_price: " << stocks_[i].open_price << "\n";
std::cout << " high_price: " << stocks_[i].high_price << "\n";
std::cout << " low_price: " << stocks_[i].low_price << "\n";
std::cout << " last_price: " << stocks_[i].last_price << "\n";
std::cout << " buy_price: " << stocks_[i].buy_price << "\n";
std::cout << " buy_quantity: " << stocks_[i].buy_quantity << "\n";
std::cout << " sell_price: " << stocks_[i].sell_price << "\n";
std::cout << " sell_quantity: " << stocks_[i].sell_quantity << "\n";
}
}
else
{
// An error occurred.
std::cerr << "Error in read:" << e.message() << std::endl;
}
}
};
} // namespace s11n_example
int main(int argc, char* argv[])
{
try
{
// Check command line arguments.
if (argc != 2)
{
std::cerr << "Usage: server <port>" << std::endl;
return 1;
}
unsigned short port = boost::lexical_cast<unsigned short>(argv[1]);
boost::asio::io_service io_service;
boost::shared_ptr<s11n_example::server> server(new s11n_example::server(io_service, port));
io_service.run();
}
catch (std::exception& e)
{
std::cerr << e.what() << std::endl;
}
return 0;
}
client.cpp
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <iostream>
#include <vector>
#include "connection.h" // Must come before boost/serialization headers.
#include <boost/serialization/vector.hpp>
#include <boost/enable_shared_from_this.hpp>
#include "stock.h"
namespace s11n_example {
/// Downloads stock quote information from a server.
class client : public boost::enable_shared_from_this<client>
{
private:
/// The connection to the server.
connection connection_;
/// The data received from the server.
std::vector<stock> stocks_;
public:
/// Constructor starts the asynchronous connect operation.
client(boost::asio::io_service& io_service, const std::string& host, const std::string& service)
: connection_(io_service)
{
// Resolve the host name into an IP address.
boost::asio::ip::tcp::resolver resolver(io_service);
boost::asio::ip::tcp::resolver::query query(host, service);
boost::asio::ip::tcp::resolver::iterator endpoint_iterator =
resolver.resolve(query);
// Start an asynchronous connect operation.
boost::asio::async_connect(connection_.socket(), endpoint_iterator,
boost::bind(&client::handle_connect, this,boost::asio::placeholders::error));
}
/// Handle completion of a connect operation.
void handle_connect(const boost::system::error_code& e) //, connection_ptr conn
{
if (!e)
{
std::cout << "Connected to server!" << std::endl;
// Create the data to be sent to each client.
stock s;
s.code = "ABC";
s.name = "A Big Company";
s.open_price = 4.56;
s.high_price = 5.12;
s.low_price = 4.33;
s.last_price = 4.98;
s.buy_price = 4.96;
s.buy_quantity = 1000;
s.sell_price = 4.99;
s.sell_quantity = 2000;
stocks_.push_back(s);
s.code = "DEF";
s.name = "Developer Entertainment Firm";
s.open_price = 20.24;
s.high_price = 22.88;
s.low_price = 19.50;
s.last_price = 19.76;
s.buy_price = 19.72;
s.buy_quantity = 34000;
s.sell_price = 19.85;
s.sell_quantity = 45000;
stocks_.push_back(s);
// Successfully established connection. Start operation to write the list
// of stocks.
connection_.async_write(stocks_,
boost::bind(&client::handle_write, shared_from_this(),boost::asio::placeholders::error)); //,&conn )
}
else
{
// An error occurred. Log it and return.
std::cerr << "Error in connecting to server" << e.message() << std::endl;
}
}
/// Handle completion of a write operation.
void handle_write(const boost::system::error_code& e)//, connection* conn
{
if (!e)
{
std::cout << "Finished writing to server" << std::endl;
}
else
{
// An error occurred. Log it and return. Since we are not starting a new
// operation the io_service will run out of work to do and the client will
// exit.
std::cerr << "Error in write: " << e.message() << std::endl;
}
// Nothing to do. The socket will be closed automatically when the last
// reference to the connection object goes away.
}
};
} // namespace s11n_example
int main(int argc, char* argv[])
{
try
{
// Check command line arguments.
if (argc != 3)
{
std::cerr << "Usage: client <host> <port>" << std::endl;
return 1;
}
boost::asio::io_service io_service;
//s11n_example::client client(io_service, argv[1], argv[2]);
boost::shared_ptr<s11n_example::client> client(new s11n_example::client(io_service, argv[1], argv[2]));
io_service.run();
}
catch (std::exception& e)
{
std::cerr << e.what() << std::endl;
}
return 0;
}
Thanks.
You need to pass conn to handle_read, otherwise it will be destructed at the end of the handle_accept method. When it is destructed, the socket it contains is also destructed and the connection closes.
conn->async_read(stocks_,
boost::bind(&server::handle_read, shared_from_this(), conn, boost::asio::placeholders::error));
Lambdas make this easier to read than using bind:
auto self = shared_from_this();
conn->async_read(stocks_,
[self, this, conn] (boost::system::error_code ec) { handle_read(ec); });
The variables listed in the capture list are copied, so the shared pointers are kept alive.
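For completeness, the bind variant implies a matching change to the handler signature; roughly (a sketch based on the question's own connection_ptr and stocks_ members, not a full listing):
/// Handle completion of a read operation; holding the connection_ptr keeps the
/// connection (and its socket) alive until this handler has run.
void handle_read(connection_ptr conn, const boost::system::error_code& e)
{
    if (!e)
    {
        // ... print stocks_ exactly as before ...
    }
    else
    {
        std::cerr << "Error in read:" << e.message() << std::endl;
    }
    // conn goes out of scope here; only now can the socket be closed.
}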