I am building a program in C++ in which I want to run two threads watching two data sources asynchronously. I do not know whether this is possible. So far I have been able to read data from one source with a single thread; now I want to read from two sources asynchronously. Has anyone done something like this in the past who can point me in the right direction? I am using the standalone version of asio. Or is this something that is just not possible?
I would really appreciate your advice.
This is my code so far.
#include <iostream>
#include <chrono>
#include <thread>
#include <vector>
#define ASIO_STANDALONE
#include <asio.hpp>
#include <asio/ts/buffer.hpp>
#include <asio/ts/internet.hpp>
std::vector<char> vBuffer(2 * 1024);
void getData1(asio::ip::tcp::socket& socket1)
{
    socket1.async_read_some(asio::buffer(vBuffer.data(), vBuffer.size()),
        [&](std::error_code ec, std::size_t length)
        {
            if (!ec)
            {
                std::cout << "\n\nRead " << length << " bytes\n\n";
                for (std::size_t i = 0; i < length; i++)
                    std::cout << vBuffer[i];
                getData1(socket1);
            }
        }
    );
}
//Make it read from two sources
void getData2(asio::ip::tcp::socket& socket1)
{
    socket1.async_read_some(asio::buffer(vBuffer.data(), vBuffer.size()),
        [&](std::error_code ec, std::size_t length)
        {
            if (!ec)
            {
                std::cout << "\n\nRead " << length << " bytes\n\n";
                for (std::size_t i = 0; i < length; i++)
                    std::cout << vBuffer[i];
                getData2(socket1);
            }
        }
    );
}
int main() {
    asio::error_code ec;
    asio::io_context context1;
    asio::io_context context2; // This one is to read data from another source, e.g. another IP address and port
    // Give asio some fake work so that the context doesn't finish
    asio::io_context::work idleWork(context1);
    // Start the contexts
    std::thread thrContext = std::thread([&]() { context1.run(); });
    std::thread thrContext2 = std::thread([&]() { context2.run(); });
    // Get the addresses of the places we wish to connect to
    asio::ip::tcp::endpoint endpoint(asio::ip::make_address("51.38.81.49", ec), 80); // ip exists
    asio::ip::tcp::endpoint endpoint2(asio::ip::make_address("127.0.0.1", ec), 80); // ip exists
    // Create the sockets; the context will deliver the implementation
    asio::ip::tcp::socket socket1(context1);
    asio::ip::tcp::socket socket2(context2);
    // Tell the first socket to connect to the first address
    socket1.connect(endpoint, ec);
    // Tell the second socket to connect to the second address
    socket2.connect(endpoint2, ec);
    // Here we can check the error code to see if the connection was successful
    if (!ec)
    {
        std::cout << "Connected successfully" << std::endl;
    }
    else
    {
        std::cout << "Failed to connect to address:\n" << ec.message() << std::endl;
    }
    // Here we check whether the socket is open
    if (socket1.is_open())
    {
        getData1(socket1); // Call our function here rather than at the bottom
        std::string sRequest =
            "GET /index.html HTTP/1.1\r\n"
            "Host: example.com\r\n\r\n";
        socket1.write_some(asio::buffer(sRequest.data(), sRequest.size()), ec);
        // Stop the program from exiting prematurely
        using namespace std::chrono_literals;
        std::this_thread::sleep_for(2000ms);
    }
    system("pause");
    return 0;
}
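For what it's worth: yes, this is possible, and it doesn't even require two io_contexts or two threads. A single io_context can drive any number of concurrent asynchronous read chains. A minimal sketch (standalone asio; the endpoints are the placeholders from the question, and each socket gets its own buffer to avoid the shared-vBuffer problem above):

#include <iostream>
#include <thread>
#include <vector>
#define ASIO_STANDALONE
#include <asio.hpp>

// One buffer per socket avoids sharing a single global buffer between chains.
void readLoop(asio::ip::tcp::socket& socket, std::vector<char>& buf, const char* tag)
{
    socket.async_read_some(asio::buffer(buf),
        [&socket, &buf, tag](std::error_code ec, std::size_t length)
        {
            if (ec) return;
            std::cout << tag << " read " << length << " bytes\n";
            readLoop(socket, buf, tag); // keep the chain going
        });
}

int main()
{
    asio::io_context context;
    asio::ip::tcp::socket socket1(context), socket2(context);
    std::vector<char> buf1(2 * 1024), buf2(2 * 1024);
    asio::error_code ec;
    socket1.connect({asio::ip::make_address("51.38.81.49"), 80}, ec); // placeholder endpoints
    socket2.connect({asio::ip::make_address("127.0.0.1"), 80}, ec);
    if (socket1.is_open()) readLoop(socket1, buf1, "socket1");
    if (socket2.is_open()) readLoop(socket2, buf2, "socket2");
    context.run(); // one thread services both read chains; returns when no work remains
}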
I'm trying to write a very simple client/server app with boost::asio sockets. I need a server to run and a single client to connect, send data, disconnect and possibly reconnect later and repeat.
The code reduced to the minimum is here:
Server app:
#include <iostream>
#include <boost/asio.hpp>
#include <boost/thread.hpp>
using boost::asio::ip::tcp;
class TheServer
{
public:
TheServer(int port) : m_port(port)
{
m_pIOService = new boost::asio::io_service;
m_pThread = new boost::thread(boost::bind<void>(&TheServer::run, this));
listenForNewConnection();
}
~TheServer()
{
m_bContinueReading = false;
m_pIOService->stop();
m_pThread->join();
delete m_pThread;
delete m_pSocket;
delete m_pAcceptor;
delete m_pIOService;
}
void listenForNewConnection()
{
if (m_pSocket)
delete m_pSocket;
if (m_pAcceptor)
delete m_pAcceptor;
// start new acceptor operation
m_pSocket = new tcp::socket(*m_pIOService);
m_pAcceptor = new tcp::acceptor(*m_pIOService, tcp::endpoint(tcp::v4(), m_port));
std::cout << "Starting async_accept" << std::endl;
m_pAcceptor->async_accept(*m_pSocket,
boost::bind<void>(&TheServer::readSession, this, boost::asio::placeholders::error));
}
void readSession(boost::system::error_code error)
{
if (!error)
{
std::cout << "Connection established" << std::endl;
while ( m_bContinueReading )
{
static unsigned char buffer[1000];
boost::system::error_code error;
size_t length = m_pSocket->read_some(boost::asio::buffer(&buffer, 1000), error);
if (!error && length != 0)
{
std::cout << "Received " << buffer << std::endl;
}
else
{
std::cout << "Received error, connection likely closed by peer" << std::endl;
break;
}
}
std::cout << "Connection closed" << std::endl;
listenForNewConnection();
}
else
{
std::cout << "Connection error" << std::endl;
}
std::cout << "Ending readSession" << std::endl;
}
void run()
{
while (m_bContinueReading)
m_pIOService->run_one();
std::cout << "Exiting run thread" << std::endl;
}
bool m_bContinueReading = true;
boost::asio::io_service* m_pIOService = NULL;
tcp::socket* m_pSocket = NULL;
tcp::acceptor* m_pAcceptor = NULL;
boost::thread* m_pThread = NULL;
int m_port;
};
int main(int argc, char* argv[])
{
TheServer* server = new TheServer(1900);
std::cout << "Press Enter to quit" << std::endl;
std::string sGot;
getline(std::cin, sGot);
delete server;
return 0;
}
Client app:
#include <iostream>
#include <boost/asio.hpp>
#include <boost/thread.hpp>
int main(int argc, char* argv[])
{
std::cout << std::endl;
std::cout << "Starting client" << std::endl;
using boost::asio::ip::tcp;
boost::asio::io_service* m_pIOService = NULL;
tcp::socket* m_pSocket = NULL;
try
{
m_pIOService = new boost::asio::io_service;
std::stringstream sPort;
sPort << 1900;
tcp::resolver resolver(*m_pIOService);
tcp::resolver::query query(tcp::v4(), "localhost", sPort.str());
tcp::resolver::iterator iterator = resolver.resolve(query);
m_pSocket = new tcp::socket(*m_pIOService);
m_pSocket->connect(*iterator);
std::cout << "Client connected" << std::endl;
std::string hello = "Hello World";
boost::asio::write( *m_pSocket, boost::asio::buffer(hello.data(), hello.size()) );
boost::this_thread::sleep(boost::posix_time::milliseconds(100));
hello += "(2)";
boost::asio::write(*m_pSocket, boost::asio::buffer(hello.data(), hello.size()));
}
catch (std::exception& e)
{
delete m_pSocket;
m_pSocket = NULL;
std::cerr << "Exception: " << e.what() << "\n";
}
return 0;
}
Note that I use non-blocking async_accept to be able to cleanly stop the server when Enter is pressed.
Under Windows, it works perfectly fine, I run the server, it outputs:
Starting async_accept
Press Enter to quit
For each client app run, it outputs:
Starting client
Client connected
and server app outputs:
Connection established
Received Hello World
Received Hello World(2)
Received error, connection likely closed by peer
Connection closed
Starting async_accept
Ending readSession
Then when I press Enter in server app console, it outputs Exiting run thread and cleanly stops.
Now, when I compile this same code under Linux, the client outputs the same as under Windows, but nothing happens on the server side...
Any idea what's wrong?
There are many questionable elements.
There is a classic data race on m_bContinueReading: you write it from one thread, but the run thread may never see the change because of the race.
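The usual fix, which the listing at the end also adopts, is an atomic flag, e.g.:

#include <atomic>

std::atomic_bool m_bContinueReading{true}; // written by the destructor, read by run()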
The second race condition is likely your problem:
m_pThread = new boost::thread(boost::bind<void>(&TheServer::run, this));
listenForNewConnection();
Here the run thread may complete before you ever post the first work. You can use a work-guard to prevent this. In your specific code you would already fix it by reordering the lines:
listenForNewConnection();
m_pThread = new boost::thread(boost::bind<void>(&TheServer::run, this));
I would not do this, because I would not have those statements in my constructor body. See below for the work-guard solution.
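For reference, the work-guard idiom that solution uses (boost::optional around io_service::work, so the guard can be released later):

#include <boost/asio.hpp>
#include <boost/optional.hpp>
#include <thread>

int main() {
    boost::asio::io_service io;
    // the guard keeps run() from returning while no work is pending yet
    boost::optional<boost::asio::io_service::work> work{io};
    std::thread t([&io] { io.run(); });
    // ... post work, start accepting, etc. ...
    work.reset(); // release the guard; run() returns once remaining work finishes
    t.join();
}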
There is a lot of raw pointer handling and new/delete going on, which merely invites errors.
You use the buffer assuming that it is NUL-terminated. This is especially unwarranted because you use read_some, which will read partial messages as they arrive on the wire.
You use a static buffer while the code may have different instances of the class. This is a false optimization. Instead, prevent all the allocations! Combining with the previous item:
char buffer[1000];
while (m_bContinueReading) {
    size_t length = m_Socket.read_some(asio::buffer(&buffer, 1000), ec);
    std::cout << "Received " << length << " (" << quoted(std::string(buffer, length)) << "), "
              << ec.message() << std::endl;
    if (ec.failed())
        break;
}
You always start a new acceptor, when there is no need to: a single acceptor can accept as many connections as you wish. In fact, the method shown runs into two problems:
lingering connections can prevent the new acceptor from binding to the same port; you could alleviate that with
m_Acceptor.set_option(tcp::acceptor::reuse_address(true));
and the destroyed acceptor may have backlogged connections, which are discarded.
Typically you want to support concurrent connections, so you can split off a "readSession" and immediately accept the next connection. Now, strangely, your code seems to expect clients to stay connected until the server is prompted to shut down (from the console), but after that you somehow start listening for new connections (even though you know the service will be stopping, and m_bContinueReading will remain false).
In the grand scheme of things, you don't want to destroy the acceptor unless something invalidated it. In practice this is rare (e.g. on Linux the acceptor will happily survive disabling/re-enabling the network adaptor).
you have spurious explicit template arguments (bind<void>); this is an anti-pattern and may lead to subtle problems
similar with the buffer: just say asio::buffer(buffer) and no longer have correctness concerns. In fact, don't use C-style arrays:
std::array<char, 1000> buffer;
size_t n = m_Socket.read_some(asio::buffer(buffer), ec);
std::cout << "Received " << n << " " << quoted(std::string(buffer.data(), n))
<< " (" << ec.message() << ")" << std::endl;
Instead of running a manual run_one() loop (where you forget to handle exceptions), consider "just" letting the service run(). Then you can .cancel() the acceptor to let the service run out of work.
In fact, this subtlety isn't required in your code, since your code already forces "ungraceful" shutdown anyways:
m_IOService.stop(); // nuclear option
m_Thread.join();
More gentle would be e.g.
m_Acceptor.cancel();
m_Socket.cancel();
m_Thread.join();
In which case you can respond to the completion error_code == error::operation_aborted to stop the session/accept loop.
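As a sketch, such a handler might look like this (a fragment, assuming the cancel()-based shutdown above):

void on_accept(boost::system::error_code ec) {
    if (ec == boost::asio::error::operation_aborted)
        return; // shutdown was requested: end the accept loop quietly
    if (!ec) {
        // ... hand the connected socket off to a session ...
        do_accept(); // then keep accepting
    }
}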
Technically, you may be able to do away with the boolean flag altogether. I keep it because it allows us to handle multiple sessions per thread in a "fire-and-forget" manner.
In the client you have many of the same problems, and also a gotcha where you only look at the first resolver result (assuming there was one), ignoring the rest. You can use asio::connect instead of m_Socket.connect to try all resolved entries.
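That is, something along these lines (host and port taken from the original code):

boost::asio::ip::tcp::resolver resolver(m_IOService);
boost::asio::connect(m_Socket, resolver.resolve("localhost", "1900")); // tries every resolved entry until one connects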
Addressing the majority of these issues, simplifying the code:
Live On Coliru
#include <boost/asio.hpp>
#include <boost/bind/bind.hpp>
#include <boost/optional.hpp>
#include <iomanip>
#include <iostream>
namespace asio = boost::asio;
using asio::ip::tcp;
using namespace std::chrono_literals;
using boost::system::error_code;
class TheServer {
public:
TheServer(int port) : m_port(port) {
m_Acceptor.set_option(tcp::acceptor::reuse_address(true));
do_accept();
}
~TheServer() {
m_shutdownRequested = true;
m_Work.reset(); // release the work-guard
m_Acceptor.cancel();
m_Thread.join();
}
private:
void do_accept() {
std::cout << "Starting async_accept" << std::endl;
m_Acceptor.async_accept( //
m_Socket, boost::bind(&TheServer::on_accept, this, asio::placeholders::error));
}
void on_accept(error_code ec) {
if (!ec) {
std::cout << "Connection established " << m_Socket.remote_endpoint() << std::endl;
// leave session running in the background:
std::thread(&TheServer::read_session_thread, this, std::move(m_Socket)).detach();
do_accept(); // and immediately accept new connection(s)
} else {
std::cout << "Connection error (" << ec.message() << ")" << std::endl;
std::cout << "Ending readSession" << std::endl;
}
}
void read_session_thread(tcp::socket sock) {
std::array<char, 1000> buffer;
for (error_code ec;;) {
size_t n = sock.read_some(asio::buffer(buffer), ec);
std::cout << "Received " << n << " " << quoted(std::string(buffer.data(), n)) << " ("
<< ec.message() << ")" << std::endl;
if (ec.failed() || m_shutdownRequested)
break;
}
std::cout << "Connection closed" << std::endl;
}
void thread_func() {
// http://www.boost.org/doc/libs/1_61_0/doc/html/boost_asio/reference/io_service.html#boost_asio.reference.io_service.effect_of_exceptions_thrown_from_handlers
for (;;) {
try {
m_IOService.run();
break; // exited normally
} catch (std::exception const& e) {
std::cerr << "[eventloop] error: " << e.what();
} catch (...) {
std::cerr << "[eventloop] unexpected error";
}
}
std::cout << "Exiting service thread" << std::endl;
}
std::atomic_bool m_shutdownRequested{false};
uint16_t m_port;
asio::io_service m_IOService;
boost::optional<asio::io_service::work> m_Work{m_IOService};
tcp::socket m_Socket{m_IOService};
tcp::acceptor m_Acceptor{m_IOService, tcp::endpoint{tcp::v4(), m_port}};
std::thread m_Thread{boost::bind(&TheServer::thread_func, this)};
};
constexpr uint16_t s_port = 1900;
void run_server() {
TheServer server(s_port);
std::cout << "Press Enter to quit" << std::endl;
std::cin.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
}
void run_client() {
std::cout << std::endl;
std::cout << "Starting client" << std::endl;
using asio::ip::tcp;
try {
asio::io_service m_IOService;
tcp::resolver resolver(m_IOService);
auto iterator = resolver.resolve("localhost", std::to_string(s_port));
tcp::socket m_Socket(m_IOService);
connect(m_Socket, iterator);
std::cout << "Client connected" << std::endl;
std::string hello = "Hello World";
write(m_Socket, asio::buffer(hello));
std::this_thread::sleep_for(100ms);
hello += "(2)";
write(m_Socket, asio::buffer(hello));
} catch (std::exception& e) {
std::cerr << "Exception: " << e.what() << "\n";
}
}
int main(int argc, char**) {
if (argc>1)
run_server();
else
run_client();
}
I have to handle information from 100 ports in parallel, for 100 ms each second.
I am using Ubuntu.
I did some research and saw that the poll() function is a good candidate for avoiding opening 100 threads to handle data arriving over UDP in parallel.
I wrote the main part with boost and tried to integrate poll() with it.
The problem is that when my client sends data to the server, I receive nothing.
According to wireshark, the data arrives on the right host (localhost, port 1234).
Did I miss something, or did I get something wrong?
The test code (server):
#include <deque>
#include <iostream>
#include <chrono>
#include <thread>
#include <fcntl.h>    // fcntl, O_NONBLOCK
#include <unistd.h>   // read
#include <sys/poll.h>
#include <boost/optional.hpp>
#include <boost/asio.hpp>
#include <boost/bind/bind.hpp>
using boost::asio::ip::udp;
using namespace boost::asio;
using namespace std::chrono_literals;
std::string ip_address = "127.0.0.1";
template<typename T, size_t N>
size_t arraySize( T(&)[N] )
{
return(N);
}
class UdpReceiver
{
using Resolver = udp::resolver;
using Sockets = std::deque<udp::socket>;
using EndPoint = udp::endpoint;
using Buffer = std::array<char, 100>; // receiver buffer
public:
explicit UdpReceiver()
: work_(std::ref(resolver_context)), thread_( [this]{ resolver_context.run(); })
{ }
~UdpReceiver()
{
work_ = boost::none; // using work to keep run active always !
thread_.join();
}
void async_resolve(udp::resolver::query const& query_) {
resolver_context.post([this, query_] { do_resolve(query_); });
}
// callback for event-loop in main thread
void run_handler(int fd_idx) {
// start reading
auto result = read(fd_idx, receive_buf.data(), sizeof(Buffer));
// increment number of received packets
received_packets = received_packets + 1;
std::cout << "Received bytes " << result << " current recorded packets " << received_packets <<'\n';
// run handler posted from resolver threads
handler_context.poll();
handler_context.reset();
}
static void handle_receive(boost::system::error_code error, udp::resolver::iterator const& iterator) {
std::cout << "handle_resolve:\n"
" " << error.message() << "\n";
if (!error)
std::cout << " " << iterator->endpoint() << "\n";
}
// get current file descriptor
int fd(size_t idx)
{
return sockets[idx].native_handle();
}
private:
void do_resolve(boost::asio::ip::udp::resolver::query const& query_) {
boost::system::error_code error;
Resolver resolver(resolver_context);
Resolver::iterator result = resolver.resolve(query_, error);
sockets.emplace_back(udp::socket(resolver_context, result->endpoint()));
// post handler callback to service running in main thread
resolver_context.post(boost::bind(&UdpReceiver::handle_receive, error, result));
}
private:
Sockets sockets;
size_t received_packets = 0;
EndPoint remote_receiver;
Buffer receive_buf {};
io_context resolver_context;
io_context handler_context;
boost::optional<boost::asio::io_context::work> work_;
std::thread thread_;
};
int main (int argc, char** argv)
{
UdpReceiver udpReceiver;
udpReceiver.async_resolve(udp::resolver::query(ip_address, std::to_string(1234)));
//logic
pollfd fds[2] { };
for(int i = 0; i < arraySize(fds); ++i)
{
fds[i].fd = udpReceiver.fd(0);
fds[i].events = 0;
fds[i].events |= POLLIN;
fcntl(fds[i].fd, F_SETFL, O_NONBLOCK);
}
// simple event-loop
while (true) {
if (poll(fds, arraySize(fds), -1)) // waiting for wakeup call. Timeout - inf
{
for(auto &fd : fds)
{
if(fd.revents & POLLIN) // checking if we have something to read
{
fd.revents = 0; // reset kernel message
udpReceiver.run_handler(fd.fd); // call resolve handler. Do read !
}
}
}
}
return 0;
}
This looks like a confused mix of C-style poll code and Asio code. The point is:
you don't need poll (Asio does it internally, using epoll/select/kqueue/IOCP, whatever is available)
UDP is connectionless, so you don't need more than one socket to receive all "connections" (senders)
I'd replace it all with a single udp::socket on a single thread. You don't even have to manage the thread/work:
net::thread_pool io(1); // single threaded
udp::socket s{io, {{}, 1234}};
Let's run an asynchronous receive loop for 5s:
std::array<char, 100> receive_buffer;
udp::endpoint sender;
std::function<void(error_code, size_t)> read_loop;
read_loop = [&](error_code ec, size_t bytes) {
if (bytes != size_t(-1)) {
//std::cout << "read_loop (" << ec.message() << ")\n";
if (ec)
return;
received_packets += 1;
unique_senders.insert(sender);
//std::cout << "Received:" << bytes << " sender:" << sender << " recorded:" << received_packets << "\n";
//std::cout << std::string_view(receive_buffer.data(), bytes) << "\n";
}
s.async_receive_from(net::buffer(receive_buffer), sender, read_loop);
};
read_loop(error_code{}, -1); // prime the async pump
// after 5s stop
std::this_thread::sleep_for(5s);
post(io, [&s] { s.cancel(); });
io.join();
At the end, we can report the statistics:
std::cout << "A total of " << received_packets << " were received from "
<< unique_senders.size() << " unique senders\n";
With a simulated load in bash:
function client() { while read a; do echo "$a" > /dev/udp/localhost/1234 ; done < /etc/dictionaries-common/words; }
for a in {1..20}; do client& done; time wait
We get:
A total of 294808 were received from 28215 unique senders
real 0m5,007s
user 0m0,801s
sys 0m0,830s
This is obviously not optimized; the bottleneck here is likely the many, many bash subshells being launched for the clients.
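For comparison, a small asio-based load generator would avoid that overhead. A sketch, assuming the same localhost:1234 target and arbitrary payloads:

#include <boost/asio.hpp>
#include <string>

namespace net = boost::asio;
using net::ip::udp;

int main() {
    net::io_context io;
    udp::socket s(io, udp::endpoint(udp::v4(), 0)); // bind an ephemeral local port
    udp::endpoint server(net::ip::make_address("127.0.0.1"), 1234);
    for (int i = 0; i < 100000; ++i) {
        std::string msg = "packet " + std::to_string(i);
        s.send_to(net::buffer(msg), server); // blocking sends are fine for a generator
    }
}

(All packets then come from a single socket, so they would count as one unique sender in the statistics above.)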
Full Listing
#include <boost/asio.hpp>
#include <boost/bind/bind.hpp>
#include <iostream>
#include <set>
namespace net = boost::asio;
using boost::asio::ip::udp;
using boost::system::error_code;
using namespace std::chrono_literals;
int main ()
{
net::thread_pool io(1); // single threaded
udp::socket s{io, {{}, 1234}};
std::set<udp::endpoint> unique_senders;
size_t received_packets = 0;
{
std::array<char, 100> receive_buffer;
udp::endpoint sender;
std::function<void(error_code, size_t)> read_loop;
read_loop = [&](error_code ec, size_t bytes) {
if (bytes != size_t(-1)) {
//std::cout << "read_loop (" << ec.message() << ")\n";
if (ec)
return;
received_packets += 1;
unique_senders.insert(sender);
//std::cout << "Received:" << bytes << " sender:" << sender << " recorded:" << received_packets << "\n";
//std::cout << std::string_view(receive_buffer.data(), bytes) << "\n";
}
s.async_receive_from(net::buffer(receive_buffer), sender, read_loop);
};
read_loop(error_code{}, -1); // prime the async pump
// after 5s stop
std::this_thread::sleep_for(5s);
post(io, [&s] { s.cancel(); });
io.join();
}
std::cout << "A total of " << received_packets << " were received from "
<< unique_senders.size() << " unique senders\n";
}
I'm new to C++ but so far most of the asio stuff has made sense. I am, however, struggling to get my UDPServer working.
My question is possibly similar to: Trying to write UDP server class, io_context doesn't block
I think my UDPServer stops before work can be given to its io_context. However, I am issuing work to the context before calling io_context.run(), so I don't understand why.
Of course, I am not entirely sure if I am even on the right track with the above statement and would appreciate some guidance. Here is my class:
template<typename message_T>
class UDPServer
{
public:
UDPServer(uint16_t port)
: m_socket(m_asioContext, asio::ip::udp::endpoint(asio::ip::udp::v4(), port))
{
m_port = port;
}
virtual ~UDPServer()
{
Stop();
}
public:
// Starts the server!
bool Start()
{
try
{
// Issue a task to the asio context
WaitForMessages();
m_threadContext = std::thread([this]() { m_asioContext.run(); });
}
catch (std::exception& e)
{
// Something prohibited the server from listening
std::cerr << "[SERVER # PORT " << m_port << "] Exception: " << e.what() << "\n";
return false;
}
std::cout << "[SERVER # PORT " << m_port << "] Started!\n";
return true;
}
// Stops the server!
void Stop()
{
// Request the context to close
m_asioContext.stop();
// Tidy up the context thread
if (m_threadContext.joinable()) m_threadContext.join();
// Inform someone, anybody, if they care...
std::cout << "[SERVER # PORT " << m_port << "] Stopped!\n";
}
void WaitForMessages()
{
m_socket.async_receive_from(asio::buffer(vBuffer.data(), vBuffer.size()), m_endpoint,
[this](std::error_code ec, std::size_t length)
{
if (!ec)
{
std::cout << "[SERVER # PORT " << m_port << "] Got " << length << " bytes \n Data: " << vBuffer.data() << "\n" << "Address: " << m_endpoint.address() << " Port: " << m_endpoint.port() << "\n" << "Data: " << m_endpoint.data() << "\n";
}
else
{
std::cerr << "[SERVER # PORT " << m_port << "] Exception: " << ec.message() << "\n";
return;
}
WaitForMessages();
}
);
}
void Send(message_T& msg, const asio::ip::udp::endpoint& ep)
{
asio::post(m_asioContext,
[this, msg, ep]()
{
// If the queue has a message in it, then we must
// assume that it is in the process of asynchronously being written.
bool bWritingMessage = !m_messagesOut.empty();
m_messagesOut.push_back(msg);
if (!bWritingMessage)
{
WriteMessage(ep);
}
}
);
}
private:
void WriteMessage(const asio::ip::udp::endpoint& ep)
{
m_socket.async_send_to(asio::buffer(&m_messagesOut.front(), sizeof(message_T)), ep,
[this, ep](std::error_code ec, std::size_t length)
{
if (!ec)
{
m_messagesOut.pop_front();
// If the queue is not empty, there are more messages to send, so
// make this happen by issuing the task to send the next header.
if (!m_messagesOut.empty())
{
WriteMessage(ep);
}
}
else
{
std::cout << "[SERVER # PORT " << m_port << "] Write Header Fail.\n";
m_socket.close();
}
});
}
void ReadMessage()
{
}
private:
uint16_t m_port = 0;
asio::ip::udp::endpoint m_endpoint;
std::vector<char> vBuffer = std::vector<char>(21);
protected:
TSQueue<message_T> m_messagesIn;
TSQueue<message_T> m_messagesOut;
Message<message_T> m_tempMessageBuf;
asio::io_context m_asioContext;
std::thread m_threadContext;
asio::ip::udp::socket m_socket;
};
Code is invoked in the main function for now:
enum class TestMsg {
Ping,
Join,
Leave
};
int main() {
Message<TestMsg> msg; // Message is a pretty basic struct that I'm not using yet. When I was, I was only receiving the first 4 bytes - which led me down this path of investigation
msg.id = TestMsg::Join;
msg << "hello";
UDPServer<Message<TestMsg>> server(60000);
}
When invoked, the server immediately exits before it gets a chance to print "[SERVER] Started".
I'll try adding the work guard as the linked post describes, but I would still like to understand why the io_context is not being primed with work quickly enough.
Update (now I also read the question, not just the code)
While WaitForMessages does start listening by calling m_socket.async_receive_from, that function is asynchronous: it returns/unblocks as soon as it has set up the listening. So as long as no client actually sends you something, your server has nothing to do. Only once something has been received will the callback be invoked, by a thread calling io_context::run. So you need the work guard so that the thread running run won't unblock right after start, but blocks for as long as the work guard exists.
Usually it is also combined with a try/while pattern, in case an exception gets thrown in a handler and you still want your server to carry on.
Also in the code you posted, you never actually call UDPServer::Start!
This was my first idea of an answer:
This is normal behavior of ASIO. The io_context::run function will return as soon as it has no work to do.
So to make the run function block, you have to use a boost::asio::executor_work_guard<boost::asio::io_context::executor_type>, i.e. a so-called work guard. Construct that object with a reference to your io_context and hold on to it, i.e. don't let it be destroyed for as long as you want the server to run (that is, as long as you do not want io_context::run to return when there is no work).
So given
boost::asio::io_context io_context_;
boost::asio::executor_work_guard<boost::asio::io_context::executor_type> work_guard_;
you would then initialize the guard in the constructor's member initializer list,
work_guard_{boost::asio::make_work_guard(io_context_)},
and start the threads that drive the context:
const auto thread_count{std::max<unsigned>(std::thread::hardware_concurrency(), 1)};
std::generate_n(std::back_inserter(this->io_run_threads_),
thread_count,
[this]() {
return std::thread{io_run_loop,
std::ref(this->io_context_), std::ref(this->error_handler_)};
});
void io_run_loop(boost::asio::io_context &context,
const std::function<void(std::exception &)> &error_handler) {
while (true) {
try {
context.run();
break;
} catch (std::exception &e) {
error_handler(e);
}
}
}
And then for server shutdown:
work_guard_.reset();
io_context_.stop();
std::for_each(this->io_run_threads_.begin(), this->io_run_threads_.end(), [](auto &thread) {
if (thread.joinable()) thread.join();
});
For a more graceful shutdown, you can omit the stop call and instead close all sockets beforehand.
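That might look like the following sketch; socket_ stands in for whatever sockets your application owns, and close() is posted so it runs on the context's own thread:

// request the close from within the context, so it doesn't race the handlers
boost::asio::post(io_context_, [this] {
    boost::system::error_code ec;
    socket_.close(ec); // pending operations complete with operation_aborted
});
work_guard_.reset(); // let run() return once the remaining handlers have finished
for (auto& thread : io_run_threads_)
    if (thread.joinable()) thread.join();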
Looks like you forgot to call server.Start();. Moreover, you will want to make the main thread wait for some amount of time; otherwise the destructor of Server will immediately cause Stop() to be called:
int main()
{
    using namespace std::chrono_literals;
    Message<TestMsg> msg;
    msg.id = TestMsg::Join;
    msg << "hello";
    UDPServer<Message<TestMsg>> server(60000);
    server.Start();
    std::this_thread::sleep_for(30s);
}
Issues
There is a conceptual problem with the Send API.
It takes an endpoint on each call, but it only uses the one that starts the write call chain! This means that if you do
srv.Send(msg1, {mymachine, 60001});
srv.Send(msg1, {otherserver, 5517});
It is likely they both get sent to mymachine:60001.
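One way out, as a sketch rather than a drop-in fix, is to queue the destination endpoint together with each message, so every write uses the endpoint it was queued with (WriteMessage here is assumed to be changed to take its target from the queue's front element):

std::deque<std::pair<message_T, udp::endpoint>> m_messagesOut;

void Send(message_T msg, udp::endpoint ep) {
    asio::post(m_asioContext, [this, msg = std::move(msg), ep]() {
        bool bWritingMessage = !m_messagesOut.empty();
        m_messagesOut.emplace_back(std::move(msg), ep);
        if (!bWritingMessage)
            WriteMessage(); // hypothetical no-arg version, sends to m_messagesOut.front().second
    });
}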
Then there's how you treat the received buffer: just using .data() blindly assumes that the data is NUL-terminated. Don't do that:
std::string const data(vBuffer.data(), length);
Also, you seem to have at some time been confused about data and printed m_endpoint.data() - your princess is in another castle.
In reality you probably want ways to extract the typed data. I'm leaving that as beyond the scope of this question for today.
Regardless, you should clear the buffer before reuse, because you might otherwise be seeing old data in subsequent reads.
vBuffer.assign(vBuffer.size(), '\0');
This is most likely undefined behaviour:
asio::buffer(&m_messagesOut.front(), sizeof(message_T)), ep,
This is only valid if message_T is trivial and standard-layout ("POD" - Plain Old Data). The presence of operator<< strongly suggests that is not the case.
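If you did want to keep the raw-byte approach, a compile-time guard makes that requirement explicit (needs <type_traits>):

static_assert(std::is_trivially_copyable<message_T>::value &&
              std::is_standard_layout<message_T>::value,
              "raw-byte sends require a trivially copyable, standard-layout message type");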
Instead, build a (sequence of) buffer(s) that represents the message as raw bytes, e.g.
auto& msg = m_messagesOut.front();
msg.length = msg.body.size();
m_socket.async_send_to(
std::vector<asio::const_buffer>{
asio::buffer(&msg.id, sizeof(msg.id)),
asio::buffer(&msg.length, sizeof(msg.length)),
asio::buffer(msg.body),
},
// ...
Thread-safe queues seem to be overkill since you have a single service thread; that is an implicit "strand", so you can post to it to get single-threaded semantics.
Here are a few adaptations to make it work so far (except for the exercise-for-the-reader pointed out above):
Live On Coliru
#include <boost/asio.hpp>
#include <cmath> // M_PI, used in main below
#include <iostream>
#include <deque>
#include <sstream>
// Library facilities
namespace asio = boost::asio;
using asio::ip::udp;
using boost::system::error_code;
using namespace std::chrono_literals;
/////////////////////////////////
// mock ups:
template <typename message_T> struct Message {
message_T id;
uint16_t length; // automatically filled on send, UDP packets are < 64k
std::string body;
template <typename T> friend Message& operator<<(Message& m, T const& v)
{
std::ostringstream oss;
oss << v;
m.body += oss.str();
//m.body += '\0'; // suggestion for easier message extraction
return m;
}
};
// Thread-safety can be replaced with the implicit strand of a single service
// thread
template <typename T> using TSQueue = std::deque<T>;
// end mock ups
/////////////////////////////////
template <typename message_T> class UDPServer {
public:
UDPServer(uint16_t port)
: m_socket(m_asioContext, udp::endpoint(udp::v4(), port))
{
m_port = port;
}
virtual ~UDPServer() { Stop(); }
public:
// Starts the server!
bool Start()
{
if (m_threadContext.joinable() && !m_asioContext.stopped())
return false;
try {
// Issue a task to the asio context
WaitForMessages();
m_threadContext = std::thread([this]() { m_asioContext.run(); });
} catch (std::exception const& e) {
// Something prohibited the server from listening
std::cerr << "[SERVER # PORT " << m_port
<< "] Exception: " << e.what() << "\n";
return false;
}
std::cout << "[SERVER # PORT " << m_port << "] Started!\n";
return true;
}
// Stops the server!
void Stop()
{
// Tell the context to stop processing
m_asioContext.stop();
// Tidy up the context thread
if (m_threadContext.joinable())
m_threadContext.join();
// Inform someone, anybody, if they care...
std::cout << "[SERVER # PORT " << m_port << "] Stopped!\n";
m_asioContext
.reset(); // required in case you want to reuse this Server object
}
void Send(message_T& msg, const udp::endpoint& ep)
{
asio::post(m_asioContext, [this, msg, ep]() {
// If the queue has a message in it, then we must
// assume that it is in the process of asynchronously being written.
bool bWritingMessage = !m_messagesOut.empty();
m_messagesOut.push_back(msg);
if (!bWritingMessage) {
WriteMessage(ep);
}
});
}
private:
void WaitForMessages() // assumed to be on-strand
{
vBuffer.assign(vBuffer.size(), '\0');
m_socket.async_receive_from(
asio::buffer(vBuffer.data(), vBuffer.size()), m_endpoint,
[this](std::error_code ec, std::size_t length) {
if (!ec) {
std::string const data(vBuffer.data(), length);
std::cout << "[SERVER # PORT " << m_port << "] Got "
<< length << " bytes \n Data: " << data << "\n"
<< "Address: " << m_endpoint.address()
<< " Port: " << m_endpoint.port() << "\n"
<< std::endl;
} else {
std::cerr << "[SERVER # PORT " << m_port
<< "] Exception: " << ec.message() << "\n";
return;
}
WaitForMessages();
});
}
void WriteMessage(const udp::endpoint& ep)
{
auto& msg = m_messagesOut.front();
msg.length = msg.body.size();
m_socket.async_send_to(
std::vector<asio::const_buffer>{
asio::buffer(&msg.id, sizeof(msg.id)),
asio::buffer(&msg.length, sizeof(msg.length)),
asio::buffer(msg.body),
},
ep, [this, ep](std::error_code ec, std::size_t length) {
if (!ec) {
m_messagesOut.pop_front();
// If the queue is not empty, there are more messages to
// send, so make this happen by issuing the task to send the
// next header.
if (!m_messagesOut.empty()) {
WriteMessage(ep);
}
} else {
std::cout << "[SERVER # PORT " << m_port
<< "] Write Header Fail.\n";
m_socket.close();
}
});
}
private:
uint16_t m_port = 0;
udp::endpoint m_endpoint;
std::vector<char> vBuffer = std::vector<char>(21);
protected:
TSQueue<message_T> m_messagesIn;
TSQueue<message_T> m_messagesOut;
Message<message_T> m_tempMessageBuf;
asio::io_context m_asioContext;
std::thread m_threadContext;
udp::socket m_socket;
};
enum class TestMsg {
Ping,
Join,
Leave
};
int main()
{
UDPServer<Message<TestMsg>> server(60'000);
if (server.Start()) {
std::this_thread::sleep_for(3s);
{
Message<TestMsg> msg;
msg.id = TestMsg::Join;
msg << "hello PI equals " << M_PI << " in this world";
server.Send(msg, {{}, 60'001});
}
std::this_thread::sleep_for(27s);
}
}
For some reason netcat doesn't work with UDP on Coliru, so here's a "live" demo:
You can see our netcat client messages arriving. You can see the message Sent to 60001 arriving in the tcpdump output.
I am trying to connect to a secure websocket using asio.
This example will work for an ip address:
#include <iostream>
#include <asio.hpp>
int main() {
asio::error_code ec;
asio::io_context context;
asio::io_context::work idleWork(context);
asio::ip::tcp::endpoint endpoint(asio::ip::make_address("51.38.81.49", ec), 80);
asio::ip::tcp::socket socket(context);
socket.connect(endpoint, ec);
if (!ec) {
std::cout << "Connected!" << std::endl;
} else {
std::cout << "Failed to connect to address: \n" << ec.message() << std::endl;
}
return 0;
}
but how would I change it so I connect to "wss://api2.example.com"?
EDIT:
Thanks for your answer karastojko - it seems to get me some of the way. I would, though, like to know whether I am actually connected to the server, so I have updated my example with your input, added a working WSS endpoint which I know will answer, and created a read and a write.
#include <chrono>
#include <iostream>
#include <thread>
#include <vector>
#include <asio.hpp>
#include <asio/ts/buffer.hpp>
std::vector<char> vBuffer(1 * 1024);
// This should output the received data
void GrabSomeData(asio::ip::tcp::socket& socket) {
    socket.async_read_some(asio::buffer(vBuffer.data(), vBuffer.size()),
        [&](std::error_code ec, std::size_t length) {
            if (!ec) {
                std::cout << "\n\nRead " << length << " bytes\n\n";
                for (std::size_t i = 0; i < length; i++)
                    std::cout << vBuffer[i];
                GrabSomeData(socket);
            }
        }
    );
}
int main() {
    asio::error_code ec;
    asio::io_context context;
    asio::io_context::work idleWork(context);
    std::thread thrContext = std::thread([&]() { context.run(); });
    // I hope this is what you meant
    asio::ip::tcp::resolver res(context);
    asio::ip::tcp::socket socket(context);
    asio::connect(socket, res.resolve("echo.websocket.org", std::to_string(443)));
    // Check the socket is open
    if (socket.is_open()) {
        // Start to output incoming data
        GrabSomeData(socket);
        // Send data to the websocket, which should be sent back
        std::string sRequest = "Echo";
        socket.write_some(asio::buffer(sRequest.data(), sRequest.size()), ec);
        // Wait some time, so the data is received
        using namespace std::chrono_literals;
        std::this_thread::sleep_for(20000ms);
        context.stop();
        if (thrContext.joinable()) thrContext.join();
    }
    return 0;
}
For that purpose use the resolver class (note the service argument is a string):
asio::ip::tcp::resolver res(context);
asio::ip::tcp::socket socket(context);
asio::connect(socket, res.resolve("api2.example.com", "80"));
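Note that this only gets you a plain TCP connection. wss:// additionally requires a TLS handshake, and on top of that the websocket upgrade (which a library such as Boost.Beast implements). A minimal TLS-layer sketch using asio's OpenSSL wrapper; the host is from the question, and certificate verification is omitted for brevity:

#include <asio.hpp>
#include <asio/ssl.hpp>

int main() {
    asio::io_context io;
    asio::ssl::context ctx(asio::ssl::context::tls_client);
    asio::ssl::stream<asio::ip::tcp::socket> stream(io, ctx);
    asio::ip::tcp::resolver res(io);
    // TCP connect on the underlying socket, then the TLS handshake on top
    asio::connect(stream.next_layer(), res.resolve("api2.example.com", "443"));
    stream.handshake(asio::ssl::stream_base::client);
    // the websocket (HTTP Upgrade) handshake would still have to follow
}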
I want to send a struct from client to server using boost::asio. I followed the boost tutorial at https://www.boost.org/doc/libs/1_47_0/doc/html/boost_asio/examples.html#boost_asio.examples.serialization. I slightly modified the code in server.cpp and client.cpp: with the new code, after a connection is established, client.cpp writes the stock struct to the server and the server reads the stock information. (In the tutorial version, after a connection is established, the server writes the stock structs to the client and the client reads them; that version works for me.)
My problem is that after a connection is established, the async_write in client.cpp causes error
Error in write: An existing connection was forcibly closed by the remote host
and the async_read in server.cpp causes error
Error in read:The network connection was aborted by the local system.
As suggested by some forum answers, I changed the this pointers in the handlers of async_write and async_read to shared_from_this. Still the problem exists.
I am not able to identify whether the client or the server side is causing the problem. Please help.
server.cpp
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/lexical_cast.hpp>
#include <iostream>
#include <vector>
#include "connection.h" // Must come before boost/serialization headers.
#include <boost/serialization/vector.hpp>
#include <boost/enable_shared_from_this.hpp>
#include "stock.h"
namespace s11n_example
{
/// Serves stock quote information to any client that connects to it.
class server : public boost::enable_shared_from_this<server>
{
private:
/// The acceptor object used to accept incoming socket connections.
boost::asio::ip::tcp::acceptor acceptor_;
/// The data to be sent to each client.
std::vector<stock> stocks_;
public:
/// Constructor opens the acceptor and starts waiting for the first incoming
/// connection.
server(boost::asio::io_service& io_service, unsigned short port):
acceptor_(io_service, boost::asio::ip::tcp::endpoint(boost::asio::ip::tcp::v4(), port))
{
// Start an accept operation for a new connection.
connection_ptr new_conn(new connection(acceptor_.get_io_service()));
acceptor_.async_accept(new_conn->socket(),
boost::bind(&server::handle_accept, this,boost::asio::placeholders::error, new_conn));
}
/// Handle completion of a accept operation.
void handle_accept(const boost::system::error_code& e, connection_ptr conn)
{
if (!e)
{
std::cout << "Received a connection" <<std::endl;
conn->async_read(stocks_,
boost::bind(&server::handle_read, shared_from_this(),boost::asio::placeholders::error));
}
// Start an accept operation for a new connection.
connection_ptr new_conn(new connection(acceptor_.get_io_service()));
acceptor_.async_accept(new_conn->socket(),
boost::bind(&server::handle_accept, this,boost::asio::placeholders::error, new_conn));
}
/// Handle completion of a read operation.
void handle_read(const boost::system::error_code& e)
{
if (!e)
{
// Print out the data that was received.
for (std::size_t i = 0; i < stocks_.size(); ++i)
{
std::cout << "Stock number " << i << "\n";
std::cout << " code: " << stocks_[i].code << "\n";
std::cout << " name: " << stocks_[i].name << "\n";
std::cout << " open_price: " << stocks_[i].open_price << "\n";
std::cout << " high_price: " << stocks_[i].high_price << "\n";
std::cout << " low_price: " << stocks_[i].low_price << "\n";
std::cout << " last_price: " << stocks_[i].last_price << "\n";
std::cout << " buy_price: " << stocks_[i].buy_price << "\n";
std::cout << " buy_quantity: " << stocks_[i].buy_quantity << "\n";
std::cout << " sell_price: " << stocks_[i].sell_price << "\n";
std::cout << " sell_quantity: " << stocks_[i].sell_quantity << "\n";
}
}
else
{
// An error occurred.
std::cerr << "Error in read:" << e.message() << std::endl;
}
}
};
} // namespace s11n_example
int main(int argc, char* argv[])
{
try
{
// Check command line arguments.
if (argc != 2)
{
std::cerr << "Usage: server <port>" << std::endl;
return 1;
}
unsigned short port = boost::lexical_cast<unsigned short>(argv[1]);
boost::asio::io_service io_service;
boost::shared_ptr<s11n_example::server> server(new s11n_example::server(io_service, port));
io_service.run();
}
catch (std::exception& e)
{
std::cerr << e.what() << std::endl;
}
return 0;
}
client.cpp
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <iostream>
#include <vector>
#include "connection.h" // Must come before boost/serialization headers.
#include <boost/serialization/vector.hpp>
#include <boost/enable_shared_from_this.hpp>
#include "stock.h"
namespace s11n_example {
/// Downloads stock quote information from a server.
class client : public boost::enable_shared_from_this<client>
{
private:
/// The connection to the server.
connection connection_;
/// The data received from the server.
std::vector<stock> stocks_;
public:
/// Constructor starts the asynchronous connect operation.
client(boost::asio::io_service& io_service, const std::string& host, const std::string& service)
: connection_(io_service)
{
// Resolve the host name into an IP address.
boost::asio::ip::tcp::resolver resolver(io_service);
boost::asio::ip::tcp::resolver::query query(host, service);
boost::asio::ip::tcp::resolver::iterator endpoint_iterator =
resolver.resolve(query);
// Start an asynchronous connect operation.
boost::asio::async_connect(connection_.socket(), endpoint_iterator,
boost::bind(&client::handle_connect, this,boost::asio::placeholders::error));
}
/// Handle completion of a connect operation.
void handle_connect(const boost::system::error_code& e) //, connection_ptr conn
{
if (!e)
{
std::cout << "Connected to server!" << std::endl;
// Create the data to be sent to each client.
stock s;
s.code = "ABC";
s.name = "A Big Company";
s.open_price = 4.56;
s.high_price = 5.12;
s.low_price = 4.33;
s.last_price = 4.98;
s.buy_price = 4.96;
s.buy_quantity = 1000;
s.sell_price = 4.99;
s.sell_quantity = 2000;
stocks_.push_back(s);
s.code = "DEF";
s.name = "Developer Entertainment Firm";
s.open_price = 20.24;
s.high_price = 22.88;
s.low_price = 19.50;
s.last_price = 19.76;
s.buy_price = 19.72;
s.buy_quantity = 34000;
s.sell_price = 19.85;
s.sell_quantity = 45000;
stocks_.push_back(s);
// Successfully established connection. Start operation to write the list
// of stocks.
connection_.async_write(stocks_,
boost::bind(&client::handle_write, shared_from_this(),boost::asio::placeholders::error)); //,&conn )
}
else
{
// An error occurred. Log it and return.
std::cerr << "Error in connecting to server" << e.message() << std::endl;
}
}
/// Handle completion of a write operation.
void handle_write(const boost::system::error_code& e)//, connection* conn
{
if (!e)
{
std::cout << "Finished writing to server" << std::endl;
}
else
{
// An error occurred. Log it and return. Since we are not starting a new
// operation the io_service will run out of work to do and the client will
// exit.
std::cerr << "Error in write: " << e.message() << std::endl;
}
// Nothing to do. The socket will be closed automatically when the last
// reference to the connection object goes away.
}
};
} // namespace s11n_example
int main(int argc, char* argv[])
{
try
{
// Check command line arguments.
if (argc != 3)
{
std::cerr << "Usage: client <host> <port>" << std::endl;
return 1;
}
boost::asio::io_service io_service;
//s11n_example::client client(io_service, argv[1], argv[2]);
boost::shared_ptr<s11n_example::client> client(new s11n_example::client(io_service, argv[1], argv[2]));
io_service.run();
}
catch (std::exception& e)
{
std::cerr << e.what() << std::endl;
}
return 0;
}
Thanks.
You need to pass conn to handle_read, otherwise it will be destructed at the end of the handle_accept method. When it's destructed, the socket it contains will also be destructed and the connection will close.
conn->async_read(stocks_,
boost::bind(&server::handle_read, shared_from_this(), conn, boost::asio::placeholders::error));
Lambdas make this easier to read than using bind:
auto self = shared_from_this();
conn->async_read(stocks_,
[self, this, conn] (boost::system::error_code ec) { handle_read(ec); });
The variables listed in the capture list will be copied so the shared pointers will be kept alive.
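Putting it together, a sketch of the adjusted server-side flow; note that handle_read gains a connection_ptr parameter so the captured (or bound) conn matches:

void handle_accept(const boost::system::error_code& e, connection_ptr conn)
{
    if (!e)
    {
        auto self = shared_from_this();
        conn->async_read(stocks_,
            [self, this, conn](const boost::system::error_code& ec) {
                handle_read(ec, conn);
            });
    }
    // start an accept operation for the next connection, as before
}

void handle_read(const boost::system::error_code& e, connection_ptr conn)
{
    // conn is kept alive by the capture above; print stocks_ as before
}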