boost asio parallel_group per operation cancellation - c++

Below is a small producer/consumer example:
#include <cassert>
#include <iostream>
#include <boost/asio/use_awaitable.hpp>
#include <boost/system/detail/generic_category.hpp>
#include <boost/asio/experimental/channel.hpp>
#include <boost/asio/experimental/awaitable_operators.hpp>
#include <boost/asio.hpp>
#include <boost/asio/experimental/as_tuple.hpp>
using namespace boost::asio::experimental::awaitable_operators;
template<typename T>
struct Channel : public boost::asio::experimental::channel<void(boost::system::error_code, T)> {
    using boost::asio::experimental::channel<void(boost::system::error_code, T)>::channel;
};
boost::asio::awaitable<void> consumer(Channel<int>& ch1, Channel<int>& ch2,
                                      int nexpected) {
    int num = 0;
    for (;;) {
        auto [order, ex0, r0, ex1, r1] = co_await boost::asio::experimental::make_parallel_group(
            [&ch1](auto token) {
                return ch1.async_receive(std::move(token));
            },
            [&ch2](auto token) {
                return ch2.async_receive(std::move(token));
            }
        ).async_wait(
            boost::asio::experimental::wait_for_one{},
            boost::asio::use_awaitable);
        std::cout << "num = " << num << std::endl;
        num++;
        if (num == nexpected) {
            std::cout << "consumer is all done" << std::endl;
            break;
        }
    }
    assert(num == nexpected && "sent must be equal received");
}
boost::asio::awaitable<void> producer(Channel<int>& ch, int const n, int const id) {
    for (int i = 0; i < n; i++) {
        auto value = id == 1 ? i : -i;
        std::cout << "producer " << id << ": sending " << value << std::endl;
        auto const [ec] = co_await ch.async_send(
            boost::system::error_code{},
            value,
            boost::asio::experimental::as_tuple(boost::asio::use_awaitable));
        if (ec) std::cout << "error!" << std::endl;
    }
    co_return;
}
void test0() {
    auto ctx = boost::asio::io_context{};
    std::size_t const n = 10;
    auto ch1 = Channel<int>{ctx, 10};
    auto ch2 = Channel<int>{ctx, 10};
    boost::asio::co_spawn(
        ctx,
        producer(ch2, 100, 2),
        boost::asio::detached
    );
    boost::asio::co_spawn(
        ctx,
        producer(ch1, 100, 1),
        boost::asio::detached
    );
    boost::asio::co_spawn(
        ctx,
        consumer(ch1, ch2, 200),
        boost::asio::detached
    );
    ctx.run();
}
int main() {
    test0();
    return 0;
}
In short, there are two boost::asio experimental channels, two producers, and one consumer. The consumer reads from either one of these channels. I'm using make_parallel_group with wait_for_one, which waits for one operation to complete and cancels the others.
When running the program, I observe that one async_receive completes, the other is cancelled, and the async_send is somehow cancelled as well, without an error code stating that it was cancelled. That means the consumer sees only 100 values; I expect to see all 200 values.
Questions:
I'm expecting per-operation cancellation here: cancelling an async_receive should not force cancellation of an async_send.
Looking at the source code of parallel_group (detail namespace), I do not see calls to bind_cancellation_slot on a per-operation basis... am I missing something?
Thanks
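For reference, this is a minimal sketch of what I mean by per-operation cancellation (illustrative only, not the parallel_group internals; it assumes Boost >= 1.77 for cancellation_signal/bind_cancellation_slot, and ch1 is the channel from the example above):
// Illustrative sketch: a cancellation_signal bound to exactly one outstanding
// operation. Emitting it should abort only that receive; a pending async_send
// on the same channel should be unaffected.
boost::asio::cancellation_signal cancel_receive;

ch1.async_receive(
    boost::asio::bind_cancellation_slot(
        cancel_receive.slot(),
        [](boost::system::error_code ec, int /*value*/) {
            // ec == boost::asio::error::operation_aborted when cancelled
        }));

// Later, cancel only this receive:
cancel_receive.emit(boost::asio::cancellation_type::total);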

Related

Boost Asio and Udp Poll() No incoming data

I have to handle data from 100 ports in parallel, for 100 ms every second.
I am using Ubuntu.
I did some research and saw that the poll() function is a good candidate for avoiding 100 threads to handle data arriving over UDP in parallel.
I did the main part with Boost and tried to integrate poll() with it.
The problem is that when the client sends data to the server, I receive nothing.
According to Wireshark, the data arrives at the right host (localhost, port 1234).
Did I miss something, or did I get something wrong?
The test code (server):
#include <deque>
#include <iostream>
#include <chrono>
#include <thread>
#include <sys/poll.h>
#include <boost/optional.hpp>
#include <boost/asio.hpp>
#include <boost/bind/bind.hpp>
using boost::asio::ip::udp;
using namespace boost::asio;
using namespace std::chrono_literals;
std::string ip_address = "127.0.0.1";
template<typename T, size_t N>
size_t arraySize( T(&)[N] )
{
return(N);
}
class UdpReceiver
{
using Resolver = udp::resolver;
using Sockets = std::deque<udp::socket>;
using EndPoint = udp::endpoint;
using Buffer = std::array<char, 100>; // receiver buffer
public:
explicit UdpReceiver()
: work_(std::ref(resolver_context)), thread_( [this]{ resolver_context.run(); })
{ }
~UdpReceiver()
{
work_ = boost::none; // using work to keep run active always !
thread_.join();
}
void async_resolve(udp::resolver::query const& query_) {
resolver_context.post([this, query_] { do_resolve(query_); });
}
// callback for event-loop in main thread
void run_handler(int fd_idx) {
// start reading
auto result = read(fd_idx, receive_buf.data(), sizeof(Buffer));
// increment number of received packets
received_packets = received_packets + 1;
std::cout << "Received bytes " << result << " current recorded packets " << received_packets <<'\n';
// run handler posted from resolver threads
handler_context.poll();
handler_context.reset();
}
static void handle_receive(boost::system::error_code error, udp::resolver::iterator const& iterator) {
std::cout << "handle_resolve:\n"
" " << error.message() << "\n";
if (!error)
std::cout << " " << iterator->endpoint() << "\n";
}
// get current file descriptor
int fd(size_t idx)
{
return sockets[idx].native_handle();
}
private:
void do_resolve(boost::asio::ip::udp::resolver::query const& query_) {
boost::system::error_code error;
Resolver resolver(resolver_context);
Resolver::iterator result = resolver.resolve(query_, error);
sockets.emplace_back(udp::socket(resolver_context, result->endpoint()));
// post handler callback to service running in main thread
resolver_context.post(boost::bind(&UdpReceiver::handle_receive, error, result));
}
private:
Sockets sockets;
size_t received_packets = 0;
EndPoint remote_receiver;
Buffer receive_buf {};
io_context resolver_context;
io_context handler_context;
boost::optional<boost::asio::io_context::work> work_;
std::thread thread_;
};
int main (int argc, char** argv)
{
UdpReceiver udpReceiver;
udpReceiver.async_resolve(udp::resolver::query(ip_address, std::to_string(1234)));
//logic
pollfd fds[2] { };
for(int i = 0; i < arraySize(fds); ++i)
{
fds[i].fd = udpReceiver.fd(0);
fds[i].events = 0;
fds[i].events |= POLLIN;
fcntl(fds[i].fd, F_SETFL, O_NONBLOCK);
}
// simple event-loop
while (true) {
if (poll(fds, arraySize(fds), -1)) // waiting for wakeup call. Timeout - inf
{
for(auto &fd : fds)
{
if(fd.revents & POLLIN) // checking if we have something to read
{
fd.revents = 0; // reset kernel message
udpReceiver.run_handler(fd.fd); // call resolve handler. Do read !
}
}
}
}
return 0;
}
This looks like a confused mix of C-style poll code and Asio code. The point is:
you don't need poll(); Asio does it internally (using epoll/select/kqueue/IOCP, whatever is available)
UDP is connectionless, so you don't need more than one socket to receive all "connections" (senders)
I'd replace it all with a single udp::socket on a single thread. You don't even have to manage the thread/work:
net::thread_pool io(1); // single threaded
udp::socket s{io, {{}, 1234}};
Let's run an asynchronous receive loop for 5s:
std::array<char, 100> receive_buffer;
udp::endpoint sender;
std::function<void(error_code, size_t)> read_loop;
read_loop = [&](error_code ec, size_t bytes) {
if (bytes != size_t(-1)) {
//std::cout << "read_loop (" << ec.message() << ")\n";
if (ec)
return;
received_packets += 1;
unique_senders.insert(sender);
//std::cout << "Received:" << bytes << " sender:" << sender << " recorded:" << received_packets << "\n";
//std::cout << std::string_view(receive_buffer.data(), bytes) << "\n";
}
s.async_receive_from(net::buffer(receive_buffer), sender, read_loop);
};
read_loop(error_code{}, -1); // prime the async pump
// after 5s stop
std::this_thread::sleep_for(5s);
post(io, [&s] { s.cancel(); });
io.join();
At the end, we can report the statistics:
std::cout << "A total of " << received_packets << " were received from "
<< unique_senders.size() << " unique senders\n";
With a simulated load in bash:
function client() { while read a; do echo "$a" > /dev/udp/localhost/1234 ; done < /etc/dictionaries-common/words; }
for a in {1..20}; do client& done; time wait
We get:
A total of 294808 were received from 28215 unique senders
real 0m5,007s
user 0m0,801s
sys 0m0,830s
This is obviously not optimized; the bottleneck here is likely the many bash subshells being launched for the clients.
Full Listing
#include <boost/asio.hpp>
#include <boost/bind/bind.hpp>
#include <iostream>
#include <set>
namespace net = boost::asio;
using boost::asio::ip::udp;
using boost::system::error_code;
using namespace std::chrono_literals;
int main ()
{
net::thread_pool io(1); // single threaded
udp::socket s{io, {{}, 1234}};
std::set<udp::endpoint> unique_senders;
size_t received_packets = 0;
{
std::array<char, 100> receive_buffer;
udp::endpoint sender;
std::function<void(error_code, size_t)> read_loop;
read_loop = [&](error_code ec, size_t bytes) {
if (bytes != size_t(-1)) {
//std::cout << "read_loop (" << ec.message() << ")\n";
if (ec)
return;
received_packets += 1;
unique_senders.insert(sender);
//std::cout << "Received:" << bytes << " sender:" << sender << " recorded:" << received_packets << "\n";
//std::cout << std::string_view(receive_buffer.data(), bytes) << "\n";
}
s.async_receive_from(net::buffer(receive_buffer), sender, read_loop);
};
read_loop(error_code{}, -1); // prime the async pump
// after 5s stop
std::this_thread::sleep_for(5s);
post(io, [&s] { s.cancel(); });
io.join();
}
std::cout << "A total of " << received_packets << " were received from "
<< unique_senders.size() << " unique senders\n";
}

boost::asio doesn't trigger the read handler, while Wireshark sees data coming in

I am trying to send some data and act on the reply. I can see (using Wireshark) that the data is sent and received by the system, but boost::asio doesn't trigger my callback. Does somebody have an idea what I am doing wrong?
#include <asio.hpp>
#include <bits/stdint-uintn.h>
#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <iostream>
#include <memory>
#include <mutex>
#include <string>
#include <system_error>
#include <thread>
static const int polynomial = 0x1021; // represents x^16+x^12+x^5+1
uint16_t calc(uint8_t* bytes, std::size_t length)
{
uint16_t new_crc = 0x0000;
// bytes part
for (std::size_t j = 0; j < length; ++j)
{
for (int i = 0; i < 8; ++i)
{
bool bit = ((bytes[j] >> (7 - i) & 1) == 1);
bool c15 = ((new_crc >> 15 & 1) == 1);
new_crc <<= 1;
// If coefficient of bit and remainder polynomial = 1 xor crc with polynomial
if (c15 ^ bit) new_crc ^= polynomial;
}
}
return new_crc;
}
int main(int argc, const char* argv[])
{
asio::io_service main_io_service;
std::string ip = "192.168.100.155";
int portP = 4001, portS = 4002;
auto sPrimary = std::shared_ptr<asio::ip::tcp::socket>(new asio::ip::tcp::socket(main_io_service));
auto sSecondary = std::shared_ptr<asio::ip::tcp::socket>(new asio::ip::tcp::socket(main_io_service));
auto epPrimary = asio::ip::tcp::endpoint(asio::ip::address::from_string(ip), portP);
auto epSecondary = asio::ip::tcp::endpoint(asio::ip::address::from_string(ip), portS);
std::error_code ec;
sPrimary->connect(epPrimary, ec);
if (ec || !sPrimary->is_open())
{
std::cerr << "primary failed to connect" << std::endl;
}
ec.clear();
sSecondary->connect(epSecondary, ec);
if (ec || !sSecondary->is_open())
{
std::cerr << "secondary failed to connect" << std::endl;
}
std::mutex mutex;
std::unique_lock<std::mutex> lock(mutex);
std::condition_variable cv;
const std::size_t msgSize = 9;
uint8_t msg[msgSize];
int i = 0;
msg[i++] = 0x02;
msg[i++] = 0xFF;
msg[i++] = 0x00;
msg[i++] = 0x00;
msg[i++] = 0x00;
msg[i++] = 0x00;
uint16_t crc = calc(msg, i);
msg[i++] = (uint8_t) (crc & 0xFF);
msg[i++] = (uint8_t) (crc >> 8);
msg[i++] = 0x03;
const std::size_t buffSize = 1024;
uint8_t buff[buffSize];
std::size_t bytesRead = 0;
asio::async_write((*sPrimary.get()), asio::buffer(msg, msgSize), [&sPrimary, &cv, &buff, &buffSize, &bytesRead](const std::error_code &ec, std::size_t bytesWritten)
{
asio::async_read((*sPrimary.get()), asio::buffer(buff, buffSize), [&cv, &bytesRead](const std::error_code &ec, std::size_t currentBytesRead)
{
bytesRead += currentBytesRead;
cv.notify_one();
});
});
main_io_service.run();
cv.wait(lock);
for (std::size_t i = 0; i < bytesRead; ++i)
std::cout << std::hex << buff[i];
main_io_service.stop();
return 0;
}
Just added the whole test code that will compile, although you need a device that answers. This code talks to a serial server that has a piece of hardware that replies to the sent packet.
Thanks!
The problem that #rafix07 calls out is your problem.
Even if you "fake it out" by running io_service::run() on another thread, you technically still have a time window for the same race condition.
In general, locking synchronization primitives do not mix with task-based parallelism. In this case it would very much appear you just want to either post another task to the service when the read completes, or expire a timer that you can respond to.
In the very simple case of your code, you could even use other, simpler options: exploit the fact that run() blocks until all handlers have completed (that is, take the sheer fact that run() returned as an indication that the read completed), or simply not use asynchronous handlers at all, since they don't serve a purpose here (this could be down to oversimplified example code).
These options are worked out below, numbered 1 through 4 and presented in reverse order, starting with the simplest.
4. use synchronous IO
This is by far the simplest. Many other simplifications are made throughout the program.
Live On Coliru
#include <cstdint>
#include <iostream>
#include <string>
#ifndef NOBOOST
#include <boost/asio.hpp>
namespace asio = boost::asio;
using boost::system::error_code;
#else
#include <asio.hpp>
namespace asio = boost::asio;
using std::error_code;
#endif
static const int polynomial = 0x1021; // represents x^16+x^12+x^5+1
uint16_t calc_crc(uint8_t* bytes, std::size_t length) {
uint16_t new_crc = 0x0000;
// bytes part
for (std::size_t j = 0; j < length; ++j) {
for (int i = 0; i < 8; ++i) {
bool bit = ((bytes[j] >> (7 - i) & 1) == 1);
bool c15 = ((new_crc >> 15 & 1) == 1);
new_crc <<= 1;
// If coefficient of bit and remainder polynomial = 1 xor crc with polynomial
if (c15 ^ bit)
new_crc ^= polynomial;
}
}
return new_crc;
}
int main() {
static const std::string ip = "127.0.0.1";
static const unsigned short portP = 4001, portS = 4002;
using asio::ip::address;
asio::io_service io;
asio::ip::tcp::socket sPrimary(io), sSecondary(io);
sPrimary.connect({ address::from_string(ip), portP });
sSecondary.connect({ address::from_string(ip), portS });
uint8_t msg[] {
0x02, 0xFF, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, //crc
0x03
};
{ // set crc
uint16_t const crc = calc_crc(msg, sizeof(msg)-3);
msg[sizeof(msg)-3] = (uint8_t)(crc & 0xFF);
msg[sizeof(msg)-2] = (uint8_t)(crc >> 8);
}
std::string buff;
auto bytesWritten = asio::write(sPrimary, asio::buffer(msg));
std::cout << bytesWritten << " sent" << std::endl;
auto bytesRead = asio::read(sPrimary, asio::dynamic_buffer(buff), asio::transfer_exactly(32));
std::cout << bytesRead << " received" << std::endl;
for (uint8_t ch : buff)
std::cout << std::hex << static_cast<int>(ch);
std::cout << std::endl;
}
Prints
9 sent
32 received
23696e636c756465203c63737464696e743ea23696e636c756465203c696f73
And indeed, that's the hex encoding of the first 32 bytes of main.cpp
3. use implicit completion
Trust that the handlers ran if run() returns (error handling would be required). The code is essentially the same but with more elaborate concerns around lambda captures and object lifetimes.
Note: all other simplifications still apply
Live On Coliru
asio::async_write(sPrimary, asio::buffer(msg), [&sPrimary, &buff](error_code ec, size_t bytesWritten) {
std::cout << "async_write: " << ec.message() << ", " << bytesWritten << " sent" << std::endl;
asio::async_read(sPrimary, asio::dynamic_buffer(buff), asio::transfer_exactly(32), [](error_code ec, size_t bytesRead) {
std::cout << "async_read: " << ec.message() << ", " << bytesRead << " received" << std::endl;
});
});
io.run();
for (uint8_t ch : buff)
std::cout << std::hex << static_cast<int>(ch);
std::cout << std::endl;
Prints:
async_write: Success, 9 sent
async_read: Success, 32 received
23696e636c756465203c63737464696e743ea23696e636c756465203c696f73
2. use a timer signal
This most closely "resembles" the CV approach you had, by using a timer object to represent the condition.
notably, this does error handling better than the above "3." code
note also, it guarantees to call the completion handler of signal_complete (unless the program terminates prematurely)
as such, the information is in the expiry() of the timer, not in the error code (the timer will always appear canceled)
Live On Coliru
std::string buff;
asio::high_resolution_timer signal_complete(io, std::chrono::high_resolution_clock::time_point::max());
signal_complete.async_wait([&signal_complete, &buff](error_code ec) {
std::cout << "signal_complete: " << ec.message() << std::endl;
if (signal_complete.expiry() < std::chrono::high_resolution_clock::now()) {
for (uint8_t ch : buff)
std::cout << std::hex << static_cast<int>(ch);
std::cout << std::endl;
}
});
asio::async_write(sPrimary, asio::buffer(msg), [&sPrimary, &buff, &signal_complete](error_code ec, size_t bytesWritten) {
std::cout << "async_write: " << ec.message() << ", " << bytesWritten << " sent" << std::endl;
asio::async_read(sPrimary, asio::dynamic_buffer(buff), asio::transfer_exactly(32), [&signal_complete](error_code ec, size_t bytesRead) {
std::cout << "async_read: " << ec.message() << ", " << bytesRead << " received" << std::endl;
if (!ec) {
signal_complete.expires_at(std::chrono::high_resolution_clock::time_point::min());
} else {
signal_complete.cancel();
}
});
});
io.run();
Prints:
async_write: Success, 9 sent
async_read: Success, 32 received
signal_complete: Operation canceled
23696e636c756465203c63737464696e743ea23696e636c756465203c696f73
1. Post another task when read completes
This is the most natural fit to most async IO scenarios, because it puts all tasks in the same queue.
The only part that is further complicated is getting the life-times of (shared) objects right.
Live On Coliru
std::string buff;
asio::async_write(sPrimary, asio::buffer(msg), [&io, &sPrimary, &buff](error_code ec, size_t bytesWritten) {
std::cout << "async_write: " << ec.message() << ", " << bytesWritten << " sent" << std::endl;
asio::async_read(sPrimary, asio::dynamic_buffer(buff), asio::transfer_exactly(32), [&io, &buff](error_code ec, size_t bytesRead) {
std::cout << "async_read: " << ec.message() << ", " << bytesRead << " received" << std::endl;
if (!ec) {
post(io, [&buff] {
for (uint8_t ch : buff)
std::cout << std::hex << static_cast<int>(ch);
std::cout << std::endl;
});
}
});
});
io.run();
Printing, again:
async_write: Success, 9 sent
async_read: Success, 32 received
23696e636c756465203c63737464696e743ea23696e636c756465203c696f73

Difficulties in assigning threads for function handlers of async operations in boost asio

I know that the thread which runs io_service.run() is responsible for executing the completion handlers of asynchronous operations, but I have problems assigning a thread for an asynchronous operation that fires in the callback function of a parent async operation.
For example, consider the program below:
#ifdef WIN32
#define _WIN32_WINNT 0x0501
#include <stdio.h>
#endif
#include <fstream> // for writting to file
#include <iostream> // for writting to file
#include <stdlib.h> // atoi (string to integer)
#include <chrono>
#include <boost/thread.hpp> // for multi threading
#include <boost/bind.hpp>
#include <boost/asio.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/enable_shared_from_this.hpp>
#include <signal.h> // For Interrupt Handling (Signal Handling Event)
#include <vector>
#define max_length 46
#define server_ip1 "127.0.0.1"
//#define server_ip2 "127.0.0.1"
#define server_port 4000
#define MEM_FN(x) boost::bind(&self_type::x, shared_from_this())
#define MEM_FN1(x,y) boost::bind(&self_type::x, shared_from_this(),y)
#define MEM_FN2(x,y,z) boost::bind(&self_type::x, shared_from_this(),y,z)
void talk1();
using namespace boost::asio;
io_service service, service2;
std::chrono::time_point<std::chrono::high_resolution_clock> t_start;
ip::udp::socket sock1(service);
ip::udp::endpoint ep1( ip::address::from_string(server_ip1), 4000);
//ip::udp::socket sock2(service);
//ip::udp::endpoint ep2( ip::address::from_string(server_ip2), 4000);
std::chrono::time_point<std::chrono::high_resolution_clock> tc;
int OnCon[2];
class talk_to_svr1 : public boost::enable_shared_from_this<talk_to_svr1>, boost::noncopyable {
typedef talk_to_svr1 self_type;
talk_to_svr1(const std::string & message, ip::udp::endpoint ep) : started_(true), message_(message) {}
void start(ip::udp::endpoint ep) {
do_write(message_);
}
public:
typedef boost::system::error_code error_code;
typedef boost::shared_ptr<talk_to_svr1> ptr;
static ptr start(ip::udp::endpoint ep, const std::string & message) {
ptr new_(new talk_to_svr1(message, ep));
new_->start(ep);
return new_;
}
bool started() { return started_; }
private:
void on_read(const error_code & err, size_t bytes) {
this->t2 = std::chrono::high_resolution_clock::now(); // Time of finished reading
if ( !err) {
auto t0_rel = 1.e-9*std::chrono::duration_cast<std::chrono::nanoseconds>(t0-t_start).count();
auto t1_rel = 1.e-9*std::chrono::duration_cast<std::chrono::nanoseconds>(t1-t_start).count();
auto t2_rel = 1.e-9*std::chrono::duration_cast<std::chrono::nanoseconds>(t2-t_start).count();
std::cout << "Sock1: " << t0_rel << ", " << t1_rel << ", " << t2_rel << std::endl;
std::string msg(read_buffer_, bytes);
std::cout << msg << std::endl;
}
else {
std::cout << "Error occured in reading data from server (Sock1)" << std::endl;
}
}
void on_write(const error_code & err, size_t bytes) {
this->t1 = std::chrono::high_resolution_clock::now(); // Time of finished writting
std::cout << "Sock1 successfully sent " << bytes << " bytes of data" << std::endl;
do_read();
}
void do_read() {
sock1.async_receive_from(buffer(read_buffer_),ep1 ,MEM_FN2(on_read,_1,_2));
}
void do_write(const std::string & msg) {
if ( !started() ) return;
std::copy(msg.begin(), msg.end(), write_buffer_);
this->t0 = std::chrono::high_resolution_clock::now(); // Time of starting to write
sock1.async_send_to( buffer(write_buffer_, msg.size()), ep1, MEM_FN2(on_write,_1,_2) );
}
public:
std::chrono::time_point<std::chrono::high_resolution_clock> t0; // Time of starting to write
std::chrono::time_point<std::chrono::high_resolution_clock> t1; // Time of finished writting
std::chrono::time_point<std::chrono::high_resolution_clock> t2; // Time of finished reading
private:
int indx;
char read_buffer_[max_length];
char write_buffer_[max_length];
bool started_;
std::string message_;
};
void wait_s(int seconds)
{
boost::this_thread::sleep_for(boost::chrono::seconds{seconds});
}
void wait_ms(int msecs) {
boost::this_thread::sleep( boost::posix_time::millisec(msecs));
}
void async_thread() {
service.run();
}
void async_thread2() {
service2.run();
}
void GoOperational(int indx) {
if (indx == 0) {
talk_to_svr1::start(ep1, "Message01");
wait_s(1);
talk_to_svr1::start(ep1, "Message02");
wait_s(2);
}
else if (indx == 1) {
//talk_to_svr2::start(ep2, "Masoud");
wait_s(1);
//talk_to_svr2::start(ep2, "Ahmad");
wait_s(2);
}
else {
std::cout << "Wrong index!." << std::endl;
}
}
void on_connect(const boost::system::error_code & err, int ii) {
std::cout << "Socket "<< ii << " is connected."<< std::endl;
OnCon[ii] = 1;
if ( !err) {
tc = std::chrono::high_resolution_clock::now();
auto ty = 1.e-9*std::chrono::duration_cast<std::chrono::nanoseconds>(tc-t_start).count();
std::cout << "Sock " << ii << " connected at time: " << ty << " seconds" << std::endl;
if ( (OnCon[0] /*+ OnCon[1]*/ ) == 1) {
GoOperational(0);
//GoOperational(1);
}
}
else {
std::cout << "Socket " << ii << "had a problem for connecting to server.";
}
}
int main(int argc, char* argv[]) {
OnCon[0] = 0;
OnCon[1] = 0;
ep1 = ep1;
//ep2 = ep2;
std::cout.precision(9);
std::cout << "///////////////////////" << std::endl;
std::cout << "Socket Number, Time of starting to write, Time of finished writting, time of finished reading" << std::endl;
t_start = std::chrono::high_resolution_clock::now();
sock1.async_connect(ep1, boost::bind(on_connect, boost::asio::placeholders::error, 0));
//sock2.async_connect(ep2, boost::bind(on_connect, boost::asio::placeholders::error, 1));
boost::thread b{boost::bind(async_thread)};
b.join();
}
In this program I have a global UDP socket named sock1 which is connected by running sock1.async_connect() at line #9 of the main function. In the callback of this asynchronous operation, I make two instances of the talk_to_svr1 class, each of which is responsible for sending a message to the server and then receiving the response asynchronously.
I need to wait 3 seconds before sending the second message, and that is why I call wait_s(1) before making the second instance of talk_to_svr1. The problem is that calling wait_s(1), in addition to pausing the main thread, will also pause the asynchronous sending operation, which is not desired.
I would be grateful if anybody could change the above code so that another thread becomes responsible for asynchronously sending the message to the server, so that calling wait_s(1) will not pause the sending operation.
Note: I posted an alternative using coroutines as well.
Asynchronous coding by definition doesn't require you to "control" threads. In fact, you shouldn't need threads. Of course, you can't block inside completion handlers because that will hinder progress.
You can simply use a timer, expiring in 3s, async_wait for it and in its completion handler send the second request.
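As a bare-bones sketch of that idea (illustrative only; timer is assumed to be a boost::asio::deadline_timer created from the same io_service and kept alive until the handler runs, and send_second_message is a hypothetical stand-in for whatever starts the next request):
// Hypothetical sketch: replace the blocking wait_s(3) with an asynchronous wait,
// so the io_service thread is never blocked.
timer.expires_from_now(boost::posix_time::seconds(3));
timer.async_wait([](const boost::system::error_code& ec) {
    if (!ec)
        send_second_message(); // hypothetical: start the second request here
});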
Here's a big cleanup of your code. Note that I removed all use of global variables. They were making things very error-prone and leading to a lot of duplication (in fact talk_to_svr1 hardcoded ep1 and sock1, so it was useless for your second channel, which was largely commented out).
The crux of the change is to have message_operation take a continuation:
template <typename F_>
void async_message(udp::socket& s, std::string const& message, F_&& handler) {
using Op = message_operation<F_>;
boost::shared_ptr<Op> new_(new Op(s, message, std::forward<F_>(handler)));
new_->do_write();
}
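Purely for illustration, a hypothetical call site (not taken from the cleanup itself): the continuation can be any callable with the (error_code, std::string) signature, for example a lambda, where sock is assumed to be an already-connected udp::socket:
// Hypothetical usage sketch of async_message with a lambda continuation.
async_message(sock, "Hello\n", [](error_code ec, std::string response) {
    if (!ec)
        std::cout << "got: '" << response << "'\n";
});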
When the message/response exchange completes, handler is called. Now we can implement the application protocol (basically what you tried to capture in on_connect/GoOperational):
////////////////////////////////////////////////////
// basic protocol (2 messages, 1 delay)
struct ApplicationProtocol {
ApplicationProtocol(ba::io_service& service, udp::endpoint ep, std::string m1, std::string m2, std::chrono::seconds delay = 3s)
: _service(service),
_endpoint(ep),
message1(std::move(m1)), message2(std::move(m2)),
delay(delay), timer(service)
{ }
void go() {
_socket.async_connect(_endpoint, boost::bind(&ApplicationProtocol::on_connect, this, _1));
}
private:
ba::io_service& _service;
udp::socket _socket{_service};
udp::endpoint _endpoint;
std::string message1, message2;
std::chrono::seconds delay;
ba::high_resolution_timer timer;
void on_connect(error_code ec) {
std::cout << _endpoint << " connected at " << relatime() << " ms\n";
if (!ec) {
async_message(_socket, message1, boost::bind(&ApplicationProtocol::on_message1_sent, this, _1, _2));
} else {
std::cout << "Socket had a problem for connecting to server.";
}
}
void on_message1_sent(error_code ec, std::string response) {
if (ec)
std::cout << "Message 1 failed: " << ec.message() << "\n";
else {
std::cout << "Message 1 returned: '" << response << "'\n";
timer.expires_from_now(delay);
timer.async_wait(boost::bind(&ApplicationProtocol::on_delay_complete, this, _1));
}
}
void on_delay_complete(error_code ec) {
if (ec)
std::cout << "Delay faile: " << ec.message() << "\n";
else {
std::cout << "Delay completed\n";
async_message(_socket, message2, boost::bind(&ApplicationProtocol::on_message2_sent, this, _1, _2));
}
}
void on_message2_sent(error_code ec, std::string response) {
if (ec)
std::cout << "Message 2 failed: " << ec.message() << "\n";
else {
std::cout << "Message 2 returned: '" << response << "'\n";
}
}
};
Note how much simpler it becomes to use it:
int main() {
ba::io_service service;
std::cout.precision(2);
std::cout << std::fixed;
ApplicationProtocol
channel1(service, {{}, 4000}, "Message01\n", "Message02\n", 3s),
channel2(service, {{}, 4001}, "Masoud\n", "Ahmad\n", 2s);
channel1.go();
channel2.go();
service.run();
}
When running two udp services like so:
yes first|nl|netcat -ulp 4000& yes second|nl|netcat -ulp 4001& time wait
We get the following output: Live On Coliru
0.0.0.0:4000 connected at 1.87 ms
0.0.0.0:4001 connected at 1.99 ms
127.0.0.1:4000 successfully sent 10 bytes of data
127.0.0.1:4001 successfully sent 7 bytes of data
127.0.0.1:4000: start 1.91, written 2.03, finished 2.25 ms
Message 1 returned: ' 1 first
2 first
3 first
4 '
127.0.0.1:4001: start 2.00, written 2.06, finished 2.34 ms
Message 1 returned: ' 1 second
2 second
3 second
'
Delay completed
127.0.0.1:4001 successfully sent 6 bytes of data
127.0.0.1:4001: start 2002.46, written 2002.49, finished 2002.53 ms
Message 2 returned: '47 second
148 second
149 second
150 s'
Delay completed
127.0.0.1:4000 successfully sent 10 bytes of data
127.0.0.1:4000: start 3002.36, written 3002.39, finished 3002.41 ms
Message 2 returned: 'first
159 first
160 first
161 first
'
And the server side receives the messages in the expected sequence.
Full Code
Live On Coliru
#include <boost/asio.hpp>
#include <boost/asio/high_resolution_timer.hpp>
#include <boost/bind.hpp>
#include <boost/enable_shared_from_this.hpp>
#include <boost/shared_ptr.hpp>
#include <chrono>
#include <iostream>
#define MEM_FN2(x, y, z) boost::bind(&self_type::x, shared_from_this(), y, z)
namespace ba = boost::asio;
using ba::ip::udp;
using boost::system::error_code;
using ba::asio_handler_invoke;
////////////////////////////////////////////////////
// timing stuff
using namespace std::chrono_literals;
using hrclock = std::chrono::high_resolution_clock;
using time_point = hrclock::time_point;
static double relatime(time_point tp = hrclock::now()) {
static const time_point t_start = hrclock::now();
return (tp - t_start)/1.0ms;
}
////////////////////////////////////////////////////
// message operation - with F continuation
template <typename F>
class message_operation : public boost::enable_shared_from_this<message_operation<F> >, boost::noncopyable {
typedef message_operation self_type;
template <typename F_>
friend void async_message(udp::socket&, std::string const&, F_&&);
private:
template <typename F_>
message_operation(udp::socket& s, std::string message, F_&& handler)
: _socket(s), _endpoint(s.remote_endpoint()), handler_(std::forward<F_>(handler)), message_(std::move(message)) {}
using boost::enable_shared_from_this<message_operation>::shared_from_this;
void do_write() {
t0 = hrclock::now(); // Time of starting to write
_socket.async_send_to(ba::buffer(message_), _endpoint, MEM_FN2(on_write, _1, _2));
}
void on_write(const error_code & err, size_t bytes) {
t1 = hrclock::now(); // Time of finished writting
if (err)
handler_(err, "");
else
{
std::cout << _endpoint << " successfully sent " << bytes << " bytes of data\n";
do_read();
}
}
void do_read() {
_socket.async_receive_from(ba::buffer(read_buffer_), _sender, MEM_FN2(on_read, _1, _2));
}
void on_read(const error_code &err, size_t bytes) {
t2 = hrclock::now(); // Time of finished reading
if (!err) {
std::cout << _endpoint
<< ": start " << relatime(t0)
<< ", written " << relatime(t1)
<< ", finished " << relatime(t2)
<< " ms\n";
handler_(err, std::string(read_buffer_, bytes));
} else {
std::cout << "Error occured in reading data from server\n";
}
}
time_point t0, t1, t2; // Time of starting to write, finished writting, finished reading
// params
udp::socket& _socket;
udp::endpoint _endpoint;
F handler_;
// sending
std::string message_;
// receiving
udp::endpoint _sender;
char read_buffer_[46];
};
template <typename F_>
void async_message(udp::socket& s, std::string const& message, F_&& handler) {
using Op = message_operation<F_>;
boost::shared_ptr<Op> new_(new Op(s, message, std::forward<F_>(handler)));
new_->do_write();
}
////////////////////////////////////////////////////
// basic protocol (2 messages, 1 delay)
struct ApplicationProtocol {
ApplicationProtocol(ba::io_service& service, udp::endpoint ep, std::string m1, std::string m2, std::chrono::seconds delay = 3s)
: _service(service),
_endpoint(ep),
message1(std::move(m1)), message2(std::move(m2)),
delay(delay), timer(service)
{ }
void go() {
_socket.async_connect(_endpoint, boost::bind(&ApplicationProtocol::on_connect, this, _1));
}
private:
ba::io_service& _service;
udp::socket _socket{_service};
udp::endpoint _endpoint;
std::string message1, message2;
std::chrono::seconds delay;
ba::high_resolution_timer timer;
void on_connect(error_code ec) {
std::cout << _endpoint << " connected at " << relatime() << " ms\n";
if (!ec) {
async_message(_socket, message1, boost::bind(&ApplicationProtocol::on_message1_sent, this, _1, _2));
} else {
std::cout << "Socket had a problem for connecting to server.";
}
}
void on_message1_sent(error_code ec, std::string response) {
if (ec)
std::cout << "Message 1 failed: " << ec.message() << "\n";
else {
std::cout << "Message 1 returned: '" << response << "'\n";
timer.expires_from_now(delay);
timer.async_wait(boost::bind(&ApplicationProtocol::on_delay_complete, this, _1));
}
}
void on_delay_complete(error_code ec) {
if (ec)
std::cout << "Delay faile: " << ec.message() << "\n";
else {
std::cout << "Delay completed\n";
async_message(_socket, message2, boost::bind(&ApplicationProtocol::on_message2_sent, this, _1, _2));
}
}
void on_message2_sent(error_code ec, std::string response) {
if (ec)
std::cout << "Message 2 failed: " << ec.message() << "\n";
else {
std::cout << "Message 2 returned: '" << response << "'\n";
}
}
};
int main() {
ba::io_service service;
relatime(); // start the clock
std::cout.precision(2);
std::cout << std::fixed;
ApplicationProtocol
channel1(service, {{}, 4000}, "Message01\n", "Message02\n", 3s),
channel2(service, {{}, 4001}, "Masoud\n", "Ahmad\n", 2s);
channel1.go();
channel2.go();
service.run();
}
In addition to the "normal" answer posted before, here's one that does exactly the same but using coroutines:
Live On Coliru
#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
#include <boost/asio/high_resolution_timer.hpp>
#include <iostream>
namespace ba = boost::asio;
using ba::ip::udp;
using boost::system::error_code;
////////////////////////////////////////////////////
// timing stuff
using namespace std::chrono_literals;
using hrclock = std::chrono::high_resolution_clock;
using time_point = hrclock::time_point;
static double relatime(time_point tp = hrclock::now()) {
static const time_point t_start = hrclock::now();
return (tp - t_start)/1.0ms;
}
int main() {
ba::io_service service;
relatime(); // start the clock
std::cout.precision(2);
std::cout << std::fixed;
auto go = [&](udp::endpoint ep, std::string const& m1, std::string const& m2, hrclock::duration delay) {
ba::spawn(service, [=,&service](ba::yield_context yield) {
udp::socket sock(service);
time_point t0, t1, t2;
auto async_message = [&](std::string const& message) {
t0 = hrclock::now();
auto bytes = sock.async_send_to(ba::buffer(message), ep, yield);
t1 = hrclock::now();
char read_buffer_[46];
udp::endpoint _sender;
bytes = sock.async_receive_from(ba::buffer(read_buffer_), _sender, yield);
t2 = hrclock::now();
return std::string {read_buffer_, bytes};
};
try {
sock.async_connect(ep, yield);
std::cout << ep << " connected at " << relatime() << " ms\n";
std::cout << "Message 1 returned: '" << async_message(m1) << "'\n";
std::cout << ep << ": start " << relatime(t0) << ", written " << relatime(t1) << ", finished " << relatime(t2) << " ms\n";
ba::high_resolution_timer timer(service, delay);
timer.async_wait(yield);
std::cout << "Message 2 returned: '" << async_message(m2) << "'\n";
std::cout << ep << ": start " << relatime(t0) << ", written " << relatime(t1) << ", finished " << relatime(t2) << " ms\n";
} catch(std::exception const& e) {
std::cout << ep << " error: " << e.what() << "\n";
}
});
};
go({{}, 4000}, "Message01\n", "Message02\n", 3s),
go({{}, 4001}, "Masoud\n", "Ahmad\n", 2s);
service.run();
}
As you can see, using coroutines has the luxury of keeping all coro state "implicitly" on the coro stack. This means: no more ad hoc classes for async operations with state, and vastly reduced lifetime issues.
Output
0.0.0.0:4000 connected at 0.52 ms
Message 1 returned: '0.0.0.0:4001 connected at 0.64 ms
Message 1 returned: ' 1 first
2 first
3 first
4 '
0.0.0.0:4000: start 0.55, written 0.68, finished 0.86 ms
1 second
2 second
3 second
'
0.0.0.0:4001: start 0.65, written 0.70, finished 0.91 ms
Message 2 returned: '47 second
148 second
149 second
150 s'
0.0.0.0:4001: start 2001.03, written 2001.06, finished 2001.07 ms
Message 2 returned: 'first
159 first
160 first
161 first
'
0.0.0.0:4000: start 3001.10, written 3001.15, finished 3001.16 ms

Using boost::asio stackless coroutines to download several files via HTTP

I translated the example from Programming in Lua by Roberto Ierusalimschy for downloading several files via HTTP using coroutines to C++, using boost::asio and stackful coroutines. Here is the code:
#include <iostream>
#include <chrono>
#include <boost/asio.hpp>
#include <boost/asio/spawn.hpp>
using namespace std;
using namespace boost::asio;
io_service ioService;
void download(const string& host, const string& file, yield_context& yield)
{
clog << "Downloading " << host << file << " ..." << endl;
size_t fileSize = 0;
boost::system::error_code ec;
ip::tcp::resolver resolver(ioService);
ip::tcp::resolver::query query(host, "80");
auto it = resolver.async_resolve(query, yield[ec]);
ip::tcp::socket socket(ioService);
socket.async_connect(*it, yield[ec]);
ostringstream req;
req << "GET " << file << " HTTP/1.0\r\n\r\n";
write(socket, buffer(req.str()));
while (true)
{
char data[8192];
size_t bytesRead = socket.async_read_some(buffer(data), yield[ec]);
if (0 == bytesRead) break;
fileSize += bytesRead;
}
socket.shutdown(ip::tcp::socket::shutdown_both);
socket.close();
clog << file << " size: " << fileSize << endl;
}
int main()
{
auto timeBegin = chrono::high_resolution_clock::now();
vector<pair<string, string>> resources =
{
{"www.w3.org", "/TR/html401/html40.txt"},
{"www.w3.org", "/TR/2002/REC-xhtml1-20020801/xhtml1.pdf"},
{"www.w3.org", "/TR/REC-html32.html"},
{"www.w3.org", "/TR/2000/REC-DOM-Level-2-Core-20001113/DOM2-Core.txt"},
};
for(const auto& res : resources)
{
spawn(ioService, [&res](yield_context yield)
{
download(res.first, res.second, yield);
});
}
ioService.run();
auto timeEnd = chrono::high_resolution_clock::now();
clog << "Time: " << chrono::duration_cast<chrono::milliseconds>(
timeEnd - timeBegin).count() << endl;
return 0;
}
Now I'm trying to translate the code to use stackless coroutines from boost::asio, but the documentation is not enough for me to grok how to organize the code in such a way. Can someone provide a solution for this?
Here is a solution based on stackless coroutines as provided by Boost. Given that they are essentially a hack, I would not consider the solution particularly elegant. It could probably be done better with C++20, but I think that would be outside the scope of this question.
#include <functional>
#include <iostream>
#include <boost/asio.hpp>
#include <boost/asio/yield.hpp>
using boost::asio::async_write;
using boost::asio::buffer;
using boost::asio::error::eof;
using boost::system::error_code;
using std::placeholders::_1;
using std::placeholders::_2;
/**
* Stackless coroutine for downloading file from host.
*
* The lifetime of the object is limited to one () call. After that,
* the object will be copied and the old object is discarded. For this
* reason, the socket_ and resolver_ members are stored as shared_ptrs,
* so that they can live as long as there is a live copy. An alternative
* solution would be to manage these objects outside of the coroutine
* and to pass them here by reference.
*/
class downloader : boost::asio::coroutine {
using socket_t = boost::asio::ip::tcp::socket;
using resolver_t = boost::asio::ip::tcp::resolver;
public:
downloader(boost::asio::io_service &service, const std::string &host,
const std::string &file)
: socket_{std::make_shared<socket_t>(service)},
resolver_{std::make_shared<resolver_t>(service)}, file_{file},
host_{host} {}
void operator()(error_code ec = error_code(), std::size_t length = 0,
const resolver_t::results_type &results = {}) {
// Check if the last yield resulted in an error.
if (ec) {
if (ec != eof) {
throw boost::system::system_error{ec};
}
}
// Jump to after the previous yield.
reenter(this) {
yield {
resolver_t::query query{host_, "80"};
// Use bind to skip the length parameter not provided by async_resolve
auto result_func = std::bind(&downloader::operator(), this, _1, 0, _2);
resolver_->async_resolve(query, result_func);
}
yield socket_->async_connect(*results, *this);
yield {
std::ostringstream req;
req << "GET " << file_ << " HTTP/1.0\r\n\r\n";
async_write(*socket_, buffer(req.str()), *this);
}
while (true) {
yield {
char data[8192];
socket_->async_read_some(buffer(data), *this);
}
if (length == 0) {
break;
}
fileSize_ += length;
}
std::cout << file_ << " size: " << fileSize_ << std::endl;
socket_->shutdown(socket_t::shutdown_both);
socket_->close();
}
// Uncomment this to show progress and to demonstrate interleaving
// std::cout << file_ << " size: " << fileSize_ << std::endl;
}
private:
std::shared_ptr<socket_t> socket_;
std::shared_ptr<resolver_t> resolver_;
const std::string file_;
const std::string host_;
size_t fileSize_{};
};
int main() {
auto timeBegin = std::chrono::high_resolution_clock::now();
try {
boost::asio::io_service service;
std::vector<std::pair<std::string, std::string>> resources = {
{"www.w3.org", "/TR/html401/html40.txt"},
{"www.w3.org", "/TR/2002/REC-xhtml1-20020801/xhtml1.pdf"},
{"www.w3.org", "/TR/REC-html32.html"},
{"www.w3.org", "/TR/2000/REC-DOM-Level-2-Core-20001113/DOM2-Core.txt"},
};
std::vector<downloader> downloaders{};
std::transform(resources.begin(), resources.end(),
std::back_inserter(downloaders), [&](auto &x) {
return downloader{service, x.first, x.second};
});
std::for_each(downloaders.begin(), downloaders.end(),
[](auto &dl) { dl(); });
service.run();
} catch (std::exception &e) {
std::cerr << "exception: " << e.what() << "\n";
}
auto timeEnd = std::chrono::high_resolution_clock::now();
std::cout << "Time: "
<< std::chrono::duration_cast<std::chrono::milliseconds>(timeEnd -
timeBegin)
.count()
<< std::endl;
return 0;
}
Compiled with Boost 1.72 and g++ -lboost_coroutine -lpthread test.cpp. Example output:
$ ./a.out
/TR/REC-html32.html size: 606
/TR/html401/html40.txt size: 629
/TR/2002/REC-xhtml1-20020801/xhtml1.pdf size: 115777
/TR/2000/REC-DOM-Level-2-Core-20001113/DOM2-Core.txt size: 229699
Time: 1644
The log line at the end of the () function can be uncommented to demonstrate the interleaving of the downloads.

Is there already a thread monitor in C++11 or Boost?

Is there already a thread monitor in C++11 or Boost?
I need to monitor thread execution, and when a thread fails for any reason I need to start it again.
I am using C++11.
This depends on what constitutes a thread failure. If you mean it could exit, you can package it up:
Let's pretend we have a "long-running" task with a 25% chance of failing midway:
int my_processing_task() // this can randomly fail
{
static const size_t iterations = 1ul << 6;
static const size_t mtbf = iterations << 2; // 25% chance of failure
static auto odds = bind(uniform_int_distribution<size_t>(0, mtbf), mt19937(time(NULL)));
for(size_t iteration = 0; iteration < iterations; ++iteration)
{
// long task
this_thread::sleep_for(chrono::milliseconds(10));
// that could fail
if (odds() == 37)
throw my_failure();
}
// we succeeded!
return 42;
}
If we want to keep running the task, regardless of whether it completed normally, or with an error, we can write a monitoring wrapper:
template <typename F> void monitor_task_loop(F f)
{
while (!shutdown)
try {
f();
++completions;
} catch (exception const& e)
{
std::cout << "handling: '" << e.what() << "'\n";
++failures;
}
std::cout << "shutdown requested\n";
}
In this case, I randomly thought it would be nice to count the number of regular completions and the number of failures. The shutdown flag enables the thread to be shut down:
auto timeout = async(launch::async, []{ this_thread::sleep_for(chrono::seconds(3)); shutdown = true; });
monitor_task_loop(my_processing_task);
This will run the task monitoring loop for ~3 seconds. A demonstration running three background threads monitoring our task is Live On Coliru.
Added a C++03 version using Boost, Live On Coliru.
The version below uses only standard C++11 features.
#include <thread>
#include <future>
#include <iostream>
#include <random>
using namespace std;
struct my_failure : virtual std::exception {
char const* what() const noexcept { return "the thread failed randomly"; }
};
int my_processing_task() // this can randomly fail
{
static const size_t iterations = 1ul << 4;
static const size_t mtbf = iterations << 2; // 25% chance of failure
static auto odds = bind(uniform_int_distribution<size_t>(0, mtbf), mt19937(time(NULL)));
for(size_t iteration = 0; iteration < iterations; ++iteration)
{
// long task
this_thread::sleep_for(chrono::milliseconds(10));
// that could fail
if (odds() == 37)
throw my_failure();
}
// we succeeded!
return 42;
}
std::atomic_bool shutdown(false);
std::atomic_size_t failures(0), completions(0);
template <typename F> void monitor_task_loop(F f)
{
while (!shutdown)
try {
f();
++completions;
} catch (exception const& e)
{
std::cout << "handling: '" << e.what() << "'\n";
++failures;
}
std::cout << "shutdown requested\n";
}
int main()
{
auto monitor = [] { monitor_task_loop(my_processing_task); };
thread t1(monitor), t2(monitor), t3(monitor);
this_thread::sleep_for(chrono::seconds(3));
shutdown = true;
t1.join();
t2.join();
t3.join();
std::cout << "completions: " << completions << ", failures: " << failures << "\n";
}