Is it possible to build a concurrent interprocess message queue using boost?

I am trying to build an application which has multiple processes. These processes need to write concurrently to the same message queue. On the other side, there will be just one process reading from that queue.
Is that possible using boost? Or do I have to implement the mutual exclusion myself?
I took a look at the example source code, but it is not working properly for my needs. I don't know if I'm missing something.
This is the code on the client:
#include <boost/interprocess/ipc/message_queue.hpp>
#include <iostream>
#include <vector>
#include <unistd.h>
using namespace boost::interprocess;
int main ()
{
    try{
        //Erase previous message queue
        //message_queue::remove("message_queue");

        //Create a message_queue.
        message_queue mq
            (open_or_create       //open or create
            ,"message_queue"      //name
            ,100                  //max message number
            ,sizeof(int)          //max message size
            );

        //Send 100 numbers
        for(int i = 0; i < 100; ++i){
            printf("Sending: %d\n", i);
            usleep(1000000);
            mq.send(&i, sizeof(i), 0);
        }
    }
    catch(interprocess_exception &ex){
        std::cout << ex.what() << std::endl;
        return 1;
    }
    return 0;
}
And server code:
#include <boost/interprocess/ipc/message_queue.hpp>
#include <iostream>
#include <vector>
using namespace std;
using namespace boost::interprocess;
int main ()
{
    try{
        //message_queue::remove("message_queue");

        //Open a message queue.
        message_queue mq
            (open_only            //open only
            ,"message_queue"      //name
            );

        unsigned int priority;
        message_queue::size_type recvd_size;

        //Receive 100 numbers
        for(int i = 0; i < 100; ++i){
            int number;
            mq.receive(&number, sizeof(number), recvd_size, priority);
            if(number != i || recvd_size != sizeof(number))
                return 1;
            cout << number << endl;
        }
    }
    catch(interprocess_exception &ex){
        message_queue::remove("message_queue");
        std::cout << ex.what() << std::endl;
        return 1;
    }
    //message_queue::remove("message_queue");
    return 0;
}
Thanks in advance.

The given examples for boost::interprocess::message_queue work for me. These classes are already thread-safe, so intra-process threads are not a problem.
Here's a full example of a shared message queue. Let me know if you have trouble using it.
shared_mq.hpp:
#include <boost/interprocess/ipc/message_queue.hpp>
// could easily be made a template; make sure T is a POD!
class shared_mq {
public:
    shared_mq(const char* const name,
              const unsigned max_queue_size) :
        shared_mq{ name, max_queue_size, delete_queue(name) }
    {}

    shared_mq(const char* const name) :
        mq_{ boost::interprocess::open_only, name }
    {}

    void send(int i) {
        mq_.send(&i, sizeof(i), 0 /* priority */);
    }

    int receive() {
        int result;
        boost::interprocess::message_queue::size_type recvsize;
        unsigned recvpriority;
        mq_.receive(&result, sizeof(result), recvsize, recvpriority);
        return result;
    }

private:
    struct did_delete_t {};

    did_delete_t delete_queue(const char* const name) {
        boost::interprocess::message_queue::remove(name);
        return did_delete_t{};
    }

    shared_mq(const char* const name,
              const unsigned max_queue_size,
              did_delete_t) :
        mq_{ boost::interprocess::create_only, name, max_queue_size, sizeof(int) }
    {}

    boost::interprocess::message_queue mq_;
};
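As the comment above hints, the same wrapper could be made a template on the message type. A rough sketch of that variant (my adaptation, not part of the original answer; the name shared_typed_mq is made up, and T must stay trivially copyable since it is copied byte-wise through the queue):
#include <boost/interprocess/ipc/message_queue.hpp>
#include <type_traits>

template <typename T>
class shared_typed_mq {
    static_assert(std::is_trivially_copyable<T>::value,
                  "T is copied byte-wise through the queue, so it must be trivially copyable");
public:
    shared_typed_mq(const char* const name, const unsigned max_queue_size) :
        shared_typed_mq{ name, max_queue_size, delete_queue(name) }
    {}

    explicit shared_typed_mq(const char* const name) :
        mq_{ boost::interprocess::open_only, name }
    {}

    void send(const T& value) {
        mq_.send(&value, sizeof(value), 0 /* priority */);
    }

    T receive() {
        T result;
        boost::interprocess::message_queue::size_type recvsize;
        unsigned recvpriority;
        mq_.receive(&result, sizeof(result), recvsize, recvpriority);
        return result;
    }

private:
    struct did_delete_t {};

    // remove any stale queue before create_only re-creates it
    static did_delete_t delete_queue(const char* const name) {
        boost::interprocess::message_queue::remove(name);
        return did_delete_t{};
    }

    shared_typed_mq(const char* const name, const unsigned max_queue_size, did_delete_t) :
        mq_{ boost::interprocess::create_only, name, max_queue_size, sizeof(T) }
    {}

    boost::interprocess::message_queue mq_;
};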
client.cpp:
#include <iostream>
#include <random>
#include <thread>
#include "shared_mq.hpp"
void send_ints(shared_mq& mq, const unsigned count) {
    std::random_device rd;
    std::mt19937 mt{ rd() };
    std::uniform_int_distribution<int> dist{0, 10000};

    for (unsigned i = 0; i != count; ++i) {
        mq.send(dist(mt));
    }
}

int main ()
{
    std::cout << "Starting client." << std::endl;
    try {
        std::cout << "Creating queue..." << std::endl;
        constexpr unsigned kQueueSize = 100;
        shared_mq mq{ "my_queue", kQueueSize };

        std::cout << "Sending ints..." << std::endl;
        std::thread t1{ send_ints, std::ref(mq), 25 };
        std::thread t2{ send_ints, std::ref(mq), 25 };
        t1.join();
        t2.join();

        mq.send(-1); // magic sentinel value
    }
    catch (boost::interprocess::interprocess_exception& ex) {
        std::cerr << ex.what() << std::endl;
        return 1;
    }
    std::cout << "Finished client." << std::endl;
    return 0;
}
server.cpp:
#include <iostream>
#include "shared_mq.hpp"
int main ()
{
    std::cout << "Starting server." << std::endl;
    try {
        std::cout << "Opening queue..." << std::endl;
        shared_mq mq{ "my_queue" };

        std::cout << "Receiving ints..." << std::endl;
        for (;;) {
            const int x = mq.receive();
            if (x == -1) {
                // magic sentinel value
                break;
            }
            std::cout << "Received: " << x << std::endl;
        }
    }
    catch (boost::interprocess::interprocess_exception& ex) {
        std::cerr << ex.what() << std::endl;
        return 1;
    }
    std::cout << "Finished server." << std::endl;
    return 0;
}

Related

getting "input stream error" when trying to desirealize the object using boost::serialization and arcive

I'm trying to send a class over a boost::interprocess::message_queue using boost::serialization and boost::archive with split members (load and save).
The problem is that when I try to deserialize, I get the input stream error exception.
#include <iostream>
#include <boost/interprocess/shared_memory_object.hpp>
#include <boost/interprocess/ipc/message_queue.hpp>
#include <boost/version.hpp>
#include <random>
#include <boost/archive/text_oarchive.hpp>
#include <boost/archive/text_iarchive.hpp>
#include <boost/serialization/access.hpp>
#include <boost/archive/impl/basic_text_oarchive.ipp>
#include <boost/archive/impl/text_oarchive_impl.ipp>
#include <boost/archive/impl/text_iarchive_impl.ipp>
#include <boost/serialization/split_member.hpp>
class Data{
public:
    int a_;
    double b_;
    std::string s;

    template<class Archive>
    void serialize(Archive & ar, const unsigned int version) {
        boost::serialization::split_member(ar, *this, version);
    }

    template<class Archive> void save(Archive & ar, unsigned int version) const {
        // ar << order_request_type_;
        ar << a_;
        ar << b_;
        ar << s;
    }

    template<class Archive> void load(Archive & ar, unsigned int version) {
        // ar >> order_request_type_;
        ar >> a_;
        ar >> b_;
        ar >> s;
    }

private:
    // friend class boost::archive::access;
    friend class boost::archive::save_access;
};

[[nodiscard]] bool RunChiled() {
    using namespace boost::interprocess;
    try {
        message_queue mq(open_only        //open or create
                         , "message_queue" //name
                         );
        unsigned int priority = 0;
        message_queue::size_type recvd_size;
        Data d;
        std::stringstream iss;
        std::string serialized_string;
        serialized_string.resize(150);
        long long number = 0;
        while(true)
        {
            mq.receive(&serialized_string[0], 150, recvd_size, priority);
            std::cout << serialized_string << "\n";
            iss << serialized_string;
            try{
                boost::archive::text_iarchive ia(iss); // <-- getting the exception
                ia >> d;
            } catch (const std::exception& ex) {
                std::cout << ex.what() << "\n";
            }
            number++;
            std::cout << d.a_ << " " << d.b_ << " " << d.s << "\n";
        }
    }catch(const interprocess_exception &ex) {
        message_queue::remove("message_queue");
        std::cout << "interprocess_exception " << ex.what() << std::endl;
        return 1;
    } catch (const std::exception& e) {
        std::cout << "exception " << e.what() << std::endl;
        message_queue::remove("message_queue");
        return 1;
    }
    message_queue::remove("message_queue");
    return true;
}

int main() {
    std::cout << "1\n";
    std::default_random_engine generator;
    std::uniform_real_distribution<double> distribution(0,15);
    using namespace std;
    cout << "Boost version: " << BOOST_LIB_VERSION << endl;
    using namespace boost::interprocess;
    message_queue::remove("message_queue");
    auto pid = fork();
    if(pid > 0) {
        std::cout << "2\n";
        sleep(2);
        try {
            auto res = RunChiled();
            std::cout << res;
        } catch (...) {
            std::cout << "error\n";
        }
    } else if(pid == 0) {
        try{
            boost::interprocess::message_queue mq(create_only,"message_queue", 100, 150);
            std::stringstream oss;
            Data request;
            request.b_ = 17.5;
            request.a_ = I;
            request.s = to_string(17.5) + " " + to_string(i);
            try {
                boost::archive::text_oarchive oa(oss);
                oa << request;
            } catch (const std::exception& e) {
                std::cout << "serialzation:" << e.what() ;
            }
            try{
                // std::cout << "oss " << oss.str() << "\n";
                std::string serialized_string(oss.str());
                std::cout << "serialized_string " << oss.str().size() << "\n";
                mq.send(&serialized_string, serialized_string.size(), 0);
            }catch(const std::exception& e){
                std::cout << "\n send exeption " << e.what() << "\n";
            }
        }
    }catch (const std::exception& e){
        message_queue::remove("message_queue");
        std::cout << e.what() ;
    }
}
return 0;
}
A number of big issues.
Firstly:
mq.send(&serialized_string, serialized_string.size(), 0);
That's undefined behaviour, because serialized_string isn't POD and the size doesn't match either. You probably meant something like:
mq.send(serialized_string.data(), serialized_string.size(), 0);
On the receive side, you're resizing your serialized_string message to 150 and never back to the actual size. This means it will not work correctly, as there will be trailing data.
Fixing it:
std::string buffer(buffer_size, '\0');
unsigned int priority = 0;
message_queue::size_type recvd_size;
mq.receive(buffer.data(), buffer.size(), recvd_size, priority);
buffer.resize(recvd_size);
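Putting the two fixes together, the send and receive halves might look like this sketch (my own helpers, not from the original answer; send_serialized and receive_serialized are made-up names, and the queue is assumed to have been created with a max message size of buffer_size):
#include <boost/interprocess/ipc/message_queue.hpp>
#include <cstddef>
#include <string>

// Send the serialized payload: pass the character data, not the std::string object itself.
void send_serialized(boost::interprocess::message_queue& mq,
                     const std::string& serialized_string) {
    mq.send(serialized_string.data(), serialized_string.size(), 0 /*priority*/);
}

// Receive into a buffer sized for the queue's max message size,
// then shrink it to what was actually received so no trailing data remains.
std::string receive_serialized(boost::interprocess::message_queue& mq,
                               std::size_t buffer_size) {
    std::string buffer(buffer_size, '\0');
    unsigned int priority = 0;
    boost::interprocess::message_queue::size_type recvd_size = 0;
    mq.receive(&buffer[0], buffer.size(), recvd_size, priority);
    buffer.resize(recvd_size); // drop the unused tail
    return buffer;
}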
Other Notes
The serialization can be simpler without splitting:
class Data {
public:
    int a_;
    double b_;
    std::string s;

    template <class Archive> void serialize(Archive& ar, unsigned) {
        ar & a_ & b_ & s;
    }

private:
    friend class boost::serialization::access; // not required
};
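For completeness, a minimal round trip with that simplified form, separate from the message queue (my own standalone sketch, not part of the original answer):
#include <cassert>
#include <sstream>
#include <string>
#include <boost/archive/text_oarchive.hpp>
#include <boost/archive/text_iarchive.hpp>
#include <boost/serialization/string.hpp> // std::string support

struct Data {
    int a_;
    double b_;
    std::string s;
    template <class Archive> void serialize(Archive& ar, unsigned) {
        ar & a_ & b_ & s;
    }
};

int main() {
    const Data in{42, 3.5, "hello"};

    // serialize into an in-memory text archive
    std::stringstream ss;
    {
        boost::archive::text_oarchive oa(ss);
        oa << in;
    } // archive is completed when it goes out of scope

    // deserialize from the same stream
    Data out;
    {
        boost::archive::text_iarchive ia(ss);
        ia >> out;
    }
    assert(out.a_ == in.a_ && out.b_ == in.b_ && out.s == in.s);
}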
Other notes
return 1 and return true from bool runChiled look iffy - one of them is probably a bug
[[nodiscard]] seems a little bit tricky on a function that normally will not return ([[noreturn]] might be more apt, optionally just passing exceptions out?)
you will want to seed your random generator with something actually random:
std::default_random_engine generator { std::random_device{}() };
Live Demo With Fixes
Live On Coliru
Live On Wandbox
#include <iostream>
#include <iomanip>
#include <boost/interprocess/shared_memory_object.hpp>
#include <boost/interprocess/ipc/message_queue.hpp>
#include <boost/date_time.hpp>
#include <boost/core/demangle.hpp>
#include <boost/version.hpp>
#include <random>
#include <thread> // this_thread
#include <boost/archive/text_oarchive.hpp>
#include <boost/archive/text_iarchive.hpp>
#include <boost/serialization/access.hpp>
#include <boost/serialization/split_member.hpp>
namespace {
    namespace bip = boost::interprocess;
    using bip::message_queue;

    auto constexpr queuename   = "message_queue";
    auto constexpr max_queued  = 5;
    auto constexpr buffer_size = 150;

    auto sleep_for = [](auto d) { std::this_thread::sleep_for(d); };
    using namespace std::chrono_literals;
}

class Data {
public:
    int         a_{};
    double      b_{};
    std::string s;

    template <class Ar> void serialize(Ar& ar, unsigned /*unused*/) {
        ar& a_& b_& s;
    }
};

[[noreturn]] void RunChild()
{
    std::cout << "2" << std::endl;
    sleep_for(2s);

    try {
        message_queue mq(bip::open_only, queuename);

        size_t number = 0;
        for (std::string buffer(buffer_size, '\0');; buffer.resize(buffer_size)) {
            {
                unsigned int priority = 0;
                message_queue::size_type recvd_size = 0;
#ifdef COLIRU
                // make the process terminate for online compiler
                {
                    using namespace boost::posix_time;
                    auto deadline = second_clock::universal_time() + seconds(3);

                    if (not mq.timed_receive(buffer.data(), buffer.size(),
                                             recvd_size, priority, deadline))
                    {
                        throw std::runtime_error("no more messages");
                    }
                }
#else
                mq.receive(buffer.data(), buffer.size(), recvd_size, priority);
#endif
                buffer.resize(recvd_size);
            }

            //std::cout << buffer << std::endl;

            Data d;
            try {
                std::stringstream iss(buffer);
                boost::archive::text_iarchive ia(iss);
                ia >> d;
            } catch (const std::exception& ex) {
                std::cout << ex.what() << std::endl;
            }

            ++number;
            std::cout << "Received: " << d.a_ << " " << d.b_ << " "
                      << std::quoted(d.s) << std::endl;
        }
    } catch (const std::exception& e) {
        std::cout << boost::core::demangle(typeid(e).name()) << " " << e.what()
                  << std::endl;
        message_queue::remove(queuename);
        throw; // re-raise
    }
    message_queue::remove(queuename);
}

static void RunParent()
{
    std::cout << "1" << std::endl;
    std::default_random_engine generator { std::random_device{}() };
    std::uniform_real_distribution<double> distribution(0, 15);

    message_queue mq(bip::create_only, queuename, max_queued, buffer_size);

    for (auto i = 0; i < 10; ++i) {
        auto value = distribution(generator);

        Data const request {
            i,
            value,
            std::to_string(value) + " " + std::to_string(i)
        };

        std::stringstream oss;
        try {
            boost::archive::text_oarchive oa(oss);
            oa << request;

            std::string buffer = std::move(oss).str();
            std::cout << "Sending " << buffer.size() << " bytes" << std::endl;
            mq.send(buffer.data(), buffer.size(), 0);
        } catch (const std::exception& e) {
            std::cout << "\nsend exeption " << e.what() << std::endl;
        }
    }
}

int main() {
    std::cout << "Boost version: " << BOOST_LIB_VERSION << std::endl;
    message_queue::remove(queuename);

    try {
        if (auto pid = ::fork(); pid > 0) {
            RunChild();
        } else if (pid == 0) {
            RunParent();
        }
    } catch (const std::exception& e) {
        std::cout << e.what() << std::endl;
        message_queue::remove(queuename);
    }
}
Prints
Boost version: 1_75
2
1
Sending 72 bytes
Sending 73 bytes
Sending 72 bytes
Sending 72 bytes
Sending 73 bytes
Sending 72 bytes
Sending 72 bytes
Received: 0 1.12642 "1.126420 0"
Received: 1 14.2474 "14.247412 1"
Received: 2 3.22163 "3.221631 2"
Sending 73 bytes
Sending 72 bytes
Received: 3 3.20471 "3.204709 3"
Sending 72 bytes
Received: 4 10.7838 "10.783761 4"
Received: 5 5.74063 "5.740629 5"
Received: 6 6.98008 "6.980078 6"
Received: 7 11.6643 "11.664257 7"
Received: 8 3.80561 "3.805614 8"
Received: 9 7.79641 "7.796408 9"
std::runtime_error no more messages
no more messages

Sharing a class between threads

I have a class called "Vector", which by default holds 10,000 elements that at all times must have the same value. This class is tested and works. I use its method setAndTest() to set the value of all elements; it then immediately checks whether the Vector object is consistent (that all vector elements hold the same value).
In a new file "main.cpp", I have created two functions: writer() and main().
A user-defined number of writer threads (between 1 & 100) is created, each with its own unique id. Each writer sets the shared Vector object to its id and tests it every second. If a writer detects an inconsistency in the shared Vector object, setAndTest() returns false and the following error message should be printed: Error with thread #id
However, in 99% of the cases it prints Success with thread #id, whereas I expected that there would be more variation between the two.
Headers included in main.cpp file:
#include <iostream>
#include "Vector.hpp"
#include <pthread.h>
#include <unistd.h>
using namespace std;
Vector object and writer() function:
Vector VecObj; //The Vector object (defined in global scope)

void* writer(void *threadid)
{
    int threadid_ = *(int *)(threadid);
    if(VecObj.setAndTest(threadid_))
    {
        std::cout << "\nSuccess with thread " << threadid_ << endl;
    }
    else
    {
        std::cout << "\nError with thread " << threadid_ << endl;
    }
    return NULL;
}
main function:
int main()
{
start:
    int numOfThreads = 1;
    std::cout << "Enter amount of threads (must be between 1 & 100): ";
    std::cin >> numOfThreads;
    if(0 < numOfThreads && numOfThreads <= 100){
        std::cout << "You entered " << numOfThreads << " threads" << endl;
    }else{
        std::cout << "Amount of threads must be between 1 & 100" << endl;
        goto start;
    }

    pthread_t threadcreator[numOfThreads];
    for(int i = 0; i < numOfThreads; i++){
        pthread_create(&threadcreator[i], NULL, writer, &i);
        sleep(1);
    }

    for(int i = 0; i < numOfThreads; i++){
        pthread_join(threadcreator[i], NULL);
    }
}
Vector Class (Vector.hpp):
#ifndef VECTOR_HPP_
#define VECTOR_HPP_
#include <pthread.h>
using namespace std;
//=======================================================
// Class: Vector
// contains a size_-size vector of integers.
// Use the function setAndTest to set all elements
// of the vector to a certain value and then test that
// the value is indeed correctly set
//=======================================================
class Vector
{
public:
    Vector(unsigned int size = 10000) : size_(size)
    {
        vector_ = new int[size_];
        set(0);
    }

    ~Vector()
    {
        delete[] vector_;
    }

    bool setAndTest(int n)
    {
        set(n);
        return test(n);
    }

private:
    void set(int n)
    {
        for(unsigned int i=0; i<size_; i++) vector_[i] = n;
    }

    bool test(int n)
    {
        for(unsigned int i=0; i<size_; i++) if(vector_[i] != n) return false;
        return true;
    }

    int* vector_;
    unsigned int size_;
};
#endif
You are passing each thread a pointer to the same int variable. That variable changes value on each loop iteration. writer() is expecting to receive the same int value that was given to pthread_create(), but that is not guaranteed in your code, even with the sleep() call.
To pass the int correctly, pass the actual int value itself rather than a pointer to the int, eg:
#include <iostream>
#include <vector>
#include <cstdint>
#include <pthread.h>
#include "Vector.hpp"
Vector VecObj;
void* writer(void *arg)
{
    int threadid_ = static_cast<int>(reinterpret_cast<intptr_t>(arg));
    if (VecObj.setAndTest(threadid_))
    {
        std::cout << "\nSuccess with thread " << threadid_ << std::endl;
    }
    else
    {
        std::cout << "\nError with thread " << threadid_ << std::endl;
    }
    return NULL;
}

int main()
{
    int numOfThreads = 0;
    do {
        std::cout << "Enter amount of threads (must be between 1 & 100): ";
        std::cin >> numOfThreads;
        if (0 < numOfThreads && numOfThreads <= 100){
            std::cout << "You entered " << numOfThreads << " threads" << std::endl;
            break;
        }
        std::cout << "Amount of threads must be between 1 & 100" << std::endl;
    }
    while (true);

    std::vector<pthread_t> threadcreator(numOfThreads);
    for(int i = 0; i < numOfThreads; i++){
        pthread_create(&threadcreator[i], NULL, writer, reinterpret_cast<void*>(i));
    }

    for(int i = 0; i < numOfThreads; i++){
        pthread_join(threadcreator[i], NULL);
    }
    return 0;
}
If you really want to use int* pointers, then you will have to allocate a separate int for each thread, eg:
#include <iostream>
#include <vector>
#include <pthread.h>
#include "Vector.hpp"
Vector VecObj;
void* writer(void *arg)
{
    int threadid_ = *static_cast<int*>(arg);
    if (VecObj.setAndTest(threadid_))
    {
        std::cout << "\nSuccess with thread " << threadid_ << std::endl;
    }
    else
    {
        std::cout << "\nError with thread " << threadid_ << std::endl;
    }
    return NULL;
}

int main()
{
    int numOfThreads = 0;
    do {
        std::cout << "Enter amount of threads (must be between 1 & 100): ";
        std::cin >> numOfThreads;
        if (0 < numOfThreads && numOfThreads <= 100){
            std::cout << "You entered " << numOfThreads << " threads" << std::endl;
            break;
        }
        std::cout << "Amount of threads must be between 1 & 100" << std::endl;
    }
    while (true);

    std::vector<pthread_t> threadcreator(numOfThreads);
    std::vector<int> threadids(numOfThreads);
    for(int i = 0; i < numOfThreads; i++){
        threadids[i] = i;
        pthread_create(&threadcreator[i], NULL, writer, &threadids[i]);
    }

    for(int i = 0; i < numOfThreads; i++){
        pthread_join(threadcreator[i], NULL);
    }
    return 0;
}
Or, if you really want to pass an int* pointer to a single int, use a std::condition_variable or other waitable signal to make sure that each thread has actually captured the int value before allowing the loop to change its value, eg:
#include <iostream>
#include <vector>
#include <condition_variable>
#include <mutex>
#include "Vector.hpp"
#include <pthread.h>

Vector VecObj;
std::condition_variable cv;
std::mutex cv_m;
bool captured = false;

void* writer(void *arg)
{
    int threadid_;
    {
        std::lock_guard<std::mutex> lk(cv_m);
        threadid_ = *static_cast<int*>(arg);
        captured = true;
    }
    cv.notify_one();

    if (VecObj.setAndTest(threadid_))
    {
        std::cout << "\nSuccess with thread " << threadid_ << std::endl;
    }
    else
    {
        std::cout << "\nError with thread " << threadid_ << std::endl;
    }
    return NULL;
}

int main()
{
    int numOfThreads = 0;
    do {
        std::cout << "Enter amount of threads (must be between 1 & 100): ";
        std::cin >> numOfThreads;
        if (0 < numOfThreads && numOfThreads <= 100){
            std::cout << "You entered " << numOfThreads << " threads" << std::endl;
            break;
        }
        std::cout << "Amount of threads must be between 1 & 100" << std::endl;
    }
    while (true);

    std::vector<pthread_t> threadcreator(numOfThreads);
    for(int i = 0; i < numOfThreads; i++){
        std::unique_lock<std::mutex> lk(cv_m);
        captured = false;
        pthread_create(&threadcreator[i], NULL, writer, &i);
        cv.wait(lk, [](){ return captured; });
    }

    for(int i = 0; i < numOfThreads; i++){
        pthread_join(threadcreator[i], NULL);
    }
    return 0;
}
UPDATE: oh, now I see another major problem. You have multiple threads writing to, and reading from, a single Vector object in memory without synchronization. That is not safe to do. While one thread is reading from an element in the Vector's array, another thread can be writing a new value to that same element, and there is no guarantee that the element will remain consistent across both operations. You MUST synchronize access to the Vector object since it is being shared across multiple threads, eg:
...
#include <mutex>
...
Vector VecObj;
std::mutex vec_m;
...
void* writer(void *threadid)
{
    int threadid_ = ...;

    bool testResult;
    {
        std::lock_guard lk(vec_m);
        testResult = VecObj.setAndTest(threadid_);
    }

    if (testResult)
    {
        std::cout << "\nSuccess with thread " << threadid_ << std::endl;
    }
    else
    {
        std::cout << "\nError with thread " << threadid_ << std::endl;
    }
    return NULL;
}
...

wait_until timeout causing only one thread to terminate, unable to catch all thread timeouts

I've made a simple thread-safe Buffer implementation and create 10 threads that work on the buffer queue, randomly pushing and popping some numbers. My implementation should let threads that are waiting to pop wait for only 3 seconds and then terminate. When that occurs, I print a timeout message.
The problem is that only one timeout message is printed; main then joins all threads and returns. Why?
Here is the code, main.cpp
#include <thread>
#include <vector>
#include <iostream>
#include <sstream>
#include "Buffer.h"
int main() {
    std::vector<std::thread> workers;
    Buffer<std::string> buffer(3);
    srandom(time(NULL));

    for (int i = 0; i < 10; i++) {
        workers.emplace_back([&buffer]{
            long num = random();
            if(num%2==0) {
                std::stringstream msg;
                msg << std::this_thread::get_id() << " pushing " << num << std::endl;
                std::cout << msg.str();
                buffer.push(std::to_string(num));
            } else {
                std::stringstream msg1;
                msg1 << std::this_thread::get_id() << " waiting to pop" << std::endl;
                std::cout << msg1.str();
                std::string popped_string = buffer.pop();
                std::stringstream msg2;
                msg2 << std::this_thread::get_id() << " popped " << popped_string << std::endl;
                std::cout << msg2.str();
            }
        });
    }

    for (auto &w: workers) {
        if (w.joinable()) w.join();
    }
    return 0;
}
Buffer.h
#ifndef PDS_CPP_BUFFER_H
#define PDS_CPP_BUFFER_H
#include <queue>
#include <mutex>
#include <condition_variable>
template <class T>
class Buffer {
private:
    std::queue<T> queue;
    std::mutex mutex;
    std::condition_variable cv;
    std::chrono::seconds sec;

public:
    Buffer(int time) : sec(time), queue() {};

    void push(T object) {
        std::lock_guard lockGuard(mutex);
        this->queue.push(object);
        this->cv.notify_one();
    }

    T pop() {
        std::unique_lock uniqueLock(mutex);
        // this->cv.wait(uniqueLock, [this]{ return !this->queue.empty(); });
        if(this->cv.wait_for(uniqueLock, this->sec, [this]{ return !this->queue.empty(); })) {
        } else {
            std::stringstream msg;
            msg << std::this_thread::get_id() << " timeout" << std::endl;
            std::cout << msg.str();
        }
        T object = this->queue.front();
        this->queue.pop();
        uniqueLock.unlock();
        return object;
    }
};
#endif //PDS_CPP_BUFFER_H
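As an aside: when wait_for() above returns false, pop() still falls through to front()/pop() on a queue that may be empty, which is undefined behaviour and can take the whole process down after the first timeout. A timed pop that handles the empty case explicitly might look like the following sketch; the TimedBuffer name and the std::optional return type (C++17) are my own choices rather than the question's API:
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <optional>
#include <queue>

template <class T>
class TimedBuffer {
public:
    explicit TimedBuffer(std::chrono::seconds timeout) : timeout_(timeout) {}

    void push(T object) {
        {
            std::lock_guard<std::mutex> lock(mutex_);
            queue_.push(std::move(object));
        }
        cv_.notify_one();
    }

    // Returns std::nullopt when no element arrived within the timeout,
    // instead of touching an empty queue.
    std::optional<T> pop() {
        std::unique_lock<std::mutex> lock(mutex_);
        if (!cv_.wait_for(lock, timeout_, [this]{ return !queue_.empty(); })) {
            return std::nullopt; // timed out
        }
        T object = std::move(queue_.front());
        queue_.pop();
        return object;
    }

private:
    std::queue<T> queue_;
    std::mutex mutex_;
    std::condition_variable cv_;
    std::chrono::seconds timeout_;
};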

What is the best way to send many buffers with Boost::Asio method async_send_to

What is the best way to send many buffers with the Boost::Asio method async_send_to?
The whole send procedure can be repeated at any time. Furthermore, I want to determine the (correct) elapsed time of each send procedure.
I tried it this way:
//MainWindow.h
class MainWindow : public QMainWindow
{
    Q_OBJECT

public:
    explicit MainWindow(QWidget *parent = 0);
    ~MainWindow();

private slots:
    void on_connectPushButton_clicked();
    void on_asyncSendPushButton_clicked();

private:
    Ui::MainWindow *ui;
    QTime m_Timer;

    int m_BufferSize;
    int m_NumBuffersToSend;
    int m_TransferredBuffers;

    boost::asio::io_service m_IOService;
    std::unique_ptr<boost::asio::ip::udp::socket> m_pSocket;
    boost::asio::ip::udp::endpoint m_ReceiverEndpoint;

    void handle_send(const boost::system::error_code& error, std::size_t size);
    void stopTimerAndLog();
};
//MainWindow.cpp
#include "MainWindow.h"
#include "ui_MainWindow.h"
//Some Qt includes
#include <boost/timer/timer.hpp>
#include <boost/array.hpp>
#include <boost/bind.hpp>
using boost::asio::ip::udp;
MainWindow::MainWindow(QWidget *parent) :
    m_BufferSize(0),
    m_NumBuffersToSend(0),
    m_TransferredBuffers(0),
    QMainWindow(parent),
    ui(new Ui::MainWindow)
{
    ui->setupUi(this);
}

MainWindow::~MainWindow()
{
    delete ui;
}

void MainWindow::on_connectPushButton_clicked()
{
    try
    {
        udp::resolver resolver(m_IOService);
        udp::resolver::query query(udp::v4(), ui->serverIpAddressLineEdit->text().toStdString(),
                                   ui->serverPortLineEdit->text().toStdString());
        m_ReceiverEndpoint = *resolver.resolve(query);

        m_pSocket = std::unique_ptr<boost::asio::ip::udp::socket>(new boost::asio::ip::udp::socket(m_IOService));
        m_pSocket->open(udp::v4());
    }
    catch (std::exception& e)
    {
        std::cerr << e.what() << std::endl;
    }
}

void MainWindow::stopTimerAndLog()
{
    int tmm = m_Timer.elapsed();
    double mBitPerSecond = 1000.0 * static_cast<double>(m_BufferSize * m_NumBuffersToSend)
                           / ( 1024.0 * 1024.0 * tmm) * 8.0;

    LOG_INFO(__FUNCTION__ << ": " << QString("Buffer size: %1").arg(m_BufferSize).toStdString());
    LOG_INFO(__FUNCTION__ << ": " << QString("Num Buffers: %1").arg(m_NumBuffersToSend).toStdString());
    LOG_INFO(__FUNCTION__ << ": " << QString("Time: %1 ms").arg(tmm).toStdString());
    LOG_INFO(__FUNCTION__ << ": " << QString("%1 MBit/s").arg(mBitPerSecond).toStdString());

    ui->mBitperSecondDoubleSpinBox->setValue(mBitPerSecond);
}

void MainWindow::handle_send(const boost::system::error_code &error, size_t size)
{
    m_TransferredBuffers++;

    if (error)
    {
        //missing error propagation to main thread
        LOG_ERROR(__FUNCTION__ << ": ERROR: Client error while sending (error code = " << error << "): ");
        LOG_ERROR(__FUNCTION__ << ": ERROR: Recovering...");
    }

    if ( m_TransferredBuffers >= m_NumBuffersToSend )
    {
        stopTimerAndLog();
        m_IOService.stop();
    }
}

void MainWindow::on_asyncSendPushButton_clicked()
{
    try
    {
        m_BufferSize = ui->sendBufferSpinBox->value();
        char* data = new char[m_BufferSize];
        memset(data, 0, m_BufferSize);
        m_NumBuffersToSend = ui->numBufferSpinBox->value();

        m_Timer.start();
        for (int i=0; i < m_NumBuffersToSend; i++)
        {
            memset(data, i, m_BufferSize);
            m_pSocket->async_send_to(boost::asio::buffer(data, m_BufferSize),
                                     m_ReceiverEndpoint,
                                     boost::bind(&MainWindow::handle_send, this,
                                                 boost::asio::placeholders::error,
                                                 boost::asio::placeholders::bytes_transferred));
        }
        m_TransferredBuffers = 0;
        m_IOService.run();
        delete[] data;
    }
    catch (std::exception& e)
    {
        std::cerr << e.what() << std::endl;
    }
}
As you can see, the user first clicks the connect button (on_connectPushButton_clicked). The send procedure then starts by clicking the async send button (on_asyncSendPushButton_clicked): here I start the timer and call async_send_to m_NumBuffersToSend times, then run the IOService. For each async_send_to, the handler handle_send is called and the m_TransferredBuffers variable is incremented until it reaches m_NumBuffersToSend. When that happens, I stop the timer and the IOService.
But if I compare the time calculated in my program with the real UDP traffic observed in Wireshark, there is always a big difference. How can I get a more accurate time calculation?
Is it possible to place the m_IOService.run(); call outside on_asyncSendPushButton_clicked?
Well.
I'm not sure what you are observing. Here's the answer to
Q. Is it possible to place the m_IOService.run(); call outside on_asyncSendPushButton_clicked
Yes, you should use io_service::work to keep the IO service running. Here's a demo program:
Live On Coliru
I've created a single IO thread to serve the async operations/completion handlers
I've stripped the Qt dependency; demo Runs are configured randomly:
struct Run {
    std::vector<char> buffer = std::vector<char>(rand()%800 + 200, '\0');
    int remainingToSend      = rand()%10 + 1;
    int transferredBuffers   = 0;
    Clock::time_point start  = Clock::now();

    void stopTimerAndLog() const;
};
As a bonus, I added proper statistics using Boost Accumulators
Instead of doing (expensive) IO in stopTimerAndLog we add the samples to the accumulators:
void stopTimerAndLog()
{
    using namespace std::chrono;
    Clock::duration const elapsed = Clock::now() - start;
    int tmm = duration_cast<microseconds>(elapsed).count();

    double mBitPerSecond = tmm
        ? buffer.size() * transferredBuffers * 8.0 / 1024 / 1024 / (tmm / 1000000.0)
        : std::numeric_limits<double>::infinity();

    std::lock_guard<std::mutex> lk(demo_results::mx);
    demo_results::bufsize(buffer.size());
    demo_results::micros(tmm);
    if (tmm)
        demo_results::mbps(mBitPerSecond);
}
You can run multiple demo Runs in overlap:
Demo demo;
demo.on_connect(argv[1], argv[2]);

for (int i = 0; i < 100; ++i) {
    std::this_thread::sleep_for(std::chrono::milliseconds(10));
    demo.on_async_testrun();
}
// Demo destructor joins IO thread, making sure all stats are final
The mutex guarding the statistics is redundant but GoodPractice(TM), since you may want to test with multiple IO threads
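For reference, the io_service::work keep-alive that answers the quoted question boils down to this shape; a condensed standalone sketch of my own (using the same, older io_service API as the rest of this answer), not part of the original listing:
#include <boost/asio.hpp>
#include <iostream>
#include <memory>
#include <thread>

int main() {
    boost::asio::io_service io;

    // The work object keeps io.run() from returning while nothing is queued,
    // so run() can live on a dedicated IO thread for the whole application lifetime.
    auto work = std::make_unique<boost::asio::io_service::work>(io);
    std::thread io_thread([&io] { io.run(); });

    // ... post async operations from anywhere, e.g. button handlers ...
    io.post([] { std::cout << "handler ran on the IO thread\n"; });

    // Releasing the work object lets run() return once all pending handlers
    // have completed; then the IO thread can be joined.
    work.reset();
    io_thread.join();
}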
Output:
avg. Buffer size: 613.82, std.dev. 219.789
avg. b/w: 160.61 mbps, std.dev. 81.061
avg. time: 153.64 μs, std.dev. 39.0163
Full Listing
#include <boost/asio.hpp>
#include <boost/array.hpp>
#include <boost/make_shared.hpp>
#include <boost/bind.hpp>
#include <thread>
#include <mutex>
#include <chrono>
#include <memory>
#include <iostream>
#include <boost/accumulators/accumulators.hpp>
#include <boost/accumulators/statistics.hpp>
using boost::asio::ip::udp;
typedef std::chrono::high_resolution_clock Clock;
namespace demo_results {
    using namespace boost::accumulators;
    static std::mutex mx;
    accumulator_set<double, stats<tag::mean, tag::median, tag::variance> > bufsize, mbps, micros;
}

struct Run {
    std::vector<char> buffer = std::vector<char>(rand()%800 + 200, '\0');
    int remainingToSend      = rand()%10 + 1;
    int transferredBuffers   = 0;
    Clock::time_point start  = Clock::now();
    Clock::duration elapsed;

    void stopTimerAndLog()
    {
        using namespace std::chrono;
        Clock::duration const elapsed = Clock::now() - start;
        int tmm = duration_cast<microseconds>(elapsed).count();

        double mBitPerSecond = tmm
            ? buffer.size() * transferredBuffers * 8.0 / 1024 / 1024 / (tmm / 1000000.0)
            : std::numeric_limits<double>::infinity();

        std::lock_guard<std::mutex> lk(demo_results::mx);
        demo_results::bufsize(buffer.size());
        demo_results::micros(tmm);
        if (tmm)
            demo_results::mbps(mBitPerSecond);

#if 0
        std::cout << __FUNCTION__ << " -----------------------------------------------\n";
        std::cout << __FUNCTION__ << ": " << "Buffer size: " << buffer.size() << "\n";
        std::cout << __FUNCTION__ << ": " << "Num Buffers: " << transferredBuffers << "\n";
        std::cout << __FUNCTION__ << ": " << "Time: " << tmm << " μs\n";
        std::cout << __FUNCTION__ << ": " << mBitPerSecond << " MBit/s\n";
#endif
    }

    typedef boost::shared_ptr<Run> Ptr;
};

struct Demo {
    boost::asio::io_service m_IOService;
    std::unique_ptr<boost::asio::io_service::work> m_work;
    std::unique_ptr<boost::asio::ip::udp::socket> m_pSocket;
    boost::asio::ip::udp::endpoint m_ReceiverEndpoint;
    std::thread m_io_thread;

    Demo() :
        m_IOService(),
        m_work(new boost::asio::io_service::work(m_IOService)),
        m_io_thread([this] { m_IOService.run(); })
    {
    }

    ~Demo() {
        m_work.reset();
        m_io_thread.join();
    }

    void on_connect(std::string const& host, std::string const& port)
    {
        try {
            udp::resolver resolver(m_IOService);
            m_ReceiverEndpoint = *resolver.resolve(udp::resolver::query(udp::v4(), host, port));

            m_pSocket = std::unique_ptr<boost::asio::ip::udp::socket>(new boost::asio::ip::udp::socket(m_IOService));
            m_pSocket->open(udp::v4());
        }
        catch (std::exception& e)
        {
            std::cerr << e.what() << std::endl;
        }
    }

    void perform_run(Run::Ptr state) {
        if (state->remainingToSend) {
            std::fill(state->buffer.begin(), state->buffer.end(), state->remainingToSend);

            m_pSocket->async_send_to(boost::asio::buffer(state->buffer),
                                     m_ReceiverEndpoint,
                                     boost::bind(&Demo::handle_sent, this,
                                                 boost::asio::placeholders::error,
                                                 boost::asio::placeholders::bytes_transferred,
                                                 state));
        } else {
            state->stopTimerAndLog();
        }
    }

    void handle_sent(boost::system::error_code const& error, size_t actually_transferred, Run::Ptr state)
    {
        assert(actually_transferred == state->buffer.size());
        state->transferredBuffers += 1;
        state->remainingToSend    -= 1;

        if (error) {
            // missing error propagation to main thread
            std::cerr << __FUNCTION__ << ": ERROR: Client error while sending (error code = " << error.message() << "): ";
            std::cerr << __FUNCTION__ << ": ERROR: Recovering...";
        }

        perform_run(state); // remaining buffers for run
    }

    void on_async_testrun() {
        perform_run(boost::make_shared<Run>());
    }
};

int main(int argc, char const** argv)
{
    assert(argc==3);
    {
        Demo demo;
        demo.on_connect(argv[1], argv[2]);

        for (int i = 0; i < 100; ++i) {
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
            demo.on_async_testrun();
        }
    } // Demo destructor joins IO thread, making sure all stats are final

    using namespace boost::accumulators;
    std::cout << "avg. Buffer size: " << mean(demo_results::bufsize) << ", std.dev. " << sqrt(variance(demo_results::bufsize)) << "\n";
    std::cout << "avg. b/w: " << mean(demo_results::mbps) << " mbps, std.dev. " << sqrt(variance(demo_results::mbps)) << "\n";
    std::cout << "avg. time: " << mean(demo_results::micros) << " μs, std.dev. " << sqrt(variance(demo_results::micros)) << "\n";
}
Thank you very much for your answer. This was a very good starting point to improve my code.
I changed a little bit how the async_send_to calls are issued:
void perform_run(Run::Ptr state) {
    for (decltype(state->buffersToSend) i = 0; i < state->buffersToSend; i++)
    {
        std::fill(state->buffer.begin(), state->buffer.end(), i);

        m_pSocket->async_send_to(boost::asio::buffer(state->buffer),
                                 m_ReceiverEndpoint,
                                 boost::bind(&Demo::handle_sent, this,
                                             boost::asio::placeholders::error,
                                             boost::asio::placeholders::bytes_transferred,
                                             state));
    }
}

void handle_sent(boost::system::error_code const& error, size_t actually_transferred, Run::Ptr state)
{
    assert(actually_transferred == state->buffer.size());
    state->transferredBuffers += 1;

    if (error) {
        // missing error propagation to main thread
        std::cerr << __FUNCTION__ << ": ERROR: Client error while sending (error code = " << error.message() << "): ";
        std::cerr << __FUNCTION__ << ": ERROR: Recovering...";
    }

    if (state->transferredBuffers >= state->buffersToSend) {
        state->stopTimerAndLog();
    }
}
And here is the full code in coliru
Greetings,
Thomas

swapcontext: What is the meaning of the field uc_stack in struct ucontext_t? Who uses it, the coroutine or the coroutine's signal handler? How can I test it?

What is the meaning of the field uc_stack in struct ucontext_t? Who uses it, the coroutine or the coroutine's signal handler? How can I test it? For example:
#include <iostream>
#include <ucontext.h>
#include <queue>
#include <signal.h>
using namespace std;
void sigHandler(int signo)
{
    printf("sigHandler:%x\n", &signo);
    exit(-1);
}

queue<int> qProduct;

void consumer(ucontext_t* pConsumer, ucontext_t* pProducer)
{
    char a[SIGSTKSZ] = {0};
    while(1)
    {
        if(qProduct.size() > 0)
        {
            cout << __FUNCTION__ << "|" << qProduct.front() << endl;
            qProduct.pop();
        }
        else
        {
            cout << pConsumer << "|" << pProducer << endl;
            swapcontext(pConsumer, pProducer);
        }
    }
}

void producer(ucontext_t* pConsumer, ucontext_t* pProducer, bool* pFinished)
{
    for(int i=0; i<10; i++)
    {
        if(qProduct.size() < 5)
        {
            qProduct.push(i);
            cout << __FUNCTION__ << "|" << i << endl;
        }
        else
        {
            cout << pConsumer << "|P|" << pProducer << endl;
            swapcontext(pProducer, pConsumer);
        }
    }
    cout << pConsumer << "|P|" << pProducer << endl;
    swapcontext(pProducer, pConsumer);
    *pFinished = true;
}
int main(int argc, char* argv[])
{
    ucontext_t Main, Consumer, Producer;

    /* The stack for the iterator function. */
    char consumer_stack[SIGSTKSZ];
    char producer_stack[SIGSTKSZ];
    cout << "SIGSTKSZ:" << SIGSTKSZ << endl;

    /* Flag indicating that the iterator has completed. */
    volatile bool bFinished = false;

    getcontext(&Consumer);
    Consumer.uc_link = &Main;
    Consumer.uc_stack.ss_sp = consumer_stack;
    Consumer.uc_stack.ss_size = sizeof(consumer_stack);
    makecontext(&Consumer, (void (*)(void))consumer, 2, &Consumer, &Producer);

    getcontext(&Producer);
    Producer.uc_link = &Main;
    Producer.uc_stack.ss_sp = producer_stack;
    Producer.uc_stack.ss_size = sizeof(producer_stack);
    makecontext(&Producer, (void (*)(void))producer, 3, &Consumer, &Producer, &bFinished);

    if(!bFinished)
    {
        swapcontext(&Main, &Producer);
    }
    return 0;
}
Who uses the stack "consumer_stack": "consumer" or "sigHandler"? How can I prove it?
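One way to check which stack a context function actually runs on is to take the address of a local variable inside it and test whether that address falls inside the array installed as uc_stack.ss_sp. A standalone sketch of that check (my own, not part of the question; it uses a fixed 64 KiB stack instead of SIGSTKSZ, since SIGSTKSZ is no longer a compile-time constant on recent glibc):
#include <cstdint>
#include <cstdio>
#include <ucontext.h>

constexpr std::size_t kStackSize = 64 * 1024;
static char coro_stack[kStackSize];
static ucontext_t MainCtx, CoroCtx;

static void coro()
{
    // If the address of this local lies inside coro_stack, the function
    // is running on the stack that was installed via uc_stack.
    char probe;
    uintptr_t p     = reinterpret_cast<uintptr_t>(&probe);
    uintptr_t begin = reinterpret_cast<uintptr_t>(coro_stack);
    uintptr_t end   = begin + sizeof(coro_stack);
    printf("local at %p, coro_stack [%p, %p): %s\n",
           (void*)&probe, (void*)coro_stack, (void*)(coro_stack + sizeof(coro_stack)),
           (p >= begin && p < end) ? "on uc_stack" : "NOT on uc_stack");
}

int main()
{
    getcontext(&CoroCtx);
    CoroCtx.uc_link = &MainCtx;            // return to main when coro() finishes
    CoroCtx.uc_stack.ss_sp = coro_stack;
    CoroCtx.uc_stack.ss_size = sizeof(coro_stack);
    makecontext(&CoroCtx, coro, 0);

    swapcontext(&MainCtx, &CoroCtx);       // runs coro() on coro_stack
    return 0;
}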