Event loop handling for sd-bus in libuv - C++

We have an event loop from libuv to handle Unix sockets and TCP sockets. The program now also has to handle D-Bus, and we decided to use sd-bus for that.
Lennart wrote on his blog:
Note that our APIs, including sd-bus, integrate nicely into sd-event
event loops, but do not require it, and may be integrated into other
event loops too, as long as they support watching for time and I/O events.
So I assume it must be possible.
I can get the D-Bus socket fd via sd_bus_get_fd(sd_bus *bus).
But I can't find any obvious way to stop sd-bus from using its bus_poll method to wait for events internally.
For example, calling a method with sd_bus_call(...) will block in ppoll.
So: how do I handle the D-Bus events in libuv?

I figured it out. Here's an example of how to unite C++, libuv and sd-bus:
I recommend that you read http://0pointer.de/blog/the-new-sd-bus-api-of-systemd.html to understand sd-bus in general.
These are code snippets from my implementation at https://github.com/TheJJ/horst
Method calls can then be done with sd_bus_call_async, which does not block (as opposed to sd_bus_call).
Don't forget to call update_events() after sd_bus_call_async so the call is sent out over the socket! (A short call sketch follows after the DBusConnection class below.)
/**
 * Callback function that is invoked from libuv
 * once dbus events flowed in.
 */
static void on_dbus_ready(uv_poll_t *handle, int /*status*/, int /*events*/) {
    DBusConnection *connection = (DBusConnection *)handle->data;

    sd_bus *bus = connection->get_bus();

    // let dbus handle the available events request
    while (true) {
        // this will trigger the dbus vtable-registered functions
        int r = sd_bus_process(bus, nullptr);

        if (r < 0) {
            printf("[dbus] Failed to process bus: %s", strerror(-r));
            break;
        }
        else if (r > 0) {
            // try to process another request!
            continue;
        }
        else {
            // no more progress, wait for the next callback.
            break;
        }
    }

    // update the events we watch for on the socket.
    connection->update_events();
}
/**
 * Convert the sdbus-returned poll flags to
 * corresponding libuv flags.
 */
int poll_to_libuv_events(int pollflags) {
    int ret = 0;

    if (pollflags & (POLLIN | POLLPRI)) {
        ret |= UV_READABLE;
    }
    if (pollflags & POLLOUT) {
        ret |= UV_WRITABLE;
    }

    // we also have the non-corresponding UV_DISCONNECT
    return ret;
}
class DBusConnection {
public:
    DBusConnection(Satellite *sat);
    virtual ~DBusConnection() = default;

    /** connect to dbus */
    int connect() {
        int r = sd_bus_open_system(&this->bus);

        if (r < 0) {
            printf("[dbus] Failed to connect to bus: %s", strerror(-r));
            goto clean_return;
        }

        r = sd_bus_add_object_vtable(
            this->bus,
            &this->bus_slot,
            "/rofl/lol",  // object path
            "rofl.lol",   // interface name
            your_vtable,
            this          // this is the userdata that'll be passed
                          // to the dbus methods
        );

        if (r < 0) {
            printf("[dbus] Failed to install the horst sdbus object: %s", strerror(-r));
            goto clean_return;
        }

        // register our service name
        r = sd_bus_request_name(this->bus, "moveii.horst", 0);

        if (r < 0) {
            printf("[dbus] Failed to acquire service name: %s", strerror(-r));
            goto clean_return;
        }

        // register the filedescriptor from
        // sd_bus_get_fd(bus) to libuv
        uv_poll_init(this->loop, &this->connection, sd_bus_get_fd(this->bus));

        // make `this` reachable in callbacks.
        this->connection.data = this;

        // init the dbus-event-timer
        uv_timer_init(this->loop, &this->timer);
        this->timer.data = this;

        // process initial events and set up the
        // events and timers for subsequent calls
        on_dbus_ready(&this->connection, 0, 0);

        printf("[dbus] Listener initialized");
        return 0;

    clean_return:
        sd_bus_slot_unref(this->bus_slot);
        sd_bus_unref(this->bus);
        this->bus = nullptr;

        return 1;
    }

    /** update the events watched for on the filedescriptor */
    void update_events() {
        sd_bus *bus = this->get_bus();

        // prepare the callback for calling us the next time.
        int new_events = poll_to_libuv_events(
            sd_bus_get_events(bus)
        );

        uint64_t usec;
        int r = sd_bus_get_timeout(bus, &usec);

        if (not r) {
            // if the timer is running already, it is stopped automatically
            // inside uv_timer_start.
            uv_timer_start(
                &this->timer,
                [] (uv_timer_t *handle) {
                    // yes, handle is not a poll_t, but
                    // we just care for its ->data member anyway.
                    on_dbus_ready((uv_poll_t *)handle, 0, 0);
                },
                usec / 1000,  // time in milliseconds, sd_bus provides µseconds
                0             // don't repeat
            );
        }

        // always watch for disconnects:
        new_events |= UV_DISCONNECT;

        // activate the socket watching,
        // and if active, invoke the callback function
        uv_poll_start(&this->connection, new_events, &on_dbus_ready);
    }

    /** close the connections */
    int close() {
        // TODO: maybe this memoryerrors when the loop actually
        //       does the cleanup. we have to wait for the callback.
        uv_close((uv_handle_t *) &this->timer, nullptr);
        uv_poll_stop(&this->connection);

        sd_bus_close(this->bus);
        sd_bus_slot_unref(this->bus_slot);
        sd_bus_unref(this->bus);

        return 0;
    }

    /**
     * Return the bus handle.
     */
    sd_bus *get_bus() const {
        return this->bus;
    }

protected:
    /** loop handle */
    uv_loop_t *loop;

    /** polling object for dbus events */
    uv_poll_t connection;

    /** dbus also wants to be called periodically */
    uv_timer_t timer;

    /** dbus bus handle */
    sd_bus *bus;

    /** dbus slot handle */
    sd_bus_slot *bus_slot;
};
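To make the point about sd_bus_call_async concrete, here is a rough sketch of such a call (my addition, not part of the linked implementation); on_reply, call_example and the org.example.* destination/path/interface/member strings are made-up placeholders:

// hypothetical reply handler; it is invoked later from sd_bus_process()
// inside on_dbus_ready once the reply arrives on the socket
static int on_reply(sd_bus_message *reply, void * /*userdata*/, sd_bus_error * /*ret_error*/) {
    if (sd_bus_message_is_method_error(reply, nullptr)) {
        printf("[dbus] call failed: %s", sd_bus_message_get_error(reply)->name);
        return 0;
    }
    printf("[dbus] got a reply");
    return 0;
}

// sketch of an asynchronous method call through the connection above
void call_example(DBusConnection *connection) {
    sd_bus *bus = connection->get_bus();

    sd_bus_message *msg = nullptr;
    int r = sd_bus_message_new_method_call(
        bus, &msg,
        "org.example.Peer",   // destination (placeholder)
        "/org/example/Peer",  // object path (placeholder)
        "org.example.Peer",   // interface (placeholder)
        "Ping"                // member (placeholder)
    );
    if (r < 0) {
        printf("[dbus] failed to create method call: %s", strerror(-r));
        return;
    }

    // queue the call; on_reply runs later from the event loop
    r = sd_bus_call_async(bus, nullptr, msg, &on_reply, nullptr, 0);
    sd_bus_message_unref(msg);
    if (r < 0) {
        printf("[dbus] async call failed: %s", strerror(-r));
        return;
    }

    // important: re-arm the watcher so the message is flushed out over the socket
    connection->update_events();
}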

Related

C++ GRPC Async Bidirectional Streaming - How to tell when a client sent a message?

There is zero documentation on how to do an async bidirectional stream with gRPC. I've made guesses by piecing together the regular async examples with what I found in people's GitHub repositories.
With the Frankenstein code I have, I cannot figure out how to tell when a client has sent me a message. Here is the procedure I have running on its own thread.
void GrpcStreamingServerImpl::listeningThreadProc()
{
    try
    {
        // I think we make a call to the RPC method and wait for others to stream to it?
        ::grpc::ServerContext context;
        void * ourOneAndOnlyTag = reinterpret_cast<void *>(1); ///< Identifies the call we are going to make. I assume we can only handle one client
        ::grpc::ServerAsyncReaderWriter<mycompanynamespace::OutputMessage,
                                        mycompanynamespace::InputMessage>
            stream(&context);
        m_service.RequestMessageStream(&context, &stream, m_completionQueue.get(), m_completionQueue.get(), ourOneAndOnlyTag);

        // Now I'm going to loop and get events from the completion queue
        bool keepGoing = false;
        do
        {
            void* tag = nullptr;
            bool ok = false;
            const std::chrono::time_point<std::chrono::system_clock> deadline(std::chrono::system_clock::now() +
                                                                              std::chrono::seconds(1));
            grpc::CompletionQueue::NextStatus nextStatus = m_completionQueue->AsyncNext(&tag, &ok, deadline);
            switch(nextStatus)
            {
                case grpc::CompletionQueue::NextStatus::TIMEOUT:
                {
                    keepGoing = true;
                    break;
                }
                case grpc::CompletionQueue::NextStatus::GOT_EVENT:
                {
                    keepGoing = true;
                    if(ok)
                    {
                        // This seems to get called if a client connects
                        // It does not get called if we didn't call 'RequestMessageStream' before the loop started
                        // TODO - How do we tell when the client send us a messages?
                        // TODO - How do we know if they are just connecting?
                        // TODO - How do we get the message client sent?

                        // The tag corresponds to the request we made
                        if(tag == reinterpret_cast<void *>(1))
                        {
                            // SNIP successful writing of a message
                            stream.Write(*(outputMessage.get()), reinterpret_cast<void*>(2));
                        }
                        else if(tag == reinterpret_cast<void *>(2))
                        {
                            // This is telling us the message we sent was completed
                        }
                        else
                        {
                            // TODO - I dunno what else it can be
                        }
                    }
                    break;
                }
                case grpc::CompletionQueue::NextStatus::SHUTDOWN:
                {
                    keepGoing = false;
                    break;
                }
            }
        } while(keepGoing);

        // Completion queue was shutdown
    }
    catch(std::exception& e)
    {
        QString errorMessage(
            QString("An std::exception was caught in the listening thread. Exception message: %1").arg(e.what()));
        m_backPointer->onImplError(errorMessage);
    }
    catch(...)
    {
        QString errorMessage("An exception of unknown type, was caught in the listening thread.");
        m_backPointer->onImplError(errorMessage);
    }
}
Setup looked like this
// Start up the grpc service
grpc::ServerBuilder builder;
builder.RegisterService(&m_service);
builder.AddListeningPort(endpoint.toStdString(), grpc::InsecureServerCredentials());
m_completionQueue = builder.AddCompletionQueue();
m_server = builder.BuildAndStart();
// Start the listening thread
m_listeningThread = QThread::create(&GrpcStreamingServerImpl::listeningThreadProc, this);
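As a side note (my addition, not from the original post): one way to find out about incoming client messages with this API is to post an asynchronous Read() with its own tag once the stream is established; when that tag comes back from the completion queue, a client message has arrived. A sketch, extending the tag handling above (the tag value 3 and the 'incoming' variable are arbitrary placeholders):

// Declared alongside 'stream', before the event loop:
mycompanynamespace::InputMessage incoming;

// Inside the GOT_EVENT case:
if (tag == reinterpret_cast<void *>(1))
{
    // The stream is established; ask gRPC to deliver the next client
    // message into 'incoming' and complete tag 3 when it does.
    stream.Read(&incoming, reinterpret_cast<void *>(3));
}
else if (tag == reinterpret_cast<void *>(3))
{
    // Tag 3 completing (with ok == true) means the client sent a message,
    // which is now in 'incoming'. Handle it, then re-arm the read.
    stream.Read(&incoming, reinterpret_cast<void *>(3));
}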

AMQP-CPP: Broken pipe error in TCP Handler

Unfortunately, in my project I always end up in the onError function of the event handler with the error message "Broken pipe", and I never get into the onConnected state. The monitor function in the event handler is called twice with the flag AMQP::readable. After that, it is called with no flags set; that is the moment when my pipe gets broken.
Here is what I do in my code.
First I open the connection:
int Communicator_RabbitMQ::Open(string device)
{
    AMQP::Address address(AMQP::Address("amqp://test:test@localhost/"));

    // make a connection
    m_connection = std::make_shared<AMQP::TcpConnection>(&oCommunicator_RabbitMQ_Handler, address);

    // we need a channel too
    m_channel = std::make_shared<AMQP::TcpChannel>(m_connection.get());
    m_channel->declareExchange("my-exchange", AMQP::fanout);
    m_channel->declareQueue("my-queue");
    m_channel->bindQueue("my-exchange", "my-queue", "my-routing-key");
    m_channel->declareExchange("cyos_tx_exchange", AMQP::direct);
    m_channel->declareQueue("cyos_queue");
    m_channel->bindQueue("cyos_tx_exchange", "cyos_queue", "");

    return true;
}
then I call the read function cyclically in my thread:
string Communicator_RabbitMQ::Read()
{
    int result = 0;
    int maxfd = 1;
    struct timeval tv { 1, 0 };
    string returnValue; // return value of the method
    string message;     // message from RabbitMQ

    try
    {
        FD_ZERO(&oCommunicator_RabbitMQ_Handler.m_set);
        FD_SET(oCommunicator_RabbitMQ_Handler.m_fd, &oCommunicator_RabbitMQ_Handler.m_set);

        if (oCommunicator_RabbitMQ_Handler.m_fd != -1)
        {
            maxfd = oCommunicator_RabbitMQ_Handler.m_fd + 1;
        }

        result = select(FD_SETSIZE, &oCommunicator_RabbitMQ_Handler.m_set, NULL, NULL, &tv);

        if ((result == -1) && errno == EINTR)
        {
            TRACE(L"Error in socket");
        }
        else if (result > 0)
        {
            if (oCommunicator_RabbitMQ_Handler.m_flags & AMQP::readable)
                TRACE(L"Got something");

            if (FD_ISSET(oCommunicator_RabbitMQ_Handler.m_fd, &oCommunicator_RabbitMQ_Handler.m_set))
            {
                m_connection->process(oCommunicator_RabbitMQ_Handler.m_fd, oCommunicator_RabbitMQ_Handler.m_flags);
            }
        }
    }
    catch (exception e)
    {
        cout << e.what();
    }

    return "";
}
Here is the TCP Event Handler:
#pragma once

class Communicator_RabbitMQ_Handler : public AMQP::TcpHandler
{
private:
    /**
     * Method that is called when the connection succeeded
     * @param socket Pointer to the socket
     */
    virtual void onConnected(AMQP::TcpConnection* connection)
    {
        std::cout << "connected" << std::endl;
    }

    /**
     * When the connection ends up in an error state this method is called.
     * This happens when data comes in that does not match the AMQP protocol
     *
     * After this method is called, the connection no longer is in a valid
     * state and can no longer be used. In normal circumstances this method is not called.
     *
     * @param connection The connection that entered the error state
     * @param message Error message
     */
    virtual void onError(AMQP::TcpConnection* connection, const char* message)
    {
        // report error
        std::cout << "AMQP TCPConnection error: " << message << std::endl;
    }

    /**
     * Method that is called when the connection was closed.
     * @param connection The connection that was closed and that is now unusable
     */
    virtual void onClosed(AMQP::TcpConnection* connection)
    {
        std::cout << "closed" << std::endl;
    }

    /**
     * Method that is called by AMQP-CPP to register a filedescriptor for readability or writability
     * @param connection The TCP connection object that is reporting
     * @param fd The filedescriptor to be monitored
     * @param flags Should the object be monitored for readability or writability?
     */
    virtual void monitor(AMQP::TcpConnection* connection, int fd, int flags)
    {
        //TRACE(L"Communicator_RabbitMQ_Handler, monitor called, %d, %d, %x", fd, flags, &m_set);

        // we did not yet have this watcher - but that is ok if no filedescriptor was registered
        if (flags == 0)
            return;

        if (flags & AMQP::readable)
        {
            FD_SET(fd, &m_set);
            m_fd = fd;
            m_flags = flags;
        }
    }

public:
    Communicator_RabbitMQ_Handler() = default;

    int m_fd = -1;
    int m_flags = 0;
    fd_set m_set;
};
RabbitMQ Log entry:
2018-07-02 07:04:50.272 [info] <0.9653.0> accepting AMQP connection <0.9653.0> ([::1]:39602 -> [::1]:5672)
2018-07-02 07:04:50.273 [warning] <0.9653.0> closing AMQP connection <0.9653.0> ([::1]:39602 -> [::1]:5672):
{handshake_timeout,handshake}
I finally fixed this problem by increasing the handshake timeout to 20 seconds in the rabbitmq.config file. I just added the following in that file:
handshake_timeout = 20000
The value is given in milliseconds, and the default is 10 seconds, which does not seem to be enough for my solution.

Assertion error on ZeroMQ Majordomo worker

I have a problem with the ZeroMQ Majordomo worker API, which fails on an assertion when using this simple worker and client.
The broker I am using is taken from the example section of the ZeroMQ site. What is m_reply_to used for, and when is it set?
mdwrkapi.hpp:123: zmsg* mdwrk::recv(zmsg*&): Assertion `m_reply_to.size()!=0' failed.
Here is the worker code.
mdwrk session ("tcp://localhost:5555", "GenericData", verbose);

zmsg *reply = 0;
while (1) {
    zmsg *request = session.recv (reply);
    if (request == 0) {
        break; // Worker was interrupted
    }
    reply = request; // Echo is complex… :-)
}
And here is the client part:
mdcli session ("tcp://localhost:5555", verbose);

int count = 1;
while(1) {
    zmsg * request = new zmsg("Hello world");
    zmsg * reply = session.send ("GenericData", request);

    if (reply) {
        delete reply;
    } else {
        continue; // Interrupt or failure
        puts("Interupt or failure");
    }
    sleep(1);
    puts("sleeping");
}
What's the m_reply_to used for?
As taken from the Majordomo source code, m_reply_to is declared as:
/* =====================================================================
mdwrkapi.hpp
Majordomo Protocol Worker API
Implements the MDP/Worker spec at http://rfc.zeromq.org/spec:7.
---------------------------------------------------------------------
Copyright (c) 1991-2011 iMatix Corporation <www.imatix.com>
...
*/
...
private:
...
// Return address, if any
std::string m_reply_to; // <<------------------------- RETURN ADDRESS
and it serves to store a return address, as here in recv():
// We should pop and save as many addresses as there are
// up to a null part, but for now, just save one...
m_reply_to = msg->unwrap ();
When is it set?
As seen in the source code, it may happen inside recv():
// ---------------------------------------------------------------------
// Send reply, if any, to broker and wait for next request.
zmsg *
recv (zmsg *&reply_p)
{
    // Format and send the reply if we were provided one
    zmsg *reply = reply_p;
    assert (reply || !m_expect_reply);

    if (reply) {
        assert (m_reply_to.size()!=0);
        ...

How best to interrupt a zeroMQ poll method for cleanup and termination

Writing in C++, I have a thread that uses the zmq poll method to discover when there are new events to process, which works fine. What I want, though, is for this thread to exit and clean up nicely when no more events are expected.
Rather than an infinite while loop I could put a condition in there, but it would take up to REQUEST_TIMEOUT_MS to get there. So my question is: what is the best method to interrupt the poll for program exit?
void * Requester::recieve_thread(void *arg) {
    zmq::socket_t * soc = (zmq::socket_t *) arg;

    zmq::pollitem_t items[] = { { *soc, 0, ZMQ_POLLIN, 0 } };

    while (1) {
        zmq::poll(&items[0], 1, REQUEST_TIMEOUT_MS);
        if (items[0].revents & ZMQ_POLLIN) {
            // process the event
        }
    }

    // clean up
}
It is often mentioned that you can just destroy the zmq context and anything sharing that context will exit; however, this creates a nightmare, because it tears down the socket objects and your exiting code has to do its best to avoid a minefield of dead pointers.
Attempting to close the socket from another thread doesn't work either, because sockets are not thread-safe and you'll end up with crashes.
ANSWER: The best way is to do as the ZeroMQ guide suggests for any multi-threaded use: use zmq sockets for signalling, not thread mutexes/locks/etc.
Requester::Requester(zmq::context_t* context)
{
    m_context = context;

    // Create a socket that you'll use as the interrupt-event receiver
    // I'm using a random address and an inproc socket (inprocs need to share a context)
    snprintf(m_signalStopAddr, sizeof(m_signalStopAddr) / sizeof(*m_signalStopAddr), "inproc://%lx%x", (unsigned long)this, rand());

    m_signalStop = new zmq::socket_t(*m_context, ZMQ_PAIR);
    m_signalStop->bind(m_signalStopAddr);
}

// Your thread-safe poll interrupter
void Requester::interrupt()
{
    char dummy;
    zmq::socket_t doSignal(*m_context, ZMQ_PAIR);
    doSignal.connect(m_signalStopAddr);
    doSignal.send(&dummy, sizeof(dummy));
}

void * Requester::recieve_thread(void *arg)
{
    zmq::socket_t * soc = (zmq::socket_t *) arg;

    zmq::pollitem_t items[] =
    {
        { *soc, 0, ZMQ_POLLIN, 0 },
        { *m_signalStop, 0, ZMQ_POLLIN, 0 }
    };

    while (1)
    {
        zmq::poll(items, 2, REQUEST_TIMEOUT_MS);

        if (items[1].revents & ZMQ_POLLIN)
        {
            break; // exit
        }
        if (items[0].revents & ZMQ_POLLIN)
        {
            // process the event
        }
    }

    // Cleanup
}

zmq::context_t* m_context;
zmq::socket_t* m_signalStop; // Don't forget to delete this!
char m_signalStopAddr[100];
Don't interrupt the poll - send the thread a message instructing it to clean up and exit.
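A rough usage sketch of the above (my addition; the names context, workerSocket, requester and the std::thread wiring are assumptions, not part of the original answer):

#include <thread>  // plus zmq.hpp and the Requester declaration from above

int main() {
    zmq::context_t context(1);
    zmq::socket_t workerSocket(context, ZMQ_REP);
    Requester requester(&context);

    // run the polling loop on its own thread
    std::thread worker(&Requester::recieve_thread, &requester, (void *) &workerSocket);

    // ... the rest of the program runs ...

    // shutdown: the PAIR message makes items[1] readable, so poll() returns
    // immediately and recieve_thread breaks out of its loop
    requester.interrupt();
    worker.join();
    return 0;
}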

How to wake up select() within timeout from another thread

According to the "man select" information:
"On success, select() and pselect() return the number of file descrip‐
tors contained in the three returned descriptor sets which may be zero
if the timeout expires before anything interesting happens. On error,
-1 is returned, and errno is set appropriately; the sets and timeout become
undefined, so do not rely on their contents after an error."
select() will wake up because of:
1) read/write availability
2) a select error
3) the descriptor being closed
However, how can we wake up the select() from another thread if there is no data available and the select is still within its timeout?
[update]
Pseudo Code
// Thread blocks on Select
void *SocketReadThread(void *param){
    ...
    while (!((ReadThread *)param)->ExitThread()) {
        struct timeval timeout;
        timeout.tv_sec = 60; // one minute
        timeout.tv_usec = 0;

        fd_set rds;
        FD_ZERO(&rds);
        FD_SET(sockfd, &rds);

        // actually, the first parameter of select() is
        // ignored on windows, though on linux this parameter
        // should be (maximum socket value + 1)
        int ret = select(sockfd + 1, &rds, NULL, NULL, &timeout);

        // handle the result
        // might break from here
    }

    return NULL;
}
// main Thread
int main(){
    // create the SocketReadThread
    ReaderThread* rthread = new ReaderThread;
    pthread_create(&pthreadid, NULL, SocketReadThread, (void*)rthread);

    // do lots of things here
    ............................

    // now main thread wants to exit SocketReadThread
    // it sets the internal state of ReadThread as true
    rthread->SetExitFlag(true);

    // but how to wake up select ??????????????????
    // if SocketReadThread currently blocks on select
}
[UPDATE]
1) @trojanfoe provides a method to achieve this; his method writes socket data (maybe dirty data or exit-message data) to wake up select. I am going to test it and update the result here.
2) Another thing to mention: closing a socket doesn't guarantee to wake up a select() call, please see this post.
[UPDATE2]
After doing many tests, here are some facts about waking up select:
1) If the socket watched by select is closed by the other application (the peer), then the select() call will wake up immediately. Afterwards, reading from or writing to the socket will return 0 with errno = 0.
2) If the socket watched by select is closed by another thread of the same application, then select() won't wake up until the timeout expires if there is no data to read or write. After select times out, a read/write operation results in an error with errno = EBADF (because the socket has been closed by another thread during the timeout period).
I use an event object based on pipe():
IoEvent.h:
#pragma once

class IoEvent {
protected:
    int m_pipe[2];
    bool m_ownsFDs;

public:
    IoEvent();              // Creates a user event
    IoEvent(int fd);        // Create a file event
    IoEvent(const IoEvent &other);
    virtual ~IoEvent();

    /**
     * Set the event to signalled state.
     */
    void set();

    /**
     * Reset the event from signalled state.
     */
    void reset();

    inline int fd() const {
        return m_pipe[0];
    }
};
IoEvent.cpp:
#include "IoEvent.h"
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
using namespace std;
IoEvent::IoEvent() :
m_ownsFDs(true) {
if (pipe(m_pipe) < 0)
throw MyException("Failed to create pipe: %s (%d)", strerror(errno), errno);
if (fcntl(m_pipe[0], F_SETFL, O_NONBLOCK) < 0)
throw MyException("Failed to set pipe non-blocking mode: %s (%d)", strerror(errno), errno);
}
IoEvent::IoEvent(int fd) :
m_ownsFDs(false) {
m_pipe[0] = fd;
m_pipe[1] = -1;
}
IoEvent::IoEvent(const IoEvent &other) {
m_pipe[0] = other.m_pipe[0];
m_pipe[1] = other.m_pipe[1];
m_ownsFDs = false;
}
IoEvent::~IoEvent() {
if (m_pipe[0] >= 0) {
if (m_ownsFDs)
close(m_pipe[0]);
m_pipe[0] = -1;
}
if (m_pipe[1] >= 0) {
if (m_ownsFDs)
close(m_pipe[1]);
m_pipe[1] = -1;
}
}
void IoEvent::set() {
if (m_ownsFDs)
write(m_pipe[1], "x", 1);
}
void IoEvent::reset() {
if (m_ownsFDs) {
uint8_t buf;
while (read(m_pipe[0], &buf, 1) == 1)
;
}
}
You could ditch the m_ownsFDs member; I'm not even sure I use that any more.
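To tie this back to the question, here is a minimal usage sketch (my addition, not from the original answer) of waking up the reader thread's select() with such an event object; stopEvent, sockfd and reader_loop are placeholders:

// Shared between the threads.
IoEvent stopEvent;

// Reader thread: watch both the socket and the event pipe.
void reader_loop(int sockfd) {
    for (;;) {
        fd_set rds;
        FD_ZERO(&rds);
        FD_SET(sockfd, &rds);
        FD_SET(stopEvent.fd(), &rds);

        struct timeval timeout = { 60, 0 };
        int maxfd = (sockfd > stopEvent.fd() ? sockfd : stopEvent.fd()) + 1;

        int ret = select(maxfd, &rds, NULL, NULL, &timeout);
        if (ret <= 0)
            continue; // timeout or error handling omitted

        if (FD_ISSET(stopEvent.fd(), &rds)) {
            stopEvent.reset(); // drain the pipe
            break;             // another thread asked us to exit
        }
        if (FD_ISSET(sockfd, &rds)) {
            // handle socket data
        }
    }
}

// From any other thread, this wakes the select() immediately:
// stopEvent.set();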