gRPC UDP with Deadline - C++

I have created a client-server program based on one of the tests in the gRPC repo.
The UDP code in gRPC is not built on top of its RPC layer, and so there is no notion of stubs, etc.
My code works, but I've noticed that under even mild stress a large fraction of messages gets dropped, and I'm not sure whether that is entirely due to the lossy nature of UDP or whether it's something about my code.
I have two questions:
Main question: Is there a gRPC-way to set deadlines for UDP messages? I am familiar with ClientContext and its deadline feature, but I don't know how to use it in a non-TCP RPC-less code. If not, what is the best way to achieve this?
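For reference, this is the deadline mechanism I mean on the RPC layer; a minimal sketch of the usual stub-based usage (the Greeter/SayHello/HelloReply names are placeholders from a hypothetical proto, not from my code):
grpc::ClientContext context;
context.set_deadline(std::chrono::system_clock::now() +
                     std::chrono::milliseconds(100));
HelloReply reply;
grpc::Status status = stub->SayHello(&context, request, &reply);
if (status.error_code() == grpc::StatusCode::DEADLINE_EXCEEDED) {
    // the call was abandoned after 100 ms
}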
Is a 50% drop rate sensible for UDP communication over localhost?
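For the first question, absent a gRPC-level mechanism, the only fallback I can think of is a plain receive timeout on the UDP fd itself; a sketch, nothing gRPC-specific:
// Per-recv() "deadline" on a plain UDP socket.
struct timeval tv{};
tv.tv_usec = 100 * 1000; // 100 ms
setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
// recv() now returns -1 with errno == EAGAIN/EWOULDBLOCK once the timeout expires.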
My code (it's quite long, so I'm just attaching it for reference; my main question doesn't require reading it):
#include <netdb.h>
#include <unistd.h> // write(), close()
#include <cassert>
#include <cstring> // memcpy()
#include <iostream>
#include <string>
#include <thread>
#include <vector>
// grpc headers
#include <grpcpp/grpcpp.h>
#include "src/core/lib/iomgr/udp_server.h"
#include "src/core/lib/iomgr/socket_utils_posix.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
using namespace std;
int client_port = 6666;
int server_port = 5555;
int num_of_msgs = 1000;
int listening_port;
int remote_port;
int fd;
int received_msgs_cnt = 0;
vector<bool> is_received(num_of_msgs, false);
enum Role {
CLIENT,
SERVER
};
struct Request {
int id;
};
struct Response {
int id;
};
Role role;
bool udpServerFinished = false;
void sendUdp(const char *hostname, int port, const char* payload, size_t size) {
// hostname/port are unused here: fd is already connect()ed to the peer
(void)hostname; (void)port;
auto transferred = write(fd, (void*)payload, size);
assert(transferred >= 0 && size == (size_t)transferred);
}
/***************************************
* UDP Handler class
* (will be generated by factory class)
* upon receiving a new message, the Read()
* function is invoked
***************************************/
class UdpHandler : public GrpcUdpHandler {
public:
UdpHandler(grpc_fd *emfd, void *user_data):
GrpcUdpHandler(emfd, user_data), emfd_(emfd) {
}
virtual ~UdpHandler() {}
static void startLoop(volatile bool &udpServerFinished) {
grpc_core::ExecCtx exec_ctx;
grpc_millis deadline;
gpr_mu_lock(g_mu);
while (!udpServerFinished) {
deadline = grpc_timespec_to_millis_round_up(gpr_time_add(
gpr_now(GPR_CLOCK_MONOTONIC),
gpr_time_from_millis(10000, GPR_TIMESPAN)));
grpc_pollset_worker *worker = nullptr;
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"pollset_work", grpc_pollset_work(UdpHandler::g_pollset, &worker, deadline)));
gpr_mu_unlock(UdpHandler::g_mu);
grpc_core::ExecCtx::Get()->Flush();
gpr_mu_lock(UdpHandler::g_mu);
}
gpr_mu_unlock(g_mu);
}
static grpc_pollset *g_pollset;
static gpr_mu *g_mu;
public:
static int g_num_listeners;
protected:
bool Read() override {
char read_buffer[512];
ssize_t byte_count;
gpr_mu_lock(UdpHandler::g_mu);
byte_count = recv(grpc_fd_wrapped_fd(emfd()), read_buffer, sizeof(read_buffer), 0);
processIncomingMsg((void*)read_buffer, byte_count);
GPR_ASSERT(GRPC_LOG_IF_ERROR("pollset_kick",
grpc_pollset_kick(UdpHandler::g_pollset, nullptr)));
gpr_mu_unlock(UdpHandler::g_mu);
return false;
}
void processIncomingMsg(void* msg, ssize_t size) {
received_msgs_cnt++;
(void)size;
int id;
if (role == Role::CLIENT) {
Response res;
assert(size == sizeof(Response));
memcpy((void*)&res, (void*)msg, size);
id = res.id;
cout << "Msg: response for request " << res.id << endl;
}
else {
Request req;
assert(size == sizeof(Request));
memcpy((void*)&req, (void*)msg, size);
id = req.id;
cout << "Msg: request " << req.id << endl;
// send response
Response res;
res.id = req.id;
sendUdp("127.0.0.1", remote_port, (const char*)&res, sizeof(Response));
}
// check for termination condition (both for client and server)
if (received_msgs_cnt == num_of_msgs) {
cout << "This is the last msg" << endl;
udpServerFinished = true;
}
// mark the id of the current message
is_received[id] = true;
// if this was the last message, print the missing msg ids
if (id == num_of_msgs - 1) {
cout << "missing ids: ";
for (int i = 0; i < num_of_msgs; i++) {
if (is_received[i] == false)
cout << i << ", ";
}
cout << endl;
cout << "% of missing messages: "
<< 1.0 - ((double)received_msgs_cnt / num_of_msgs) << endl;
}
}
void OnCanWrite(void* /*user_data*/, grpc_closure* /*notify_on_write_closure*/) override {
gpr_mu_lock(g_mu);
GPR_ASSERT(GRPC_LOG_IF_ERROR("pollset_kick",
grpc_pollset_kick(UdpHandler::g_pollset, nullptr)));
gpr_mu_unlock(g_mu);
}
void OnFdAboutToOrphan(grpc_closure *orphan_fd_closure, void* /*user_data*/) override {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, orphan_fd_closure, GRPC_ERROR_NONE);
}
grpc_fd *emfd() { return emfd_; }
private:
grpc_fd *emfd_;
};
int UdpHandler::g_num_listeners = 1;
grpc_pollset *UdpHandler::g_pollset;
gpr_mu *UdpHandler::g_mu;
/****************************************
* Factory class (generates UDP handlers)
****************************************/
class UdpHandlerFactory : public GrpcUdpHandlerFactory {
public:
GrpcUdpHandler *CreateUdpHandler(grpc_fd *emfd, void *user_data) override {
UdpHandler *handler = new UdpHandler(emfd, user_data);
return handler;
}
void DestroyUdpHandler(GrpcUdpHandler *handler) override {
delete reinterpret_cast<UdpHandler *>(handler);
}
};
/****************************************
* Main function
****************************************/
int main(int argc, char *argv[]) {
if (argc != 2) {
cerr << "Usage: './run client' or './run server' " << endl;
return 1;
}
string r(argv[1]);
if (r == "client") {
cout << "Client is initializing to send requests!" << endl;
role = Role::CLIENT;
listening_port = client_port;
remote_port = server_port;
}
else if (r == "server") {
cout << "Server is initializing to accept requests!" << endl;
role = Role::SERVER;
listening_port = server_port;
remote_port = client_port;
}
else {
cerr << "Usage: './run client' or './run server' " << endl;
return 1;
}
/********************************************************
* Initialize UDP Listener
********************************************************/
/* Initialize the grpc library. After it's called,
* a matching invocation to grpc_shutdown() is expected. */
grpc_init();
grpc_core::ExecCtx exec_ctx;
UdpHandler::g_pollset = static_cast<grpc_pollset *>(
gpr_zalloc(grpc_pollset_size()));
grpc_pollset_init(UdpHandler::g_pollset, &UdpHandler::g_mu);
grpc_resolved_address resolved_addr;
struct sockaddr_storage *addr =
reinterpret_cast<struct sockaddr_storage *>(resolved_addr.addr);
int svrfd;
grpc_udp_server *s = grpc_udp_server_create(nullptr);
grpc_pollset *pollsets[1];
memset(&resolved_addr, 0, sizeof(resolved_addr));
resolved_addr.len = static_cast<socklen_t>(sizeof(struct sockaddr_storage));
addr->ss_family = AF_INET;
grpc_sockaddr_set_port(&resolved_addr, listening_port);
/* setup UDP server */
UdpHandlerFactory handlerFactory;
int rcv_buf_size = 1024; // 1 KB socket buffers; small buffers overflow easily under bursts
int snd_buf_size = 1024;
GPR_ASSERT(grpc_udp_server_add_port(s, &resolved_addr, rcv_buf_size,
snd_buf_size, &handlerFactory,
UdpHandler::g_num_listeners) > 0);
svrfd = grpc_udp_server_get_fd(s, 0);
GPR_ASSERT(svrfd >= 0);
GPR_ASSERT(getsockname(svrfd, (struct sockaddr *) addr,
(socklen_t *) &resolved_addr.len) == 0);
GPR_ASSERT(resolved_addr.len <= sizeof(struct sockaddr_storage));
pollsets[0] = UdpHandler::g_pollset;
grpc_udp_server_start(s, pollsets, 1, nullptr);
string addr_str = grpc_sockaddr_to_string(&resolved_addr, 1);
cout << "UDP Server listening on: " << addr_str << endl;
thread udpPollerThread(
UdpHandler::startLoop, ref(udpServerFinished));
/********************************************************
* Establish connection to the other side
********************************************************/
struct sockaddr_in serv_addr;
struct hostent *server = gethostbyname("127.0.0.1");
bzero((char *) &serv_addr, sizeof(serv_addr));
serv_addr.sin_family = AF_INET;
bcopy((char *) server->h_addr,
(char *) &serv_addr.sin_addr.s_addr,
server->h_length);
serv_addr.sin_port = htons(remote_port);
fd = socket(serv_addr.sin_family, SOCK_DGRAM, 0);
GPR_ASSERT(fd >= 0);
GPR_ASSERT(connect(fd, (struct sockaddr *) &serv_addr, sizeof(serv_addr)) == 0);
/********************************************************
* Send requests
********************************************************/
if (role == Role::CLIENT) {
static int counter = 0;
for (int i = 0; i < num_of_msgs; i++) {
Request req;
req.id = counter++;
cout << "Sending request " << req.id << endl;
sendUdp("127.0.0.1", remote_port, (char*)&req, sizeof(Request));
}
}
/********************************************************
* wait for client to finish
********************************************************/
udpPollerThread.join();
/********************************************************
* cleanup
********************************************************/
close(fd);
gpr_free(UdpHandler::g_pollset);
grpc_shutdown();
cout << "finished successfully!" << endl;
return 0;
}
Compiled with:
-std=c++17 -I$(GRPC_DIR) -I$(GRPC_DIR)/third_party/abseil-cpp
Linked with:
pkg-config --libs grpc++

Related

Boost Asio, console pinger throws exception on socket.send (...)

I'm trying to figure out how ICMP and Boost Asio work. There is a problem sending the packet to the endpoint: the entered URL is resolved to an IP and the socket is connected, but when a packet is sent via socket.send(...), an exception is thrown.
Exception: send: Bad address
#include <algorithm>
#include <chrono>
#include <functional>
#include <iostream>
#include <memory>
#include <tuple>
//BOOST
#include <boost/asio.hpp>
#include <boost/program_options.hpp>
#include <boost/log/trivial.hpp>
//CONSTANTS
#define BUFFER_SIZE_64KB 65536
#define TTL_DEFAULT 64
#define ICMP_HDR_SIZE 8
#define LINUX_PAYLOAD_SIZE 56
#define TIME_BYTE_SIZE 4
#define FILL_BYTE 0X8
template <typename T, typename flag_type = int>
using flagged = std::tuple<flag_type, T>;
using namespace boost::asio;
typedef boost::system::error_code error_code;
typedef unsigned char byte;
enum ICMP : uint8_t {
ECHO_REPLY = 0,
UNREACH = 3,
TIME_EXCEEDED = 11,
ECHO_REQUEST = 8
};
enum class IPtype {IPV4, IPV6, BOTH};
struct icmp_header_t {
uint8_t type;
uint8_t code;
uint16_t checksum;
uint16_t id;
uint16_t seq_num;
};
struct ip_header_t {
uint8_t ver_ihl;
uint8_t tos;
uint16_t total_length;
uint16_t id;
uint16_t flags_fo;
uint8_t ttl;
uint8_t protocol;
uint16_t checksum;
uint32_t src_addr;
uint32_t dst_addr;
};
ip_header_t ip_load(std::istream& stream, bool ntoh ) {
ip_header_t header;
stream.read((char*)&header.ver_ihl, sizeof(header.ver_ihl));
stream.read((char*)&header.tos, sizeof(header.tos));
stream.read((char*)&header.total_length, sizeof(header.total_length));
stream.read((char*)&header.id, sizeof(header.id));
stream.read((char*)&header.flags_fo, sizeof(header.flags_fo));
stream.read((char*)&header.ttl, sizeof(header.ttl));
stream.read((char*)&header.protocol, sizeof(header.protocol));
stream.read((char*)&header.checksum, sizeof(header.checksum));
stream.read((char*)&header.src_addr, sizeof(header.src_addr));
stream.read((char*)&header.dst_addr, sizeof(header.dst_addr));
if (ntoh) {
header.total_length = ntohs(header.total_length);
header.id = ntohs(header.id);
header.flags_fo = ntohs(header.flags_fo);
header.checksum = ntohs(header.checksum);
header.src_addr = ntohl(header.src_addr);
header.dst_addr = ntohl(header.dst_addr);
}
return header;
}
icmp_header_t icmp_load(std::istream& stream) {
icmp_header_t header;
stream.read((char*)&header.type, sizeof(header.type));
stream.read((char*)&header.code, sizeof(header.code));
stream.read((char*)&header.checksum, sizeof(header.checksum));
stream.read((char*)&header.id, sizeof(header.id));
stream.read((char*)&header.seq_num, sizeof(header.seq_num));
return header;
}
flagged<ip::icmp::endpoint> sync_icmp_solver(io_service& ios, std::string host,
IPtype type = IPtype::BOTH) noexcept {
ip::icmp::resolver::query query(host, "");
ip::icmp::resolver resl(ios);
ip::icmp::endpoint ep;
error_code ec;
auto it = resl.resolve(query, ec);
if (ec != boost::system::errc::errc_t::success) {
std::cerr << "Error message = " << ec.message() << std::endl;
return std::make_tuple(ec.value(), ep);
}
ip::icmp::resolver::iterator it_end;
//Finds first available ip.
while (it != it_end) {
ip::icmp::endpoint ep = (it++)->endpoint();
auto addr = ep.address();
switch(type) {
case IPtype::IPV4:
if (addr.is_v4()) return std::make_tuple(0, ep);
break;
case IPtype::IPV6:
if(addr.is_v6()) return std::make_tuple(0, ep);
break;
case IPtype::BOTH:
return std::make_tuple(0, ep);
break;
}
}
return std::make_tuple(-1, ep);
}
unsigned short checksum(void *b, int len) {
unsigned short* buf = reinterpret_cast<unsigned short*>(b);
unsigned int sum = 0;
unsigned short result;
for (sum = 0; len > 1; len -= 2 ) {
sum += *buf++;
}
if (len == 1) sum += *(byte*) buf;
sum = (sum >> 16) + (sum & 0xFFFF);
sum += (sum >> 16);
result = ~sum;
return result;
}
unsigned short get_identifier() {
#if defined(BOOST_WINDOWS)
return static_cast<unsigned short>(::GetCurrentProcessId());
#else
return static_cast<unsigned short>(::getpid());
#endif
}
struct PingInfo {
unsigned short seq_num = 0;
size_t time_out;
size_t reply_time = 1;
size_t payload_size = LINUX_PAYLOAD_SIZE;
size_t packets_rec = 0;
size_t packets_trs = 0;
size_t reps = 0;
};
class PingConnection {
private:
ip::icmp::socket sock;
io_service* ios_ptr;
PingInfo* pi_ptr;
ip::icmp::endpoint dst;
boost::posix_time::ptime timestamp;
streambuf input_buf;
deadline_timer deadtime;
//TODO: Check for memleaks.
void write_icmp_req(std::ostream& os) {
byte* pckt = new byte[ICMP_HDR_SIZE + pi_ptr->payload_size];
unsigned short pid = get_identifier();
pckt[0] = 0x8;
pckt[1] = 0x0;
pckt[2] = 0x0;
pckt[3] = 0x0;
pckt[4] = (byte)((pid & 0xF0) >> 4);
pckt[5] = (byte)(pid & 0x0F);
for (size_t i = ICMP_HDR_SIZE; i < ICMP_HDR_SIZE + pi_ptr->payload_size; i++) {
pckt[i] = FILL_BYTE;
}
pckt[6] = (byte)((pi_ptr->seq_num & 0xF0) >> 4);
pckt[7] = (byte)((pi_ptr->seq_num)++ & 0x0F);
unsigned short cs = checksum(pckt, ICMP_HDR_SIZE);
pckt[2] = (byte)((cs & 0xF0) >> 4);
pckt[3] = (byte)(cs & 0x0F);
os << pckt;
delete [] pckt;
}
void pckt_send() {
streambuf buf;
std::ostream os(&buf);
write_icmp_req(os);
timestamp = boost::posix_time::microsec_clock::universal_time();
std::cout << "begin" << std::endl;
sock.send(buf.data());
std::cout << "sock.send(buf.data())" << std::endl;
deadtime.expires_at(timestamp + boost::posix_time::seconds(pi_ptr->time_out));
deadtime.async_wait(std::bind(&PingConnection::req_timeout_callback, this));
}
void req_timeout_callback() {
if (pi_ptr->reps == 0) {
std::cout << "Time Out:echo req" << std::endl;
}
deadtime.expires_at(timestamp + boost::posix_time::seconds(pi_ptr->reply_time));
deadtime.async_wait(std::bind(&PingConnection::pckt_send, this));
}
void pckt_recv() {
std::cout << "pckt_recv" << std::endl;
input_buf.consume(input_buf.size());
sock.async_receive(input_buf.prepare(BUFFER_SIZE_64KB),
std::bind(&PingConnection::recv_timeout_callback, this, std::placeholders::_2));
}
void recv_timeout_callback(size_t sz) {
std::cout << "recv_timeout_callback" << std::endl;
input_buf.commit(sz);
std::istream is(&input_buf);
ip_header_t iph = ip_load(is, false);
icmp_header_t icmph = icmp_load(is);
if (is &&
icmph.type == ECHO_REQUEST &&
icmph.id == get_identifier() &&
icmph.seq_num == pi_ptr->seq_num) {
// If this is the first reply, interrupt the five second timeout.
if (pi_ptr->reps++ == 0) deadtime.cancel();
boost::posix_time::ptime now = boost::posix_time::microsec_clock::universal_time();
std::cout << sz - iph.total_length
<< " bytes from " << iph.src_addr
<< ": icmp_seq=" << icmph.seq_num
<< ", ttl=" << iph.ttl
<< ", time=" << (now - timestamp).total_milliseconds() << " ms"
<< std::endl;
}
pckt_recv();
}
public:
PingConnection(io_service& ios, PingInfo& pi_add) : deadtime(ios), sock(ios) {
pi_ptr = &pi_add;
ios_ptr = &ios;
}
void ping(std::string host) {
int err_flag;
error_code error;
std::tie(err_flag, dst) = sync_icmp_solver(*ios_ptr, host);
if (err_flag) return;
std::cout << dst << std::endl;
sock.connect(dst, error);
if(error) {
return;
}
std::cout << "sock.connect(dst)" << error.message() <<std::endl;
pckt_send();
pckt_recv();
}
};
int main(int argc, char** argv) {
try
{
if (argc < 2) {
std::cerr << "Usage: ping [args]* destination\n";
return -1;
}
io_service ios;
PingInfo pi;
pi.time_out = 56;
PingConnection ping(ios, pi);
ping.ping(argv[1]);
ios.run();
} catch(std::exception& e) {
std::cerr << "Exception: " << e.what() << std::endl;
}
}
socket.send() is called in pckt_send().
For development I use WSL2 with an Ubuntu image.
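One detail in write_icmp_req stands out, though it may or may not explain the EFAULT: os << pckt streams the buffer as a NUL-terminated C string, and since pckt[1] is 0x0 only the first byte reaches the stream, so the datagram actually sent is truncated. Raw bytes need os.write(). The 16-bit id/seq/checksum fields are also split with nibble masks instead of byte shifts. A sketch of both fixes:
// Write all packet bytes; operator<< stops at the first 0x00 byte.
os.write(reinterpret_cast<const char*>(pckt), ICMP_HDR_SIZE + pi_ptr->payload_size);
// A 16-bit field splits into bytes with shifts and byte masks, not nibble math:
pckt[4] = (byte)((pid >> 8) & 0xFF);
pckt[5] = (byte)(pid & 0xFF);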

How to use libuv to accept TCP connections with multiple threads?

I wrote a C++ demo of a TCP server with libuv. When I checked CPU usage, I found that the demo runs on a single thread; how can I implement it with multiple threads?
Currently the demo can handle 100,000+ TCP requests per second, but it only saturates one CPU.
Code:
#include <iostream>
#include <atomic>
#include "uv.h"
#include <thread>
#include <mutex>
#include <map>
using namespace std;
auto loop = uv_default_loop();
struct sockaddr_in addr;
typedef struct {
uv_write_t req;
uv_buf_t buf;
} write_req_t;
typedef struct {
uv_stream_t* client;
uv_alloc_cb alloc_cb;
uv_read_cb read_cb;
} begin_read_req;
void alloc_buffer(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf) {
buf->base = (char*)malloc(suggested_size);
buf->len = suggested_size;
}
void free_write_req(uv_write_t *req) {
write_req_t *wr = (write_req_t*)req;
free(wr->buf.base);
free(wr);
}
void echo_write(uv_write_t *req, int status) {
if (status) {
fprintf(stderr, "Write error %s\n", uv_strerror(status));
}
free_write_req(req);
}
void echo_read(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf) {
if (nread > 0) {
auto req = (write_req_t*)malloc(sizeof(write_req_t));
auto *aaa = (char*)malloc(5);
aaa[0] = '+';
aaa[1] = 'O';
aaa[2] = 'K';
aaa[3] = '\r';
aaa[4] = '\n';
req->buf = uv_buf_init(aaa, 5);
uv_write((uv_write_t*)req, client, &req->buf, 1, echo_write);
}
if (nread < 0) {
if (nread != UV_EOF)
fprintf(stderr, "Read error %s\n", uv_err_name(static_cast<unsigned int>(nread)));
uv_close((uv_handle_t*)client, nullptr);
}
free(buf->base);
}
void acceptClientRead(uv_work_t *req) {
begin_read_req *data = (begin_read_req *)req->data;
uv_read_start(data->client, data->alloc_cb, data->read_cb);
}
void on_new_connection(uv_stream_t *server, int status) {
if (status < 0) {
cout << "New connection error:" << uv_strerror(status);
return;
}
uv_tcp_t *client = (uv_tcp_t *)malloc(sizeof(uv_tcp_t));
uv_tcp_init(loop, client);
uv_work_t *req = (uv_work_t *)malloc(sizeof(uv_work_t));
begin_read_req *read_req = (begin_read_req *)malloc(sizeof(begin_read_req));
read_req->client = (uv_stream_t *)client;
read_req->read_cb = echo_read;
read_req->alloc_cb = alloc_buffer;
req->data = read_req;
if (uv_accept(server, (uv_stream_t *)client) == 0) {
uv_read_start((uv_stream_t *)client, alloc_buffer, echo_read);
// uv_queue_work(workloop[0], req, acceptClientRead, nullptr);
}
else {
uv_close((uv_handle_t *)client, nullptr);
}
}
void timer_callback(uv_timer_t* handle) {
cout << std::this_thread::get_id() << "---------" << "hello" << endl;
}
int main() {
uv_tcp_t server{};
uv_tcp_init(loop, &server);
uv_ip4_addr("0.0.0.0", 8790, &addr);
uv_tcp_bind(&server, (const struct sockaddr *) &addr, 0);
uv_listen((uv_stream_t *)&server, 511, on_new_connection);
uv_run(loop, UV_RUN_DEFAULT);
return 0;
}
Of course, I could make the write step asynchronous in the method "echo_read", but I don't do any work before the write; can I make the demo multi-threaded in another way to improve throughput?
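One commonly suggested pattern is one event loop per thread, each with its own listening socket bound with SO_REUSEPORT, so the kernel spreads incoming connections across the threads. A minimal sketch, assuming Linux and libuv >= 1.7 (for uv_tcp_init_ex); note that on_new_connection would then have to init clients on server->loop rather than the global loop:
#include <uv.h>
#include <sys/socket.h>
#include <thread>
#include <vector>

void run_worker() {
    uv_loop_t wloop;
    uv_loop_init(&wloop);
    uv_tcp_t server;
    uv_tcp_init_ex(&wloop, &server, AF_INET); // _ex creates the socket fd eagerly
    uv_os_fd_t fd;
    uv_fileno((uv_handle_t*)&server, &fd);
    int on = 1;
    // Several sockets may bind the same port; the kernel load-balances accepts.
    setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on));
    struct sockaddr_in a;
    uv_ip4_addr("0.0.0.0", 8790, &a);
    uv_tcp_bind(&server, (const struct sockaddr*)&a, 0);
    uv_listen((uv_stream_t*)&server, 511, on_new_connection);
    uv_run(&wloop, UV_RUN_DEFAULT);
}

int main() {
    std::vector<std::thread> workers;
    for (unsigned i = 0; i < std::thread::hardware_concurrency(); i++)
        workers.emplace_back(run_worker);
    for (auto& t : workers) t.join();
}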

ZeroMQ PUB/SUB bind subscriber

I'm studying ZeroMQ on my own.
I tested PUB as a server (bind) with SUB as a client (connect) and it worked fine. The opposite (PUB as a client (connect), SUB as a server (bind)) also works fine.
When I connect another SUB socket as a client, something goes wrong without any exception or error.
Here's my example code.
#include <zmq.hpp>
#include <string>
#include <iostream>
#include <unistd.h>
#include <thread>
class ZMQSock
{
public:
ZMQSock(const char* addr)
{
if (addr != NULL)
{
mctx = new zmq::context_t(1);
mszAddr = new char[strlen(addr) + 1];
snprintf(mszAddr, strlen(addr) + 1, "%s", addr);
}
}
virtual ~ZMQSock()
{
if (msock != nullptr)
delete msock;
if (mctx != nullptr)
delete mctx;
if (mszAddr != nullptr)
delete [] mszAddr;
}
int zbind()
{
if (msock != nullptr)
msock->bind(mszAddr);
else return -1;
return 0;
}
int zconnect()
{
if (msock != nullptr)
msock->connect(mszAddr);
else return -1;
return 0;
}
void start()
{
if (mbthread != false)
return ;
mbthread = true;
mhthread = std::thread(std::bind(&ZMQSock::run, this));
}
virtual void stop()
{
if (mbthread == false)
return ;
mbthread = false;
if (mhthread.joinable())
mhthread.join();
}
virtual void run() = 0;
protected:
char* mszAddr{nullptr};
zmq::context_t* mctx{nullptr};
zmq::socket_t* msock{nullptr};
bool mbthread{false};
std::thread mhthread;
};
class ZPublisher : public ZMQSock
{
public:
ZPublisher(const char* addr) : ZMQSock(addr)
{
if (msock == nullptr)
{
msock = new zmq::socket_t(*mctx, ZMQ_PUB);
}
}
virtual ~ZPublisher()
{
}
bool zsend(const char* data, const unsigned int length, bool sendmore=false)
{
zmq::message_t msg(length);
memcpy(msg.data(), data, length);
if (sendmore)
return msock->send(msg, ZMQ_SNDMORE);
return msock->send(msg);
}
void run()
{
if (mszAddr == nullptr)
return ;
if (strlen(mszAddr) < 6)
return ;
const char* fdelim = "1";
const char* first = "it sends to first. two can not recv this sentence!\0";
const char* sdelim = "2";
const char* second = "it sends to second. one can not recv this sentence!\0";
while (mbthread)
{
zsend(fdelim, 1, true);
zsend(first, strlen(first));
zsend(sdelim, 1, true);
zsend(second, strlen(second));
usleep(1000 * 1000);
}
}
};
class ZSubscriber : public ZMQSock
{
public:
ZSubscriber(const char* addr) : ZMQSock(addr)
{
if (msock == nullptr)
{
msock = new zmq::socket_t(*mctx, ZMQ_SUB);
}
}
virtual ~ZSubscriber()
{
}
void setScriberDelim(const char* delim, const int length)
{
msock->setsockopt(ZMQ_SUBSCRIBE, delim, length);
mdelim = std::string(delim, length);
}
std::string zrecv()
{
zmq::message_t msg;
msock->recv(&msg);
return std::string(static_cast<char*>(msg.data()), msg.size());
}
void run()
{
if (mszAddr == nullptr)
return ;
if (strlen(mszAddr) < 6)
return ;
while (mbthread)
{
std::cout << "MY DELIM IS [" << mdelim << "] - MSG : ";
std::cout << zrecv() << std::endl;
usleep(1000 * 1000);
}
}
private:
std::string mdelim;
};
int main ()
{
ZPublisher pub("tcp://localhost:5252");
ZSubscriber sub1("tcp://localhost:5252");
ZSubscriber sub2("tcp://*:5252");
pub.zconnect();
sub1.zconnect();
sub2.zbind();
sub1.setScriberDelim("1", 1);
sub2.setScriberDelim("2", 1);
pub.start();
std::cout << "PUB Server has been started.." << std::endl;
usleep(1000 * 1000);
sub1.start();
std::cout << "SUB1 Start." << std::endl;
sub2.start();
std::cout << "SUB2 Start." << std::endl;
int i = 0;
std::cout << "< Press any key to exit program. >" << std::endl;
std::cin >> i;
std::cout << "SUB1 STOP START" << std::endl;
sub1.stop();
std::cout << "SUB2 STOP START" << std::endl;
sub2.stop();
std::cout << "PUB STOP START" << std::endl;
pub.stop();
std::cout << "ALL DONE" << std::endl;
return 0;
}
What causes this? Or am I using PUB/SUB incorrectly?
You are connecting a SUB socket to a SUB socket, that is an invalid connection. In your case the PUB should bind and the SUBs should connect.
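A sketch of the corrected wiring, reusing the classes above:
// PUB binds as the server; both SUBs connect as clients.
ZPublisher pub("tcp://*:5252");
ZSubscriber sub1("tcp://localhost:5252");
ZSubscriber sub2("tcp://localhost:5252");
pub.zbind();
sub1.zconnect();
sub2.zconnect();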

ZeroMQ Message Corrupts Data

I'm sending a struct over a ZeroMQ connection. Two fields are correct, but one is corrupted. To illustrate the problem, my code tries to read the content of the zmq message and create a new struct.
Output:
$ ./client
id = 100
successful = 1
data_size = 356515840
Segmentation fault
client.cc
#include <string>
#include <iostream>
#include "zmq.hpp"
#include "struct.h"
int main () {
zmq::context_t context (1);
zmq::socket_t socket (context, ZMQ_REQ);
socket.connect ("tcp://localhost:5555");
query q(100);
q.data_size = 3;
q.data = "abc";
q.successful = true;
zmq::message_t request = q.generate_message();
query test_query((char *) request.data());
return 0;
}
struct.h
#include <arpa/inet.h>
#include "zmq.hpp"
struct query {
long id;
bool successful;
long data_size;
std::string data;
query (long id) {
this->id = id;
this->successful = false;
this->data_size = 0;
this->data = "";
}
query (char *start) {
id = *((long*) start);
start += sizeof(long);
std::cout << "id = " << id << std::endl;
successful = *((bool*) start);
start += sizeof(bool);
std::cout << "successful = " << successful << std::endl;
data_size = *((long *) start);
data_size = ntohl(data_size);
start += sizeof(long);
std::cout << "data_size = " << data_size << std::endl;
data = std::string(start, data_size);
}
long get_total_size() {
return 2 * sizeof(long) + sizeof(bool) + data_size;
}
// The string is encoded by calling data.c_str()
zmq::message_t generate_message() {
long size = get_total_size();
zmq::message_t msg(size);
memcpy((void *) msg.data(), this, size);
char *loc_of_data = (char *) msg.data() + 2 * sizeof(long) + sizeof(bool);
memcpy((void *) loc_of_data, data.c_str(), data_size);
return msg;
}
};
You're ignoring padding.
Because you are using memcpy, you're putting the same layout in the message as exists in your structure in memory. And long, bool, long in a structure has padding for alignment purposes.
I suggest a substructure for your "plain-old-data", and stop thinking about 2 * sizeof(long) + sizeof (bool).
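A sketch of that suggestion: group the fixed-size fields in one trivially copyable substructure, copy it as a single blob (padding and all), and compute the string offset from sizeof rather than hand-added field sizes. This only stays correct while both ends share the same ABI; cross-machine traffic needs field-by-field serialization with fixed-width types and explicit byte order (note there is no ntohl here, since the sender wrote host byte order):
struct query_header { // plain-old-data, copied as one blob
    long id;
    bool successful;
    long data_size;
};
struct query2 {
    query_header h;
    std::string data;
    zmq::message_t generate_message() const {
        zmq::message_t msg(sizeof(query_header) + h.data_size);
        memcpy(msg.data(), &h, sizeof(query_header));
        memcpy((char*)msg.data() + sizeof(query_header), data.c_str(), h.data_size);
        return msg;
    }
    explicit query2(const char* start) {
        memcpy(&h, start, sizeof(query_header)); // same layout, padding included
        data = std::string(start + sizeof(query_header), h.data_size);
    }
};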

Vlan id is set to 0 when TPACKET_V2 is used

I have a problem with the usage of TPACKET_V2.
After enabling this packet version on the socket, when I try to receive packets I can't read the VLAN id from the packet header: the vlan_tci is always 0.
I'm on SUSE SP1; the same program gets the VLAN id when run on SLES SP2 but not on SLES SP1. The weird thing is that tcpdump is able to get the VLAN id on that SLES SP1 machine, and tcpdump sets TPACKET_V2 (so TPACKET_V2 is supported there).
My simple project is based on the functions below, all called from createSocket; then there is a simple method that reads packets from the socket, where I try to get the VLAN id (the corresponding TPACKET_V1 code used previously is also shown).
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <netinet/ether.h>
#include <linux/filter.h>
#include <net/if.h>
#include <arpa/inet.h>
enum INTERFACE_T
{
RX_INTERFACE,
TX_INTERFACE
};
static const char* PKT_TYPE[];
// BLOCK_NUM*BLOCK_SIZE = FRAME_NUM*FRAME_SIZE
enum { RX_BLOCK_SIZE = 8192,
RX_BLOCK_NUM = 256,
RX_FRAME_SIZE = 2048,
RX_FRAME_NUM = 1024
};
enum { TX_BLOCK_SIZE = 8192,
TX_BLOCK_NUM = 256,
TX_FRAME_SIZE = 2048,
TX_FRAME_NUM = 1024
};
struct RxFrame {
struct tpacket2_hdr tp_h; // Packet header
uint8_t tp_pad[TPACKET_ALIGN(sizeof(tpacket2_hdr))-sizeof(tpacket2_hdr)];
struct sockaddr_ll sa_ll; // Link level address information
uint8_t sa_ll_pad[14]; //Alignment padding
struct ethhdr eth_h;
} __attribute__((packed));
struct TxFrame
{
struct tpacket_hdr tp_h; // Packet header
uint8_t tp_pad[TPACKET_ALIGN(sizeof(tpacket_hdr))-sizeof(tpacket_hdr)];
// struct vlan_ethhdr vlan_eth_h;
// struct arp arp;
} __attribute__((packed));
struct ring_buff {
struct tpacket_req req;
size_t size; // mmap size
size_t cur_frame;
struct iovec *ring_buffer_;
void *buffer; // mmap
};
int setIfFlags(short int flags)
{
struct ifreq ifr;
memset(&ifr, 0, sizeof(ifr));
strncpy(ifr.ifr_name, if_name_.c_str(), sizeof(ifr.ifr_name));
ifr.ifr_hwaddr.sa_family=getIfArptype();
ifr.ifr_flags |= flags;
if ( ioctl(socket_, SIOCSIFFLAGS, &ifr) == -1)
{
std::cout << "Error: ioctl(SIOSIFFLAGS) failed!" << std::endl;
return 1;
}
return 0;
}
int bindSocket()
{
struct sockaddr_ll sll;
memset(&sll, 0, sizeof(sll));
sll.sll_family = AF_PACKET;
sll.sll_protocol = htons(ETH_P_ALL);
sll.sll_ifindex = ifIndex_;
sll.sll_hatype = 0;
sll.sll_pkttype = 0;
sll.sll_halen = 0;
if (bind(socket_, (struct sockaddr *)&sll, sizeof(sll)) < 0) {
std::cout << "Error: bind() failed!" << std::endl;
return 1;
}
return 0;
}
int packetMmap(ring_buff * rb)
{
assert(rb);
rb->buffer = mmap(0, rb->size, PROT_READ | PROT_WRITE, MAP_SHARED, socket_, 0);
if (rb->buffer == MAP_FAILED) {
std::cout << "Error: mmap() failed!" << std::endl;
return 1;
}
return 0;
}
void packetMunmap(ring_buff * rb)
{
assert(rb);
if (rb->buffer)
{
munmap(rb->buffer, rb->size);
rb->buffer = NULL;
rb->size = 0;
}
}
int frameBufferCreate(ring_buff * rb)
{
assert(rb);
rb->ring_buffer_ = (struct iovec*) malloc(rb->req.tp_frame_nr * sizeof(*rb->ring_buffer_));
if (!rb->ring_buffer_) {
std::cout << "No memory available !!!" << std::endl;
return 1;
}
memset(rb->ring_buffer_, 0, rb->req.tp_frame_nr * sizeof(*rb->ring_buffer_));
for (unsigned int i = 0; i < rb->req.tp_frame_nr; i++) {
rb->ring_buffer_[i].iov_base = static_cast<void *>(static_cast<char *>(rb->buffer)+(i*rb->req.tp_frame_size));
rb->ring_buffer_[i].iov_len = rb->req.tp_frame_size;
}
return 0;
}
void setRingBuffer(struct ring_buff *ringbuf) { rb_ = ringbuf; }
int setVlanTaggingStripping()
{
socklen_t len;
int val;
unsigned int sk_type, tp_reserve, maclen, tp_hdrlen, netoff, macoff;
unsigned int tp_hdr_len;
unsigned int frame_size = RX_FRAME_SIZE;
val = TPACKET_V2;
len = sizeof(val);
if (getsockopt(socket_, SOL_PACKET, PACKET_HDRLEN, &val, &len) < 0) {
std::cout << "Error: getsockopt(SOL_PACKET, PACKET_HDRLEN) failed (can't get TPACKET_V2 header len on packet)" << std::endl;
return 1;
}
tp_hdr_len = val;
std::cout << "TPACKET_V2 header is supported (hdr len is " << val << ")"<< std::endl;
std::cout << "tpacket2_hdrs header is supported (hdr len is " << sizeof(tpacket2_hdr) << ")"<< std::endl;
val = TPACKET_V2;
if (setsockopt(socket_, SOL_PACKET, PACKET_VERSION, &val, sizeof(val)) < 0) {
std::cout << "Error: setsockopt(SOL_PACKET, PACKET_VERSION) failed (can't activate TPACKET_V2 on packet)" << std::endl;
return 1;
}
std::cout << "TPACKET_V2 version is configured !!! " << std::endl;
/* Reserve space for VLAN tag reconstruction */
val = VLAN_TAG_LEN;
if (setsockopt(socket_, SOL_PACKET, PACKET_RESERVE, &val, sizeof(val)) < 0) {
std::cout << "Error: setsockopt(SOL_PACKET, PACKET_RESERVE) failed (can't set up reserve on packet)" << std::endl;
return 1;
}
std::cout<< "Reserve space for VLAN tag reconstruction is configured !!! " << std::endl;
return 0;
}
int setSoBufforce(int optname, int buffSize)
{
if (setsockopt(socket_, SOL_SOCKET, optname, &buffSize, sizeof(buffSize)) == -1) // use optname, not hard-coded SO_SNDBUFFORCE
{
std::cout << "Error: setsocketopt("<< (optname == SO_SNDBUFFORCE ? "SO_SNDBUFFORCE" : "SO_RCVBUFFORCE") << ") failed!" << std::endl;
return 1;
}
return 0;
}
createSocket(std::string ifName, INTERFACE_T ifType)
{
if (ifName.empty())
{
std::cout << "Error: interface is empty!" << std::endl;;
return NULL;
}
//Create the socket
if ( (socket_ = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL))) == -1 )
{
std::cout << "Error: calling socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)) failed!" << std::endl;
}
std::cout << "Creating Socket on interface= " << ifName << " to listen to ETH_P_ALL"<<std::endl;;
s->setIfFlags(IFF_PROMISC|IFF_BROADCAST);
//allocate space for ring buffer
ring_buff *rb = (ring_buff *) malloc(sizeof(ring_buff));
// use the same size for RX/TX ring
//set the version , here I insert the use of TPACKET_V2!
setVlanTaggingStripping();
rb->req.tp_block_size = RX_BLOCK_SIZE;
rb->req.tp_block_nr = RX_BLOCK_NUM;
rb->req.tp_frame_size = RX_FRAME_SIZE;
rb->req.tp_frame_nr = RX_FRAME_NUM;
setPacketRing(PACKET_RX_RING,&rb->req);
rb->size = (rb->req.tp_block_size)*(rb->req.tp_block_nr);
rb->cur_frame = 0;
// Tweak send/rcv buffer size
int sndBufSz = 4194304; // Send buffer in bytes
int rcvBufSz = 4194304; // Receive buffer in bytes
if (setSoBufforce(SO_SNDBUFFORCE, sndBufSz))
{
//close socket
}
if (setSoBufforce(SO_RCVBUFFORCE, rcvBufSz))
{
//close socket
}
// Add ARP filter so we will only receive ARP packet on this socket
struct sock_filter BPF_code[6];
struct sock_fprog filter;
bindSocket();
if (packetMmap(rb))
{
std::cout << "Error: mmap() failed!" << std::endl;
//close socket
}
frameBufferCreate(rb);
setRingBuffer(rb);
}
In my function that receives packets I try to read information, in particular h_vlan_TCI via struct vlan_ethhdr* vlan_eth_h = (struct vlan_ethhdr*)&frame->eth_h, but I always receive 0x00! Any suggestions?
void readRawSocket(socket_)
{
while (*(unsigned long*)rb->ring_buffer_[rb->cur_frame].iov_base)
{
RxFrame* frame = (RxFrame *)rb->ring_buffer_[rb->cur_frame].iov_base;
#if 0
tpacket_hdr* h = &frame->tp_h;
char buffer[256];
sprintf (buffer, " -tpacket(v1): status=%ld,len=%d,snaplen=%d,mac=%d,net=%d,sec=%d,usec=%d",
h->tp_status, h->tp_len, h->tp_snaplen, h->tp_mac,h->tp_net, h->tp_sec, h->tp_usec);
std::cout << std::string(buffer) << std::endl;
#else
tpacket2_hdr* h = &frame->tp_h;
char buffer[512];
sprintf (buffer, " -tpacket(v2): status=%d,len=%d,snaplen=%d,mac=%d,net=%d,sec=%d,nsec=%d,vlan_tci=%d (vlan_tci=0x%04x)",
h->tp_status, h->tp_len, h->tp_snaplen, h->tp_mac, h->tp_net, h->tp_sec, h->tp_nsec, h->tp_vlan_tci, ntohs(h->tp_vlan_tci));
std::cout << std::string(buffer) << std::endl;
#endif
if ( ETH_P_8021Q == ntohs(frame->eth_h.h_proto) )
{
struct vlan_ethhdr* vlan_eth_h = (struct vlan_ethhdr*)&frame->eth_h;
int vlan_tag = VLAN_TAG(ntohs(vlan_eth_h->h_vlan_TCI));
std::cout << " -Vlan " << vlan_tag << " packet to this host received";
}
rb->cur_frame = ( rb->cur_frame+1) % rx_socket_->getFrameNum();
} // while()
}
When the kernel removes the VLAN tag it also changes eth_h.h_proto to the protocol that follows the VLAN tag, so ETH_P_8021Q == ntohs(frame->eth_h.h_proto) will most probably be false.
Also, if you are listening on the tagged interface (i.e. eth0.100) instead of the physical interface (eth0), you will not see the tags.
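With TPACKET_V2 the tag therefore has to be read back from the ring header rather than from the Ethernet header. A sketch; note that TP_STATUS_VLAN_VALID only exists on kernels >= 3.0, so on older kernels tp_vlan_tci == 0 is ambiguous between "no tag" and "a tag with VID 0", and tp_vlan_tci is already in host byte order (no ntohs needed):
tpacket2_hdr* h = &frame->tp_h;
#ifdef TP_STATUS_VLAN_VALID
if (h->tp_status & TP_STATUS_VLAN_VALID) {
    int vlan_id = h->tp_vlan_tci & 0x0FFF; // low 12 bits carry the VID
    std::cout << " -Vlan " << vlan_id << " (from tp_vlan_tci)" << std::endl;
}
#else
// Older kernel headers: tp_vlan_tci == 0 may mean "no tag" or a tag with VID 0.
#endif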