I was thinking of using Boost.Asio to read data from a SocketCAN socket.
There's nothing fancy going on in linux/can.h; the device should
behave like the loopback interface and be usable with a raw socket.
Looking at the basic_raw_socket interface, it seems that I can use
basic_raw_socket::assign to adopt the native socket created with
socket( PF_CAN, SOCK_RAW, CAN_RAW );
This is what I have so far:
namespace can {
class CanSocket {
public:
typedef boost::asio::ip::basic_endpoint<CanSocket> endpoint;
typedef boost::asio::ip::basic_resolver_query<CanSocket> resolver_query;
typedef boost::asio::ip::basic_resolver_iterator<CanSocket> resolver_iterator;
typedef boost::asio::basic_raw_socket<CanSocket> socket;
typedef boost::asio::ip::basic_resolver<CanSocket> resolver;
CanSocket()
: _protocol( CAN_RAW )
, _family( PF_CAN )
{
}
static CanSocket v4()
{
return CanSocket();
}
static CanSocket v6();
int type() const;
int protocol() const;
int family() const;
friend bool operator==(const CanSocket& p1, const CanSocket& p2)
{
return p1._protocol == p2._protocol && p1._family == p2._family;
}
friend bool operator!=(const CanSocket& p1, const CanSocket& p2)
{
return p1._protocol != p2._protocol || p1._family != p2._family;
}
private:
int _protocol;
int _family;
};
}
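For reference, basic_raw_socket only ever asks its Protocol type for family(), type(), and protocol(), plus a nested endpoint type. Below is a minimal sketch (not part of my original snippet) of what the declared-but-undefined members above could plausibly return, pulled into a standalone struct so it compiles on its own:
#include <sys/socket.h>
#include <linux/can.h>
#include <iostream>

// Sketch only: plausible bodies for the accessors declared in CanSocket above.
struct CanProtocol {
    int family() const { return PF_CAN; }     // protocol family
    int type() const { return SOCK_RAW; }     // socket type
    int protocol() const { return CAN_RAW; }  // raw CAN protocol
};

int main() {
    CanProtocol p;
    std::cout << p.family() << " " << p.type() << " " << p.protocol() << "\n";
}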
And this is how I use it in my application:
boost::asio::io_service ioserv;
CanSocket::socket s( ioserv );
int sock = socket( PF_CAN, SOCK_RAW, CAN_RAW );
s.assign(CanSocket::v4(), sock);
struct ifreq ifr;
strcpy(ifr.ifr_name, "vcan0");
ioctl(sock, SIOCGIFINDEX, &ifr); /* ifr.ifr_ifindex gets filled
* with that device's index */
/* Select that CAN interface, and bind the socket to it. */
/* this should be the endpoint */
struct sockaddr_can addr;
addr.can_family = AF_CAN;
addr.can_ifindex = ifr.ifr_ifindex;
/* s.bind (....) */
bind( sock, (struct sockaddr*)&addr, sizeof(addr) );
What I don't quite get is how to bind s to the local endpoint, since there are no IPs or ports involved.
Is there anything else that should be implemented besides the endpoint to get it going?
Here is a working example, assembled with the help of this thread:
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <iostream>
#include <net/if.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/can.h>
#include <linux/can/raw.h>
#include <boost/asio.hpp>
#include <boost/bind.hpp>
void data_send(void) {
std::cout << "omg sent" << std::endl;
}
void data_rec(struct can_frame &rec_frame,
boost::asio::posix::basic_stream_descriptor<> &stream) {
std::cout << std::hex << rec_frame.can_id << " ";
for (int i = 0; i < rec_frame.can_dlc; i++) {
std::cout << std::hex << int(rec_frame.data[i]) << " ";
}
std::cout << std::dec << std::endl;
stream.async_read_some(
boost::asio::buffer(&rec_frame, sizeof(rec_frame)),
boost::bind(data_rec, boost::ref(rec_frame), boost::ref(stream)));
}
int main(void) {
struct sockaddr_can addr;
struct can_frame frame;
struct can_frame rec_frame;
struct ifreq ifr;
int natsock = socket(PF_CAN, SOCK_RAW, CAN_RAW);
strcpy(ifr.ifr_name, "vcan0");
ioctl(natsock, SIOCGIFINDEX, &ifr);
addr.can_family = AF_CAN;
addr.can_ifindex = ifr.ifr_ifindex;
if (bind(natsock, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
perror("Error in socket bind");
return -2;
}
frame.can_id = 0x123;
frame.can_dlc = 2;
frame.data[0] = 0x11;
frame.data[1] = 0x23;
boost::asio::io_service ios;
boost::asio::posix::basic_stream_descriptor<> stream(ios);
stream.assign(natsock);
stream.async_write_some(boost::asio::buffer(&frame, sizeof(frame)),
boost::bind(data_send));
stream.async_read_some(
boost::asio::buffer(&rec_frame, sizeof(rec_frame)),
boost::bind(data_rec, boost::ref(rec_frame), boost::ref(stream)));
ios.run();
}
The solution is to use posix::stream_descriptor.
Just open the native socket, bind it, and then hand it to Asio with posix::basic_stream_descriptor::assign.
Related
I am implementing a hostname / IP address parser for a TCP/IP client/server program.
I can successfully convert IPv4 addresses from sockaddr_in* to boost::asio::ip::address_v4, but I fail to get the IPv6 conversion from struct sockaddr_in6* to boost::asio::ip::address_v6 right:
#include <iostream>
using std::cout;
#include <netdb.h>
#include <stdexcept>
using std::domain_error;
#include <sys/socket.h>
#include <string>
using std::string;
#include <vector>
using std::vector;
#include <boost/asio.hpp>
using boost::asio::ip::address;
using boost::asio::ip::address_v4;
using boost::asio::ip::address_v6;
vector<address> getAddresses(string const &hostname)
{
struct addrinfo req = {.ai_family = AF_UNSPEC, .ai_socktype = SOCK_STREAM};
struct addrinfo *pai;
int error = getaddrinfo(hostname.c_str(), nullptr, &req, &pai);
if (error)
throw domain_error("Could not resolve host name.");
vector<address> addresses;
for(struct addrinfo *info = pai; info != nullptr; info = info->ai_next) {
if (info->ai_family == AF_INET) {
auto ipv4socket = reinterpret_cast<struct sockaddr_in*>(info->ai_addr);
auto ipv4addr = address_v4(htonl(ipv4socket->sin_addr.s_addr));
addresses.emplace_back(ipv4addr);
}
/*
* TODO: Implement IPv6 support.
else {
auto ipv6socket = reinterpret_cast<struct sockaddr_in6*>(info->ai_addr);
auto ipv6base = reinterpret_cast<array<unsigned char, 16>>(ipv6socket->sin6_addr.__in6_u);
auto ipv6addr = address_v6(ipv6base, ipv6socket->sin6_scope_id);
addresses.emplace_back(ipv6addr);
}
*/
}
return addresses;
}
int main()
{
auto addresses = getAddresses("www.google.de");
for (auto ipa : addresses)
cout << "Address: " << ipa << "\n";
return 0;
}
Got it working:
vector<address> getAddresses(string const &hostname)
{
struct addrinfo req = {.ai_family = AF_UNSPEC, .ai_socktype = SOCK_STREAM};
struct addrinfo *pai;
int error = getaddrinfo(hostname.c_str(), nullptr, &req, &pai);
if (error)
throw domain_error("Could not resolve host name.");
vector<address> addresses;
for(struct addrinfo *info = pai; info != nullptr; info = info->ai_next) {
if (info->ai_family == AF_INET) {
auto ipv4socket = reinterpret_cast<struct sockaddr_in*>(info->ai_addr);
auto ipv4addr = address_v4(ntohl(ipv4socket->sin_addr.s_addr)); // ntohl: network to host order
addresses.emplace_back(ipv4addr);
} else {
auto ipv6socket = reinterpret_cast<struct sockaddr_in6*>(info->ai_addr);
// address_v6::bytes_type is std::array<unsigned char, 16>; copy the raw
// address bytes into it (add #include <cstring> for memcpy)
address_v6::bytes_type bytes;
memcpy(bytes.data(), ipv6socket->sin6_addr.s6_addr, bytes.size());
auto ipv6addr = address_v6(bytes, ipv6socket->sin6_scope_id);
addresses.emplace_back(ipv6addr);
}
}
freeaddrinfo(pai); // release the list allocated by getaddrinfo
return addresses;
}
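For comparison, Boost.Asio's own resolver can produce the same address list without raw getaddrinfo or any sockaddr casts. A minimal sketch using the same io_service-era API as the rest of this thread; the "80" service string only selects a port and does not affect the addresses returned:
#include <boost/asio.hpp>
#include <iostream>

int main() {
    boost::asio::io_service io;
    boost::asio::ip::tcp::resolver resolver(io);
    boost::asio::ip::tcp::resolver::query query("www.google.de", "80");
    // A default-constructed iterator acts as the end marker.
    for (auto it = resolver.resolve(query);
         it != boost::asio::ip::tcp::resolver::iterator(); ++it)
        std::cout << "Address: " << it->endpoint().address() << "\n";
}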
Next station: codereview
I have created a client-server program based on one of the tests in the gRPC repo.
The UDP code in gRPC is not built on top of its RPC layer, and so there is no notion of stubs, etc.
My code works, though I've noticed that under even mild stress a huge fraction of the messages gets dropped, and I'm not sure whether that's entirely due to the lossy nature of UDP or something about my code.
I have two questions:
Main question: Is there a gRPC-way to set deadlines for UDP messages? I am familiar with ClientContext and its deadline feature, but I don't know how to use it in a non-TCP RPC-less code. If not, what is the best way to achieve this?
Is a drop rate of 50% for UDP communication over localhost plausible?
My code (It's quite long, so just attaching it for reference. My main question doesn't require reading the code):
#include <netdb.h>
#include <string>
#include <thread>
#include <vector>
// grpc headers
#include <grpcpp/grpcpp.h>
#include "src/core/lib/iomgr/udp_server.h"
#include "src/core/lib/iomgr/socket_utils_posix.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
using namespace std;
int client_port = 6666;
int server_port = 5555;
int num_of_msgs = 1000;
int listening_port;
int remote_port;
int fd;
int received_msgs_cnt = 0;
vector<bool> is_received(num_of_msgs, false);
enum Role {
CLIENT,
SERVER
};
struct Request {
int id;
};
struct Response {
int id;
};
Role role;
bool udpServerFinished = false;
void sendUdp(const char *hostname, int port, const char* payload, size_t size) {
auto transferred = write(fd, (void*)payload, size);
assert(size == transferred);
}
/***************************************
* UDP Handler class
* (will be generated by factory class)
* upon receiving a new message, the Read()
* function is invoked
***************************************/
class UdpHandler : public GrpcUdpHandler {
public:
UdpHandler(grpc_fd *emfd, void *user_data):
GrpcUdpHandler(emfd, user_data), emfd_(emfd) {
}
virtual ~UdpHandler() {}
static void startLoop(volatile bool &udpServerFinished) {
grpc_core::ExecCtx exec_ctx;
grpc_millis deadline;
gpr_mu_lock(g_mu);
while (!udpServerFinished) {
deadline = grpc_timespec_to_millis_round_up(gpr_time_add(
gpr_now(GPR_CLOCK_MONOTONIC),
gpr_time_from_millis(10000, GPR_TIMESPAN)));
grpc_pollset_worker *worker = nullptr;
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"pollset_work", grpc_pollset_work(UdpHandler::g_pollset, &worker, deadline)));
gpr_mu_unlock(UdpHandler::g_mu);
grpc_core::ExecCtx::Get()->Flush();
gpr_mu_lock(UdpHandler::g_mu);
}
gpr_mu_unlock(g_mu);
}
static grpc_pollset *g_pollset;
static gpr_mu *g_mu;
public:
static int g_num_listeners;
protected:
bool Read() override {
char read_buffer[512];
ssize_t byte_count;
gpr_mu_lock(UdpHandler::g_mu);
byte_count = recv(grpc_fd_wrapped_fd(emfd()), read_buffer, sizeof(read_buffer), 0);
processIncomingMsg((void*)read_buffer, byte_count);
GPR_ASSERT(GRPC_LOG_IF_ERROR("pollset_kick",
grpc_pollset_kick(UdpHandler::g_pollset, nullptr)));
gpr_mu_unlock(UdpHandler::g_mu);
return false;
}
void processIncomingMsg(void* msg, ssize_t size) {
received_msgs_cnt++;
(void)size;
int id;
if (role == Role::CLIENT) {
Response res;
assert(size == sizeof(Response));
memcpy((void*)&res, (void*)msg, size);
id = res.id;
cout << "Msg: response for request " << res.id << endl;
}
else {
Request req;
assert(size == sizeof(Request));
memcpy((void*)&req, (void*)msg, size);
id = req.id;
cout << "Msg: request " << req.id << endl;
// send response
Response res;
res.id = req.id;
sendUdp("127.0.0.1", remote_port, (const char*)&res, sizeof(Response));
}
// check for termination condition (both for client and server)
if (received_msgs_cnt == num_of_msgs) {
cout << "This is the last msg" << endl;
udpServerFinished = true;
}
// mark the id of the current message
is_received[id] = true;
// if this was the last message, print the missing msg ids
if (id == num_of_msgs - 1) {
cout << "missing ids: ";
for (int i = 0; i < num_of_msgs; i++) {
if (is_received[i] == false)
cout << i << ", ";
}
cout << endl;
cout << "% of missing messages: "
<< 1.0 - ((double)received_msgs_cnt / num_of_msgs) << endl;
}
}
void OnCanWrite(void* /*user_data*/, grpc_closure* /*notify_on_write_closure*/) override {
gpr_mu_lock(g_mu);
GPR_ASSERT(GRPC_LOG_IF_ERROR("pollset_kick",
grpc_pollset_kick(UdpHandler::g_pollset, nullptr)));
gpr_mu_unlock(g_mu);
}
void OnFdAboutToOrphan(grpc_closure *orphan_fd_closure, void* /*user_data*/) override {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, orphan_fd_closure, GRPC_ERROR_NONE);
}
grpc_fd *emfd() { return emfd_; }
private:
grpc_fd *emfd_;
};
int UdpHandler::g_num_listeners = 1;
grpc_pollset *UdpHandler::g_pollset;
gpr_mu *UdpHandler::g_mu;
/****************************************
* Factory class (generated UDP handler)
****************************************/
class UdpHandlerFactory : public GrpcUdpHandlerFactory {
public:
GrpcUdpHandler *CreateUdpHandler(grpc_fd *emfd, void *user_data) override {
UdpHandler *handler = new UdpHandler(emfd, user_data);
return handler;
}
void DestroyUdpHandler(GrpcUdpHandler *handler) override {
delete reinterpret_cast<UdpHandler *>(handler);
}
};
/****************************************
* Main function
****************************************/
int main(int argc, char *argv[]) {
if (argc != 2) {
cerr << "Usage: './run client' or './run server' " << endl;
return 1;
}
string r(argv[1]);
if (r == "client") {
cout << "Client is initializing to send requests!" << endl;
role = Role::CLIENT;
listening_port = client_port;
remote_port = server_port;
}
else if (r == "server") {
cout << "Server is initializing to accept requests!" << endl;
role = Role::SERVER;
listening_port = server_port;
remote_port = client_port;
}
else {
cerr << "Usage: './run client' or './run server' " << endl;
return 1;
}
/********************************************************
* Initialize UDP Listener
********************************************************/
/* Initialize the grpc library. After it's called,
* a matching invocation to grpc_shutdown() is expected. */
grpc_init();
grpc_core::ExecCtx exec_ctx;
UdpHandler::g_pollset = static_cast<grpc_pollset *>(
gpr_zalloc(grpc_pollset_size()));
grpc_pollset_init(UdpHandler::g_pollset, &UdpHandler::g_mu);
grpc_resolved_address resolved_addr;
struct sockaddr_storage *addr =
reinterpret_cast<struct sockaddr_storage *>(resolved_addr.addr);
int svrfd;
grpc_udp_server *s = grpc_udp_server_create(nullptr);
grpc_pollset *pollsets[1];
memset(&resolved_addr, 0, sizeof(resolved_addr));
resolved_addr.len = static_cast<socklen_t>(sizeof(struct sockaddr_storage));
addr->ss_family = AF_INET;
grpc_sockaddr_set_port(&resolved_addr, listening_port);
/* setup UDP server */
UdpHandlerFactory handlerFactory;
int rcv_buf_size = 1024;
int snd_buf_size = 1024;
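// NB: 1 KiB receive/send buffers are tiny for a burst of 1000 datagrams and
// are a likely contributor to the drop rate asked about above.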
GPR_ASSERT(grpc_udp_server_add_port(s, &resolved_addr, rcv_buf_size,
snd_buf_size, &handlerFactory,
UdpHandler::g_num_listeners) > 0);
svrfd = grpc_udp_server_get_fd(s, 0);
GPR_ASSERT(svrfd >= 0);
GPR_ASSERT(getsockname(svrfd, (struct sockaddr *) addr,
(socklen_t *) &resolved_addr.len) == 0);
GPR_ASSERT(resolved_addr.len <= sizeof(struct sockaddr_storage));
pollsets[0] = UdpHandler::g_pollset;
grpc_udp_server_start(s, pollsets, 1, nullptr);
string addr_str = grpc_sockaddr_to_string(&resolved_addr, 1);
cout << "UDP Server listening on: " << addr_str << endl;
thread udpPollerThread(
UdpHandler::startLoop, ref(udpServerFinished));
/********************************************************
* Establish connection to the other side
********************************************************/
struct sockaddr_in serv_addr;
struct hostent *server = gethostbyname("127.0.0.1");
bzero((char *) &serv_addr, sizeof(serv_addr));
serv_addr.sin_family = AF_INET;
bcopy((char *) server->h_addr,
(char *) &serv_addr.sin_addr.s_addr,
server->h_length);
serv_addr.sin_port = htons(remote_port);
fd = socket(serv_addr.sin_family, SOCK_DGRAM, 0);
GPR_ASSERT(fd >= 0);
GPR_ASSERT(connect(fd, (struct sockaddr *) &serv_addr, sizeof(serv_addr)) == 0);
/********************************************************
* Send requests
********************************************************/
if (role == Role::CLIENT) {
static int counter = 0;
for (int i = 0; i < num_of_msgs; i++) {
Request req;
req.id = counter++;
cout << "Sending request " << req.id << endl;
sendUdp("127.0.0.1", remote_port, (char*)&req, sizeof(Request));
}
}
/********************************************************
* wait for client to finish
********************************************************/
udpPollerThread.join();
/********************************************************
* cleanup
********************************************************/
close(fd);
gpr_free(UdpHandler::g_pollset);
grpc_shutdown();
cout << "finished successfully!" << endl;
return 0;
}
Compiled with:
-std=c++17 -I$(GRPC_DIR) -I$(GRPC_DIR)/third_party/abseil-cpp
Linked with:
pkg-config --libs grpc++
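On the main question: ClientContext::set_deadline lives in the RPC layer, which this raw-UDP path bypasses, so any deadline has to be enforced at the socket or application level. A minimal sketch of one substitute, assuming a plain POSIX UDP fd like the one above (this is not a gRPC API):
#include <sys/socket.h>
#include <sys/time.h>

// Make blocking recv() calls on the socket give up after `millis` ms.
// After the timeout, recv() returns -1 with errno EAGAIN/EWOULDBLOCK,
// which the caller can count as a lost or late message.
bool set_recv_deadline(int fd, int millis) {
    struct timeval tv;
    tv.tv_sec = millis / 1000;
    tv.tv_usec = (millis % 1000) * 1000;
    return setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) == 0;
}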
I am struggling with a UDP packet sniffing program that should capture packets as efficiently as Wireshark does. What I simply do is open a UDP socket with the highest thread priority to capture packets from 192.168.2.20 on port 5001.
After quite a few trials (sending a couple of seconds of UDP traffic on port 5001 from another computer that has the 192.168.2.20 interface, using iperf), I came up with a producer/consumer multithreaded program in C++. My objective is to print out the size and the identification number of each received packet until the transmission ends (the program will run for weeks).
So I use the producer's buffer as a queue to hold received UDP packets until a queue limit is reached (i.e. 40000 entries). Once the limit is reached, the producer copies its contents into another queue buffer, which the consumer thread works from, and clears its own queue to continue receiving UDP packets; the idea was to minimise the synchronization needed between the threads. However, my program does not work perfectly. Below is my code. How can I achieve my goal (printing out the total number of received UDP packets and their identification numbers) more efficiently?
#include <iostream>
#include <thread>
#include <array>
#include <vector>
#include <mutex>
#include <string>
#include <unistd.h>
#include <condition_variable>
#include <queue>
#include <algorithm>
#include <string.h>
#include <netinet/ip.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <netdb.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <chrono>
#include <sys/time.h>
#include <ctime>
#include <numeric>
using namespace std;
const int BUFFER_SIZE = 2000;
#define ETH_DATA_LEN 1512
#define UDP 0x11
#define SRC_ADDR "192.168.2.20"
mutex m;
mutex m_print;
bool is_qq_empty = true;
bool is_transmission_continue = true;
bool is_producer_started = false;
struct ReceiveBufferArray {
uint8_t buf[ETH_DATA_LEN];
int id;
time_t time;
int index;
};
vector<int> packetSize;
vector<int> consume_buffer;
vector<int> loss_buffer;
vector<std::time_t> time_buffer;
std::queue<ReceiveBufferArray> qq;
std::queue<ReceiveBufferArray> qq_copy;
int gmSocket;
struct sockaddr_in gmClientAddr;
struct sockaddr_in gmServerAddr;
socklen_t gmClientLen = sizeof(gmClientAddr);
int openSocket(const std::string &IpAddress, int Port)
{
int ret;
struct timeval timeout;
int optval = 1;
gmSocket = socket(AF_INET, SOCK_RAW, IPPROTO_UDP);
if (gmSocket < 0)
{
std::cout << "cannot Open datagram socket!! Ip: " << IpAddress << " - Port " << std::to_string(Port) << std::endl;
return -1;
}
/* Bind our local address so that the client can send to us */
gmServerAddr.sin_family = AF_INET;
gmServerAddr.sin_addr.s_addr =INADDR_ANY;
gmServerAddr.sin_port = htons(Port);
timeout.tv_sec = 10;// timeout for 10seconds
timeout.tv_usec = 0;
setsockopt(gmSocket, SOL_SOCKET, SO_RCVTIMEO, &timeout, sizeof(timeout));
setsockopt(gmSocket, SOL_SOCKET, SO_REUSEPORT, &optval, sizeof(optval));
setsockopt(gmSocket, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(optval));
std::cout << "Socket has been opened. Ip: " << IpAddress << " - Port " << std::to_string(Port) << std::endl;
return 0;
}
void clear(std::queue<ReceiveBufferArray> &q)
{
std::queue<ReceiveBufferArray> empty;
std::swap(q,empty);
}
std::queue<ReceiveBufferArray> copy_queue(std::queue<ReceiveBufferArray> &q)
{
std::queue<ReceiveBufferArray>Q2 =q;
return Q2;
}
void consumer_thread()
{
struct sockaddr_in source_socket_address, dest_socket_address;
memset(&source_socket_address, 0, sizeof(source_socket_address));
memset(&dest_socket_address, 0, sizeof(dest_socket_address));
uint8_t ethernet_data[ETH_DATA_LEN];
int old_val = 99999;
bool start_copying_flag = false;
while (is_transmission_continue || !qq_copy.empty())
{
if (!qq_copy.empty())
{
start_copying_flag = true;
// Record start time
m.lock();
std::copy(std::begin(qq_copy.front().buf),std::end(qq_copy.front().buf), std::begin(ethernet_data));
qq_copy.pop();
m.unlock();
struct iphdr *ip_packet = (struct iphdr *)ethernet_data;
if((ip_packet->saddr == inet_addr(SRC_ADDR)) && (ip_packet->protocol == UDP))
{
consume_buffer.push_back(ntohs(ip_packet->id));
std::cout << "id: " << std::to_string(ntohs(ip_packet->id))
<< ", Packet Number: " << std::to_string(consume_buffer.size())<<endl;
}
usleep(1);
}else if(qq_copy.empty() && start_copying_flag)
{
if(qq.size()>0)
{
m.lock();
qq_copy = copy_queue(qq);
clear(qq);
m.unlock();
}
}
}
}
void producer_thread()
{
int packet_size;
openSocket(SRC_ADDR,5001);
ReceiveBufferArray _rbuf;
int counter = 0;
while (is_transmission_continue)
{
packet_size = recvfrom(gmSocket , _rbuf.buf , ETH_DATA_LEN , 0 , NULL, NULL);
if (qq.size() < 40000)
{
counter++;
m.lock();
qq.push(_rbuf);
m.unlock();
std::cout <<"Packet Size : " << counter << endl;
} else {
std::cout << "PRODUCER EMPTY" << endl;
m.lock();
qq_copy = copy_queue(qq);
clear(qq); // clear under the lock, as in the consumer, so no packets slip in between
m.unlock();
is_producer_started = true;
}
if((packet_size < 0) && is_producer_started){
is_transmission_continue =false;
}
}
std::cout << "PRODUCER DONE" << endl;
}
int main()
{
setpriority(PRIO_PROCESS, 0, -20);
thread cons(consumer_thread);
thread prod(producer_thread);
prod.join();
cons.join();
return 0;
}
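For what it's worth, a condition-variable handoff removes both the busy-waiting and the unlocked access to qq.size() in the code above. A minimal sketch, assuming the ReceiveBufferArray struct defined in the question:
#include <condition_variable>
#include <mutex>
#include <queue>

std::mutex qm;
std::condition_variable qcv;
std::queue<ReceiveBufferArray> packets; // single shared queue, always accessed under qm
bool done = false;

// Producer: push one packet and wake the consumer.
void produce(const ReceiveBufferArray& pkt) {
    { std::lock_guard<std::mutex> lk(qm); packets.push(pkt); }
    qcv.notify_one();
}

// Consumer: sleep until a packet arrives or the producer finishes.
// Returns false once the queue is drained after shutdown.
bool consume(ReceiveBufferArray& out) {
    std::unique_lock<std::mutex> lk(qm);
    qcv.wait(lk, [] { return !packets.empty() || done; });
    if (packets.empty()) return false;
    out = packets.front();
    packets.pop();
    return true;
}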
I wrote a C++ demo of a TCP server with libuv. When I checked the CPU usage, I found that the demo runs as a single thread. How can I implement it with multiple threads?
Currently the demo can handle 100,000+ TCP requests per second, but it only uses one CPU core.
Code:
#include <iostream>
#include <atomic>
#include "uv.h"
#include <thread>
#include <mutex>
#include <map>
using namespace std;
auto loop = uv_default_loop();
struct sockaddr_in addr;
typedef struct {
uv_write_t req;
uv_buf_t buf;
} write_req_t;
typedef struct {
uv_stream_t* client;
uv_alloc_cb alloc_cb;
uv_read_cb read_cb;
} begin_read_req;
void alloc_buffer(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf) {
buf->base = (char*)malloc(suggested_size);
buf->len = suggested_size;
}
void free_write_req(uv_write_t *req) {
write_req_t *wr = (write_req_t*)req;
free(wr->buf.base);
free(wr);
}
void echo_write(uv_write_t *req, int status) {
if (status) {
fprintf(stderr, "Write error %s\n", uv_strerror(status));
}
free_write_req(req);
}
void on_close(uv_handle_t* handle) {
free(handle); // release the uv_tcp_t allocated in on_new_connection
}
void echo_read(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf) {
if (nread > 0) {
auto req = (write_req_t*)malloc(sizeof(write_req_t));
auto *aaa = (char*)malloc(5);
aaa[0] = '+';
aaa[1] = 'O';
aaa[2] = 'K';
aaa[3] = '\r';
aaa[4] = '\n';
req->buf = uv_buf_init(aaa, 5);
uv_write((uv_write_t*)req, client, &req->buf, 1, echo_write);
}
if (nread < 0) {
if (nread != UV_EOF)
fprintf(stderr, "Read error %s\n", uv_err_name(static_cast<unsigned int>(nread)));
uv_close((uv_handle_t*)client, on_close);
}
free(buf->base);
}
void acceptClientRead(uv_work_t *req) {
begin_read_req *data = (begin_read_req *)req->data;
uv_read_start(data->client, data->alloc_cb, data->read_cb);
}
void on_new_connection(uv_stream_t *server, int status) {
if (status < 0) {
cout << "New connection error:" << uv_strerror(status);
return;
}
uv_tcp_t *client = (uv_tcp_t *)malloc(sizeof(uv_tcp_t));
uv_tcp_init(loop, client);
uv_work_t *req = (uv_work_t *)malloc(sizeof(uv_work_t));
begin_read_req *read_req = (begin_read_req *)malloc(sizeof(begin_read_req));
read_req->client = (uv_stream_t *)client;
read_req->read_cb = echo_read;
read_req->alloc_cb = alloc_buffer;
req->data = read_req;
if (uv_accept(server, (uv_stream_t *)client) == 0) {
uv_read_start((uv_stream_t *)client, alloc_buffer, echo_read);
// uv_queue_work(workloop[0], req, acceptClientRead, nullptr);
}
else {
uv_close((uv_handle_t *)client, on_close);
}
}
void timer_callback(uv_timer_t* handle) {
cout << std::this_thread::get_id() << "---------" << "hello" << endl;
}
int main() {
uv_tcp_t server{};
uv_tcp_init(loop, &server);
uv_ip4_addr("0.0.0.0", 8790, &addr);
uv_tcp_bind(&server, (const struct sockaddr *) &addr, 0);
uv_listen((uv_stream_t *)&server, 511, on_new_connection);
uv_run(loop, UV_RUN_DEFAULT);
return 0;
}
Of course, I can make the write step asynchronous in echo_read, but I don't do any work before the write. Can I make the demo multi-threaded in another way to improve throughput?
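One common libuv pattern, sketched below under the assumption of libuv >= 1.7 on Linux, is to run one event loop per thread, each listening on its own socket bound with SO_REUSEPORT so the kernel spreads incoming connections across the threads. It reuses on_new_connection from the demo above, which would then need to initialise clients with server->loop instead of the global loop:
#include <thread>
#include <vector>
#include <sys/socket.h>
#include "uv.h"

void on_new_connection(uv_stream_t* server, int status); // from the demo above

void serve(int port) {
    uv_loop_t loop;
    uv_loop_init(&loop);
    uv_tcp_t server;
    // uv_tcp_init_ex creates the fd immediately, so SO_REUSEPORT can be
    // set before binding.
    uv_tcp_init_ex(&loop, &server, AF_INET);
    uv_os_fd_t fd;
    uv_fileno((uv_handle_t*)&server, &fd);
    int on = 1;
    setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &on, sizeof(on));
    struct sockaddr_in addr;
    uv_ip4_addr("0.0.0.0", port, &addr);
    uv_tcp_bind(&server, (const struct sockaddr*)&addr, 0);
    uv_listen((uv_stream_t*)&server, 511, on_new_connection);
    uv_run(&loop, UV_RUN_DEFAULT); // each thread drives its own loop
}

int main() {
    std::vector<std::thread> workers;
    for (unsigned i = 0; i < std::thread::hardware_concurrency(); ++i)
        workers.emplace_back(serve, 8790); // all threads share port 8790
    for (auto& t : workers) t.join();
}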
I am unsure how to solve my error. How do I make it so that my void run() function can see the variable 'intf'? The value of intf itself is supplied separately in a .cnf file. Thank you.
My errors are as follows:
monreqserver.cc: In member function 'void Pds::MyXtcMonitorServer::run()':
monreqserver.cc:57: error: 'intf' was not declared in this scope
My code is as follows:
#include "pdsdata/app/XtcMonitorServer.hh"
#include <errno.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <unistd.h>
#include <sys/types.h>
#include <time.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <stdlib.h>
#include <netinet/ip.h>
#include <netinet/udp.h>
#define mult_address "225.0.0.37"
#define mult_port "1100"
namespace Pds {
class MyXtcMonitorServer : public XtcMonitorServer {
public:
MyXtcMonitorServer(const char* tag,
unsigned sizeofBuffers,
unsigned numberofEvBuffers,
unsigned numberofEvQueues, const char * intf) :
XtcMonitorServer(tag,
sizeofBuffers,
numberofEvBuffers,
numberofEvQueues)
{
_init();
}
~MyXtcMonitorServer() {}
public:
void run() {
//////////////
//udp socket//
//////////////
int udp_socket_info;
struct sockaddr_in udp_server;
udp_socket_info = socket(AF_INET, SOCK_DGRAM, 0);
if (udp_socket_info == -1) {
puts("Could not create socket");
}
udp_server.sin_addr.s_addr = inet_addr(mult_address);
udp_server.sin_port = htons(1100);
udp_server.sin_family = AF_INET;
ifreq ifr;
ifr.ifr_addr.sa_family = AF_INET;
strcpy(ifr.ifr_name, intf);
if (ioctl(udp_socket_info, SIOCGIFADDR, &ifr)<0) {
perror("SIOCGIFADDR failed");
}
char* port = "1100";
char* ip = inet_ntoa(((struct sockaddr_in *)&ifr.ifr_addr)->sin_addr);
char* zero = "/0";
char ip_port[100];
sprintf(ip_port, "%s %s %s", ip, port, zero);
sendto(udp_socket_info , ip_port , strlen(ip_port), 0, (struct sockaddr *)&udp_server, sizeof(udp_server));
puts("Message Sent");
///////////////////////////////
///SETTING UP TCP CONNECTION///
///////////////////////////////
int tcp_socket_info, tcp_new_socket;
struct sockaddr_in tcp_server, tcp_client;
tcp_socket_info = socket(AF_INET, SOCK_STREAM, 0);
if (tcp_socket_info == -1) {
printf("Could not create socket");
}
tcp_server.sin_addr.s_addr = INADDR_ANY;
tcp_server.sin_family = AF_INET;
tcp_server.sin_port = htons(1100);
int y=1;
if(setsockopt(tcp_socket_info, SOL_SOCKET, SO_REUSEADDR, (char*)&y, sizeof(y)) == -1) {
perror("set reuseaddr");
}
//binds socket
if (bind(tcp_socket_info, (struct sockaddr *)&tcp_server, sizeof(tcp_server)) < 0) {
perror("Bind error");
}
//listen
listen(tcp_socket_info , 5);
//waiting for connection
puts("Waiting for incoming connections...");
int c = sizeof(struct sockaddr_in);
//accept connection loop
tcp_new_socket = accept(tcp_socket_info, (struct sockaddr *)&tcp_client, (socklen_t*)&c);
puts("Connection accepted");
while(1)
sleep(1);
}
private:
void _copyDatagram(Dgram* dg, char*) {}
void _deleteDatagram(Dgram* dg) {}
void _requestDatagram() {}
private:
};
};
using namespace Pds;
void usage(char* progname) {
printf("Usage: %s -p <platform> -P <partition> -i <node mask> -n <numb shm buffers> -s <shm buffer size> [-q <# event queues>] [-t <tag name>] [-d] [-c] [-g <max groups>] [-h]\n", progname);
}
int main(int argc, char** argv) {
const unsigned NO_PLATFORM = unsigned(-1UL);
unsigned platform=NO_PLATFORM;
const char* partition = 0;
const char* tag = 0;
const char* intf = 0;
int numberOfBuffers = 0;
unsigned sizeOfBuffers = 0;
unsigned nevqueues = 1;
unsigned node = 0xffff;
unsigned nodes = 6;
bool ldist = false;
int c;
while ((c = getopt(argc, argv, "I:p:i:g:n:P:s:q:t:dch")) != -1) {
errno = 0;
char* endPtr;
switch (c) {
case 'p':
platform = strtoul(optarg, &endPtr, 0);
if (errno != 0 || endPtr == optarg) platform = NO_PLATFORM;
break;
case 'I':
intf = optarg;
break;
case 'i':
node = strtoul(optarg, &endPtr, 0);
break;
case 'g':
nodes = strtoul(optarg, &endPtr, 0);
break;
case 'n':
sscanf(optarg, "%d", &numberOfBuffers);
break;
case 'P':
partition = optarg;
break;
case 't':
tag = optarg;
break;
case 'q':
nevqueues = strtoul(optarg, NULL, 0);
break;
case 's':
sizeOfBuffers = (unsigned) strtoul(optarg, NULL, 0);
break;
case 'd':
ldist = true;
break;
case 'h':
// help
usage(argv[0]);
return 0;
break;
default:
printf("Unrecogized parameter\n");
usage(argv[0]);
break;
}
}
if (!numberOfBuffers || !sizeOfBuffers || platform == NO_PLATFORM || !partition || node == 0xffff) {
fprintf(stderr, "Missing parameters!\n");
usage(argv[0]);
return 1;
}
if (numberOfBuffers<8) numberOfBuffers=8;
if (!tag) tag=partition;
printf("\nPartition Tag:%s\n", tag);
MyXtcMonitorServer* apps = new MyXtcMonitorServer(tag,
sizeOfBuffers,
numberOfBuffers,
nevqueues, intf);
apps->distribute(ldist);
apps->run();
return 0;
}
and the included header file is as follows:
#ifndef Pds_XtcMonitorServer_hh
#define Pds_XtcMonitorServer_hh
#include "pdsdata/app/XtcMonitorMsg.hh"
#include "pdsdata/xtc/TransitionId.hh"
#include <pthread.h>
#include <mqueue.h>
#include <queue>
#include <stack>
#include <vector>
#include <poll.h>
#include <time.h>
namespace Pds {
class Dgram;
class TransitionCache;
class XtcMonitorServer {
public:
XtcMonitorServer(const char* tag,
unsigned sizeofBuffers,
unsigned numberofEvBuffers,
unsigned numberofEvQueues, const char * intf);
virtual ~XtcMonitorServer();
public:
enum Result { Handled, Deferred };
Result events (Dgram* dg);
void discover ();
void routine ();
void unlink ();
public:
void distribute (bool);
protected:
int _init ();
private:
void _initialize_client();
mqd_t _openQueue (const char* name, mq_attr&);
void _flushQueue (mqd_t q);
void _flushQueue (mqd_t q, char* m, unsigned sz);
void _moveQueue (mqd_t iq, mqd_t oq);
bool _send (Dgram*);
void _update (int,TransitionId::Value);
void _clearDest (mqd_t);
private:
virtual void _copyDatagram (Dgram* dg, char*);
virtual void _deleteDatagram(Dgram* dg);
virtual void _requestDatagram();
private:
const char* _tag; // name of the complete shared memory segment
unsigned _sizeOfBuffers; // size of each shared memory datagram buffer
unsigned _numberOfEvBuffers; // number of shared memory buffers for events
unsigned _numberOfEvQueues; // number of message queues for events
char* _myShm; // the pointer to start of shared memory
XtcMonitorMsg _myMsg; // template for messages
mqd_t _discoveryQueue; // message queue for clients to get
// the TCP port for initiating connections
mqd_t _myInputEvQueue; // message queue for returned events
mqd_t* _myOutputEvQueue; // message queues[nclients] for distributing events
std::vector<int> _myTrFd; // TCP sockets to clients for distributing
// transitions and detecting disconnects.
std::vector<int> _msgDest; // last client to which the buffer was sent
TransitionCache* _transitionCache;
int _initFd;
pollfd* _pfd; /* poll descriptors for:
** 0 new client connections
** 1 buffer returned from client
** 2 events to be distributed
** 3+ transition send/receive */
int _nfd;
mqd_t _shuffleQueue; // message queue for pre-distribution event processing
mqd_t _requestQueue; // message queue for buffers awaiting request completion
timespec _tmo;
pthread_t _discThread; // thread for receiving new client connections
pthread_t _taskThread; // thread for datagram distribution
unsigned _ievt; // event vector
};
};
#endif
The variable intf is a local variable of main(). It is therefore unknown outside the scope of main(), which is why you can't access it in the member functions of your class.
Three possible solutions:
you make the variable global (as it seems to be a global parameter that applies to all the classes)
you make the variable a public static member of the class; you can then initialize it from main() by qualifying it with the class name
or you define it as a parameter of the run() member function (and invoke run() from main() accordingly)
The last one is the one I'd choose, but I don't know enough about the context to give more objective advice:
// in the class:
void run(const char* intf) { // for convenience you can use the same name
...
}
// in main:
...
apps->run(intf); // pass the local variable as parameter
The name intf is referred to in the function run, but the compiler does not see any declaration of it before its use in the function:
void run() {
//...
strcpy(ifr.ifr_name, intf);
^^^^^