How to accept TCP connections with multiple threads using libuv? - c++

I wrote a C++ demo of a TCP server with libuv. When I checked the CPU usage, I found the demo runs on a single thread. How can I implement it with multiple threads?
Currently, the demo can handle 100,000+ TCP requests per second, but it only saturates one CPU core.
Code:
#include <iostream>
#include <atomic>
#include "uv.h"
#include <thread>
#include <mutex>
#include <map>

using namespace std;

auto loop = uv_default_loop();
struct sockaddr_in addr;

typedef struct {
    uv_write_t req;
    uv_buf_t buf;
} write_req_t;

typedef struct {
    uv_stream_t* client;
    uv_alloc_cb alloc_cb;
    uv_read_cb read_cb;
} begin_read_req;

void alloc_buffer(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf) {
    buf->base = (char*)malloc(suggested_size);
    buf->len = suggested_size;
}

void free_write_req(uv_write_t *req) {
    write_req_t *wr = (write_req_t*)req;
    free(wr->buf.base);
    free(wr);
}

void echo_write(uv_write_t *req, int status) {
    if (status) {
        fprintf(stderr, "Write error %s\n", uv_strerror(status));
    }
    free_write_req(req);
}

void echo_read(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf) {
    if (nread > 0) {
        // reply with "+OK\r\n"
        auto req = (write_req_t*)malloc(sizeof(write_req_t));
        auto *aaa = (char*)malloc(5);
        aaa[0] = '+';
        aaa[1] = 'O';
        aaa[2] = 'K';
        aaa[3] = '\r';
        aaa[4] = '\n';
        req->buf = uv_buf_init(aaa, 5);
        uv_write((uv_write_t*)req, client, &req->buf, 1, echo_write);
    }
    if (nread < 0) {
        if (nread != UV_EOF)
            fprintf(stderr, "Read error %s\n", uv_err_name(static_cast<int>(nread)));
        uv_close((uv_handle_t*)client, nullptr);
    }
    free(buf->base);
}

void acceptClientRead(uv_work_t *req) {
    begin_read_req *data = (begin_read_req *)req->data;
    uv_read_start(data->client, data->alloc_cb, data->read_cb);
}

void on_new_connection(uv_stream_t *server, int status) {
    if (status < 0) {
        cout << "New connection error:" << uv_strerror(status);
        return;
    }
    uv_tcp_t *client = (uv_tcp_t *)malloc(sizeof(uv_tcp_t));
    uv_tcp_init(loop, client);
    uv_work_t *req = (uv_work_t *)malloc(sizeof(uv_work_t));
    begin_read_req *read_req = (begin_read_req *)malloc(sizeof(begin_read_req));
    read_req->client = (uv_stream_t *)client;
    read_req->read_cb = echo_read;
    read_req->alloc_cb = alloc_buffer;
    req->data = read_req;
    if (uv_accept(server, (uv_stream_t *)client) == 0) {
        uv_read_start((uv_stream_t *)client, alloc_buffer, echo_read);
        // uv_queue_work(workloop[0], req, acceptClientRead, nullptr);
    }
    else {
        uv_close((uv_handle_t *)client, nullptr);
    }
}

void timer_callback(uv_timer_t* handle) {
    cout << std::this_thread::get_id() << "---------" << "hello" << endl;
}

int main() {
    uv_tcp_t server{};
    uv_tcp_init(loop, &server);
    uv_ip4_addr("0.0.0.0", 8790, &addr);
    uv_tcp_bind(&server, (const struct sockaddr *) &addr, 0);
    uv_listen((uv_stream_t *)&server, 511, on_new_connection);
    uv_run(loop, UV_RUN_DEFAULT);
    return 0;
}
Of course, I could make the write step in echo_read asynchronous, but I don't do any real work before the write. Can I make the demo multi-threaded some other way to improve the throughput?
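(Aside, not from the original post: one common approach is to run one libuv event loop per thread and give each thread its own listening socket bound with SO_REUSEPORT, so the kernel spreads incoming connections across the threads. The sketch below is an illustration under stated assumptions, not a drop-in fix: SO_REUSEPORT load-balancing is Linux-specific (kernel 3.9+), error handling is omitted, and on_new_connection would need to initialize clients with server->loop instead of the global default loop.)
// Hedged sketch: one event loop per thread, each listening on its own
// SO_REUSEPORT socket (Linux-specific; error handling omitted).
#include <sys/socket.h>
#include <netinet/in.h>
#include <thread>
#include <vector>
#include "uv.h"

void run_listener() {
    uv_loop_t loop;
    uv_loop_init(&loop);
    // Create a plain socket and enable SO_REUSEPORT before binding,
    // so every thread can listen on the same port.
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    int one = 1;
    setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
    struct sockaddr_in addr {};
    uv_ip4_addr("0.0.0.0", 8790, &addr);
    bind(fd, (const struct sockaddr*)&addr, sizeof(addr));
    // Hand the pre-configured fd to libuv and listen on this thread's loop.
    uv_tcp_t server;
    uv_tcp_init(&loop, &server);
    uv_tcp_open(&server, fd);
    uv_listen((uv_stream_t*)&server, 511, on_new_connection);
    uv_run(&loop, UV_RUN_DEFAULT);
}

int main() {
    std::vector<std::thread> threads;
    for (unsigned i = 0; i < std::thread::hardware_concurrency(); ++i)
        threads.emplace_back(run_listener);
    for (auto& t : threads) t.join();
}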

Related

gRPC UDP with Deadline

I have created a client-server program based on one of the tests in the gRPC repo.
The UDP code in gRPC is not built on top of its RPC layer, and so there is no notion of stubs, etc.
My code works, though I've noticed that under even mild stress, a huge fraction of messages gets dropped, and I'm not sure whether it's entirely due to the lossy nature of UDP or something in my code.
I have two questions:
Main question: Is there a gRPC way to set deadlines for UDP messages? I am familiar with ClientContext and its deadline feature, but I don't know how to use it in non-TCP, RPC-less code. If not, what is the best way to achieve this?
Is a drop rate of 50% sensible for UDP communication over localhost?
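(For illustration only: since this code bypasses gRPC's RPC layer, ClientContext deadlines do not apply to it; a deadline can instead be enforced at the application level by carrying a send timestamp in each message and discarding expired ones on receipt. The TimestampedRequest struct below is a hypothetical extension of the Request struct, and it assumes the two processes share a comparable clock, which holds on localhost.)
// Hedged sketch: application-level deadline for raw UDP messages.
#include <chrono>
#include <cstdint>

struct TimestampedRequest {
    int id;
    int64_t sent_ms;  // sender's wall clock, milliseconds since epoch
};

static int64_t now_ms() {
    using namespace std::chrono;
    return duration_cast<milliseconds>(
        system_clock::now().time_since_epoch()).count();
}

// Receiver side: drop any message older than deadline_ms.
static bool expired(const TimestampedRequest& req, int64_t deadline_ms) {
    return now_ms() - req.sent_ms > deadline_ms;
}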
My code (it's quite long, so I'm just attaching it for reference; my main question doesn't require reading it):
#include <netdb.h>
#include <string>
#include <thread>
#include <vector>
// grpc headers
#include <grpcpp/grpcpp.h>
#include "src/core/lib/iomgr/udp_server.h"
#include "src/core/lib/iomgr/socket_utils_posix.h"
#include "src/core/lib/iomgr/unix_sockets_posix.h"
#include "src/core/lib/iomgr/sockaddr_utils.h"
using namespace std;
int client_port = 6666;
int server_port = 5555;
int num_of_msgs = 1000;
int listening_port;
int remote_port;
int fd;
int received_msgs_cnt = 0;
vector<bool> is_received(num_of_msgs, false);
enum Role {
CLIENT,
SERVER
};
struct Request {
int id;
};
struct Response {
int id;
};
Role role;
bool udpServerFinished = false;
void sendUdp(const char *hostname, int port, const char* payload, size_t size) {
auto transferred = write(fd, (void*)payload, size);
assert(size == transferred);
}
/***************************************
* UDP Handler class
* (will be generated by factory class)
* upon receiving a new message, the Read()
* function is invoked
***************************************/
class UdpHandler : public GrpcUdpHandler {
public:
UdpHandler(grpc_fd *emfd, void *user_data):
GrpcUdpHandler(emfd, user_data), emfd_(emfd) {
}
virtual ~UdpHandler() {}
static void startLoop(volatile bool &udpServerFinished) {
grpc_core::ExecCtx exec_ctx;
grpc_millis deadline;
gpr_mu_lock(g_mu);
while (!udpServerFinished) {
deadline = grpc_timespec_to_millis_round_up(gpr_time_add(
gpr_now(GPR_CLOCK_MONOTONIC),
gpr_time_from_millis(10000, GPR_TIMESPAN)));
grpc_pollset_worker *worker = nullptr;
GPR_ASSERT(GRPC_LOG_IF_ERROR(
"pollset_work", grpc_pollset_work(UdpHandler::g_pollset, &worker, deadline)));
gpr_mu_unlock(UdpHandler::g_mu);
grpc_core::ExecCtx::Get()->Flush();
gpr_mu_lock(UdpHandler::g_mu);
}
gpr_mu_unlock(g_mu);
}
static grpc_pollset *g_pollset;
static gpr_mu *g_mu;
public:
static int g_num_listeners;
protected:
bool Read() override {
char read_buffer[512];
ssize_t byte_count;
gpr_mu_lock(UdpHandler::g_mu);
byte_count = recv(grpc_fd_wrapped_fd(emfd()), read_buffer, sizeof(read_buffer), 0);
processIncomingMsg((void*)read_buffer, byte_count);
GPR_ASSERT(GRPC_LOG_IF_ERROR("pollset_kick",
grpc_pollset_kick(UdpHandler::g_pollset, nullptr)));
gpr_mu_unlock(UdpHandler::g_mu);
return false;
}
void processIncomingMsg(void* msg, ssize_t size) {
received_msgs_cnt++;
(void)size;
int id;
if (role == Role::CLIENT) {
Response res;
assert(size == sizeof(Response));
memcpy((void*)&res, (void*)msg, size);
id = res.id;
cout << "Msg: response for request " << res.id << endl;
}
else {
Request req;
assert(size == sizeof(Request));
memcpy((void*)&req, (void*)msg, size);
id = req.id;
cout << "Msg: request " << req.id << endl;
// send response
Response res;
res.id = req.id;
sendUdp("127.0.0.1", remote_port, (const char*)&res, sizeof(Response));
}
// check for termination condition (both for client and server)
if (received_msgs_cnt == num_of_msgs) {
cout << "This is the last msg" << endl;
udpServerFinished = true;
}
// mark the id of the current message
is_received[id] = true;
// if this was the last message, print the missing msg ids
if (id == num_of_msgs - 1) {
cout << "missing ids: ";
for (int i = 0; i < num_of_msgs; i++) {
if (is_received[i] == false)
cout << i << ", ";
}
cout << endl;
cout << "% of missing messages: "
<< 1.0 - ((double)received_msgs_cnt / num_of_msgs) << endl;
}
}
void OnCanWrite(void* /*user_data*/, grpc_closure* /*notify_on_write_closure*/) override {
gpr_mu_lock(g_mu);
GPR_ASSERT(GRPC_LOG_IF_ERROR("pollset_kick",
grpc_pollset_kick(UdpHandler::g_pollset, nullptr)));
gpr_mu_unlock(g_mu);
}
void OnFdAboutToOrphan(grpc_closure *orphan_fd_closure, void* /*user_data*/) override {
grpc_core::ExecCtx::Run(DEBUG_LOCATION, orphan_fd_closure, GRPC_ERROR_NONE);
}
grpc_fd *emfd() { return emfd_; }
private:
grpc_fd *emfd_;
};
int UdpHandler::g_num_listeners = 1;
grpc_pollset *UdpHandler::g_pollset;
gpr_mu *UdpHandler::g_mu;
/****************************************
* Factory class (generated UDP handler)
****************************************/
class UdpHandlerFactory : public GrpcUdpHandlerFactory {
public:
GrpcUdpHandler *CreateUdpHandler(grpc_fd *emfd, void *user_data) override {
UdpHandler *handler = new UdpHandler(emfd, user_data);
return handler;
}
void DestroyUdpHandler(GrpcUdpHandler *handler) override {
delete reinterpret_cast<UdpHandler *>(handler);
}
};
/****************************************
* Main function
****************************************/
int main(int argc, char *argv[]) {
if (argc != 2) {
cerr << "Usage: './run client' or './run server' " << endl;
return 1;
}
string r(argv[1]);
if (r == "client") {
cout << "Client is initializing to send requests!" << endl;
role = Role::CLIENT;
listening_port = client_port;
remote_port = server_port;
}
else if (r == "server") {
cout << "Server is initializing to accept requests!" << endl;
role = Role::SERVER;
listening_port = server_port;
remote_port = client_port;
}
else {
cerr << "Usage: './run client' or './run server' " << endl;
return 1;
}
/********************************************************
* Initialize UDP Listener
********************************************************/
/* Initialize the grpc library. After it's called,
* a matching invocation to grpc_shutdown() is expected. */
grpc_init();
grpc_core::ExecCtx exec_ctx;
UdpHandler::g_pollset = static_cast<grpc_pollset *>(
gpr_zalloc(grpc_pollset_size()));
grpc_pollset_init(UdpHandler::g_pollset, &UdpHandler::g_mu);
grpc_resolved_address resolved_addr;
struct sockaddr_storage *addr =
reinterpret_cast<struct sockaddr_storage *>(resolved_addr.addr);
int svrfd;
grpc_udp_server *s = grpc_udp_server_create(nullptr);
grpc_pollset *pollsets[1];
memset(&resolved_addr, 0, sizeof(resolved_addr));
resolved_addr.len = static_cast<socklen_t>(sizeof(struct sockaddr_storage));
addr->ss_family = AF_INET;
grpc_sockaddr_set_port(&resolved_addr, listening_port);
/* setup UDP server */
UdpHandlerFactory handlerFactory;
int rcv_buf_size = 1024;
int snd_buf_size = 1024;
GPR_ASSERT(grpc_udp_server_add_port(s, &resolved_addr, rcv_buf_size,
snd_buf_size, &handlerFactory,
UdpHandler::g_num_listeners) > 0);
svrfd = grpc_udp_server_get_fd(s, 0);
GPR_ASSERT(svrfd >= 0);
GPR_ASSERT(getsockname(svrfd, (struct sockaddr *) addr,
(socklen_t *) &resolved_addr.len) == 0);
GPR_ASSERT(resolved_addr.len <= sizeof(struct sockaddr_storage));
pollsets[0] = UdpHandler::g_pollset;
grpc_udp_server_start(s, pollsets, 1, nullptr);
string addr_str = grpc_sockaddr_to_string(&resolved_addr, 1);
cout << "UDP Server listening on: " << addr_str << endl;
thread udpPollerThread(
UdpHandler::startLoop, ref(udpServerFinished));
/********************************************************
* Establish connection to the other side
********************************************************/
struct sockaddr_in serv_addr;
struct hostent *server = gethostbyname("127.0.0.1");
bzero((char *) &serv_addr, sizeof(serv_addr));
serv_addr.sin_family = AF_INET;
bcopy((char *) server->h_addr,
(char *) &serv_addr.sin_addr.s_addr,
server->h_length);
serv_addr.sin_port = htons(remote_port);
fd = socket(serv_addr.sin_family, SOCK_DGRAM, 0);
GPR_ASSERT(fd >= 0);
GPR_ASSERT(connect(fd, (struct sockaddr *) &serv_addr, sizeof(serv_addr)) == 0);
/********************************************************
* Send requests
********************************************************/
if (role == Role::CLIENT) {
static int counter = 0;
for (int i = 0; i < num_of_msgs; i++) {
Request req;
req.id = counter++;
cout << "Sending request " << req.id << endl;
sendUdp("127.0.0.1", remote_port, (char*)&req, sizeof(Request));
}
}
/********************************************************
* wait for client to finish
********************************************************/
udpPollerThread.join();
/********************************************************
* cleanup
********************************************************/
close(fd);
gpr_free(UdpHandler::g_pollset);
grpc_shutdown();
cout << "finished successfully!" << endl;
return 0;
}
Compiled with:
-std=c++17 -I$(GRPC_DIR) -I$(GRPC_DIR)/third_party/abseil-cpp.
Linked with:
pkg-config --libs grpc++

C++ Cross-platform Socket with HTTP request

Currently, I am using a very simple code example which helps me work with cross-platform sockets.
Github: Socket c++ cross platform
The problem comes when I try to make an HTTP GET request.
Simple requests return a correct value, but when I send this more complex request:
"GET /1.0/stock/aapl/batch?types=quote,news,chart&range=1m&last=10 HTTP/1.0\r\nHost: api.iextrading.com\r\nConnection: close\r\n\r\n"
it returns nothing!
//Main.cpp
#include <string>
#include <ActiveSocket.h>

#define SEND(a,b,c,d) send(a, (const char *)b, (int)c, d)
typedef unsigned char uint8;

int main(int argc, char **argv)
{
    CActiveSocket socket; // Instantiate active socket object (defaults to TCP).
    char time[50];
    memset(&time, 0, 50);
    char host[] = "api.iextrading.com";
    char message[] = "GET /1.0/stock/aapl/batch?types=quote,news,chart&range=1m&last=10 HTTP/1.0\r\nHost: api.iextrading.com\r\nConnection: close\r\n\r\n";
    socket.Initialize();
    if (socket.Open(host, 80))
    {
        if (socket.Send((const uint8 *)message, strlen(message)))
        {
            socket.Receive(4096); // size of the buffer
            socket.Close();
        }
    }
    getchar();
    return 1;
}
//CActiveSocket.h
int32 CSimpleSocket::Send(const uint8 *pBuf, size_t bytesToSend)
{
SetSocketError(SocketSuccess);
m_nBytesSent = 0;
switch(m_nSocketType)
{
case CSimpleSocket::SocketTypeTcp:
{
if (IsSocketValid())
{
if ((bytesToSend > 0) && (pBuf != NULL))
{
m_timer.Initialize();
m_timer.SetStartTime();
//---------------------------------------------------------
// Check error condition and attempt to resend if call
// was interrupted by a signal.
//---------------------------------------------------------
do
{
m_nBytesSent = SEND(m_socket, pBuf, bytesToSend, 0);
TranslateSocketError();
} while (GetSocketError() == CSimpleSocket::SocketInterrupted);
m_timer.SetEndTime();
}
}
break;
}
...
}
int32 CSimpleSocket::Receive(int32 nMaxBytes, uint8 * pBuffer )
{
m_nBytesReceived = 0;
if (IsSocketValid() == false)
{
return m_nBytesReceived;
}
uint8 * pWorkBuffer = pBuffer;
if ( pBuffer == NULL )
{
if ((m_pBuffer != NULL) && (nMaxBytes != m_nBufferSize))
{
delete [] m_pBuffer;
m_pBuffer = NULL;
}
if (m_pBuffer == NULL)
{
m_nBufferSize = nMaxBytes;
m_pBuffer = new uint8[nMaxBytes];
}
pWorkBuffer = m_pBuffer;
}
SetSocketError(SocketSuccess);
m_timer.Initialize();
m_timer.SetStartTime();
switch (m_nSocketType)
{
case CSimpleSocket::SocketTypeTcp:
{
do
{
m_nBytesReceived = RECV(m_socket, (pWorkBuffer +m_nBytesReceived), nMaxBytes, m_nFlags);
TranslateSocketError();
} while ((GetSocketError() == CSimpleSocket::SocketInterrupted));
break;
}
...
}
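(An aside, not part of the original question: one likely cause is that a single Receive(4096) returns whatever has arrived so far, and the larger response to the complex query easily spans several TCP segments. Below is a hedged sketch of draining the full HTTP/1.0 response, assuming the clsocket API shown above, where GetData() exposes the internal receive buffer.)
// Hedged sketch: with "Connection: close", read until the peer closes,
// accumulating the response across multiple Receive() calls.
#include <string>
#include <ActiveSocket.h>

std::string receive_all(CActiveSocket& socket)
{
    std::string response;
    int32 received;
    // Receive() returns bytes read, 0 on orderly shutdown, negative on error.
    while ((received = socket.Receive(4096)) > 0)
    {
        response.append((const char*)socket.GetData(), received);
    }
    return response;
}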

Memory usage with IOCP [closed]

Closed. This question needs debugging details. It is not currently accepting answers.
Closed 5 years ago.
I am converting our code to use IOCP, and I have the communication relatively stable, but the application's memory usage keeps increasing. It looks like I am getting back (in completion function calls) far fewer OverlappedEx objects than I create. My code is below. What am I doing wrong?
#ifndef NETWORK_DATA
#define NETWORK_DATA
#include <afxwin.h>
#include <vector>
#include <string>
#include "CriticalSectionLocker.h"
using namespace std;
DWORD NetworkManager::NetworkThread(void* param)
{
bool bRun = true;
while (bRun)
{
DWORD wait = ::WaitForSingleObject(CCommunicationManager::s_hShutdownEvent, 0);
if (WAIT_OBJECT_0 == wait)
{
bRun = false;
DEBUG_LOG0("Shutdown event was signalled thread");
}
else
{
DWORD dwBytesTransfered = 0;
void* lpContext = nullptr;
OVERLAPPED* pOverlapped = nullptr;
BOOL bReturn = GetQueuedCompletionStatus(s_IOCompletionPort,
&dwBytesTransfered,
(LPDWORD)&lpContext,
&pOverlapped,
INFINITE);
if (nullptr == lpContext)
{
DEBUG_LOG0("invalid context");
/*continue;*/
}
else
{
if (bReturn && dwBytesTransfered > 0)
{
OverlappedEx* data = reinterpret_cast<OverlappedEx*>(pOverlapped);
ServerData* networkData = reinterpret_cast<ServerData*>(lpContext);
if (networkData && data)
{
switch(data->m_opType)
{
case OverlappedEx::OP_READ:
/*DEBUG_LOG4("device name: %s bytes received: %d socket: %d handle: %d",
networkData->Name().c_str(), dwBytesTransfered, networkData->Socket(), networkData->Handle());*/
networkData->CompleteReceive(dwBytesTransfered, data);
break;
case OverlappedEx::OP_WRITE:
/*DEBUG_LOG4("device name: %s bytes sent: %d socket: %d handle: %d",
networkData->Name().c_str(), dwBytesTransfered, networkData->Socket(), networkData->Handle());*/
networkData->CompleteSend(dwBytesTransfered, data);
break;
}
}
}
else
{
/*DEBUG_LOG2("GetQueuedCompletionStatus failed: bReturn: %d dwBytesTransferred: %u", bReturn, dwBytesTransfered);*/
}
}
}
}
return 0;
}
enum NetworkType
{
UDP,
TCP
};
struct OverlappedEx : public OVERLAPPED
{
enum OperationType
{
OP_READ,
OP_WRITE
};
const static int MAX_PACKET_SIZE = 2048;
WSABUF m_wBuf;
char m_buffer[MAX_PACKET_SIZE];
OperationType m_opType;
OverlappedEx()
{
Clear();
m_refCount = 1;
}
void AddRef()
{
::InterlockedIncrement(&m_refCount);
}
void Release()
{
::InterlockedDecrement(&m_refCount);
}
int Refcount() const
{
return InterlockedExchangeAdd((unsigned long*)&m_refCount, 0UL);
}
~OverlappedEx()
{
Clear();
}
void Clear()
{
memset(m_buffer, 0, MAX_PACKET_SIZE);
m_wBuf.buf = m_buffer;
m_wBuf.len = MAX_PACKET_SIZE;
Internal = 0;
InternalHigh = 0;
Offset = 0;
OffsetHigh = 0;
hEvent = nullptr;
m_opType = OP_READ;
}
private:
volatile LONG m_refCount;
};
class ServerData
{
public:
const static int MAX_REVEIVE_QUEUE_SIZE = 100;
const static int MAX_PACKET_SIZE = 2048;
const static int MAX_SEND_QUEUE_SIZE = 10;
const static int MAX_RECEIVE_QUEUE_SIZE = 100;
const static int MAX_OVERLAPPED_STRUCTS = 20;
ServerData(NetworkType netType, const string& sName, CCommunicationManager::CommHandle handle,
SOCKET sock, HANDLE IOPort) :
m_sName(sName)
{
InitializeCriticalSection(&m_receiveQueLock);
InitializeCriticalSection(&m_objectLock);
m_Handle = handle;
m_Socket = sock;
m_nIPAddress = 0;
m_netType = netType;
m_bEnabled = true;
m_ovlpIndex = 0;
for (int i = 0; i < MAX_OVERLAPPED_STRUCTS; ++i)
{
m_olps.push_back(new OverlappedEx);
}
/* Associate socket with completion handle */
if (m_Socket != 0)
{
CreateIoCompletionPort( reinterpret_cast<HANDLE>(m_Socket), IOPort, reinterpret_cast<ULONG_PTR>(this), 0 );
}
}
~ServerData()
{
CriticalSectionLocker lock(&m_receiveQueLock);
DeleteCriticalSection(&m_receiveQueLock);
DeleteCriticalSection(&m_objectLock);
closesocket(m_Socket);
}
const string& Name() const { return m_sName; }
bool Enabled() const { return m_bEnabled; }
void SetEnabled(bool bEnabled)
{
m_bEnabled = bEnabled;
}
int Handle() const { return m_Handle; }
void SetHandle(int handle)
{
m_Handle = handle;
}
unsigned long IPAddress() const { return m_nIPAddress; }
SOCKET Socket() const
{
return m_Socket;
}
void SetSocket(SOCKET sock)
{
m_Socket = sock;
}
void SetIPAddress(unsigned long nIP)
{
m_nIPAddress = nIP;
}
bool ValidTelegram(const vector<char>& telegram) const
{
return false;
}
OverlappedEx* GetBuffer()
{
OverlappedEx* ret = nullptr;
if (!m_olps.empty())
{
ret = m_olps.front();
m_olps.pop_front();
}
return ret;
}
void CompleteReceive(size_t numBytes, OverlappedEx* data)
{
//DEBUG_LOG1("%d buffers are available", AvailableBufferCount());
if (numBytes > 0)
{
vector<char> v(data->m_buffer, data->m_buffer + numBytes);
ReceivedData rd;
rd.SetData(v);
EnqueReceiveMessage(rd);
}
data->Release();
{
CriticalSectionLocker lock(&m_objectLock);
m_olps.push_back(data);
// DEBUG_LOG1("Queue size: %d", m_olps.size());
}
StartReceiving();
}
void CompleteSend(size_t numBytes, OverlappedEx* data)
{
data->Release();
{
CriticalSectionLocker lock(&m_objectLock);
m_olps.push_back(data);
//DEBUG_LOG1("Queue size: %d", m_olps.size());
}
//DEBUG_LOG2("Object: %s num sent: %d", Name().c_str(), numBytes);
}
void StartReceiving()
{
DWORD bytesRecv = 0;
sockaddr_in senderAddr;
DWORD flags = 0;
int senderAddrSize = sizeof(senderAddr);
int rc = 0;
CriticalSectionLocker lock(&m_objectLock);
auto olp = GetBuffer();
if (!olp)
{
if (...)
{
m_olps.push_back(new OverlappedEx);
olp = GetBuffer();
}
else
{
if (...)
{
DEBUG_LOG1("Name: %s ************* NO AVAILABLE BUFFERS - bailing ***************", Name().c_str());
}
return;
}
}
olp->Clear();
olp->m_opType = OverlappedEx::OP_READ;
olp->AddRef();
switch(GetNetworkType())
{
case UDP:
{
rc = WSARecvFrom(Socket(),
&olp->m_wBuf,
1,
&bytesRecv,
&flags,
(SOCKADDR *)&senderAddr,
&senderAddrSize, (OVERLAPPED*)olp, NULL);
}
break;
case TCP:
{
rc = WSARecv(Socket(),
&olp->m_wBuf,
1,
&bytesRecv,
&flags,
(OVERLAPPED*)olp, NULL);
}
break;
}
if (SOCKET_ERROR == rc)
{
DWORD err = WSAGetLastError();
if (err != WSA_IO_PENDING)
{
olp->Release();
m_olps.push_back(olp);
}
}
}
void SetWriteBuf(const SendData& msg, OverlappedEx* data)
{
int len = min(msg.Data().size(), MAX_PACKET_SIZE);
memcpy(data->m_buffer, &msg.Data()[0], len);
data->m_wBuf.buf = data->m_buffer;
data->m_wBuf.len = len;
}
void StartSending(const SendData& msg)
{
DEBUG_LOG1("device name: %s", Name().c_str());
int rc = 0;
DWORD bytesSent = 0;
DWORD flags = 0;
SOCKET sock = Socket();
int addrSize = sizeof(sockaddr_in);
CriticalSectionLocker lock(&m_objectLock);
//UpdateOverlapped(OverlappedEx::OP_WRITE);
auto olp = GetBuffer();
if (!olp)
{
if (...)
{
m_olps.push_back(new OverlappedEx);
olp = GetBuffer();
DEBUG_LOG2("name: %s ************* NO AVAILABLE BUFFERS new size: %d ***************", Name().c_str(), m_olps.size());
}
else
{
if (...)
{
DEBUG_LOG1("Name: %s ************* NO AVAILABLE BUFFERS - bailing ***************", Name().c_str());
}
return;
}
}
olp->Clear();
olp->m_opType = OverlappedEx::OP_WRITE;
olp->AddRef();
SetWriteBuf(msg, olp);
switch(GetNetworkType())
{
case UDP:
rc = WSASendTo(Socket(), &olp->m_wBuf, 1,
&bytesSent, flags, (sockaddr*)&msg.SendAddress(),
addrSize, (OVERLAPPED*)olp, NULL);
break;
case TCP:
rc = WSASend(Socket(), &olp->m_wBuf, 1,
&bytesSent, flags, (OVERLAPPED*)olp, NULL);
break;
}
if (SOCKET_ERROR == rc)
{
DWORD err = WSAGetLastError();
if (err != WSA_IO_PENDING)
{
olp->Release();
m_olps.push_back(olp);
}
}
}
size_t ReceiveQueueSize()
{
CriticalSectionLocker lock(&m_receiveQueLock);
return m_receiveDataQueue.size();
}
void GetAllData(vector <ReceivedData> & data)
{
CriticalSectionLocker lock(&m_receiveQueLock);
while (m_receiveDataQueue.size() > 0)
{
data.push_back(m_receiveDataQueue.front());
m_receiveDataQueue.pop_front();
}
}
void DequeReceiveMessage(ReceivedData& msg)
{
CriticalSectionLocker lock(&m_receiveQueLock);
if (m_receiveDataQueue.size() > 0)
{
msg = m_receiveDataQueue.front();
m_receiveDataQueue.pop_front();
}
}
template <class T>
void EnqueReceiveMessage(T&& data)
{
CriticalSectionLocker lock(&m_receiveQueLock);
if (m_receiveDataQueue.size() <= MAX_RECEIVE_QUEUE_SIZE)
{
m_receiveDataQueue.push_back(data);
}
else
{
static int s_nLogCount = 0;
if (s_nLogCount % 100 == 0)
{
DEBUG_LOG2("Max queue size was reached handle id: %d in %s", Handle(), Name().c_str());
}
s_nLogCount++;
}
}
NetworkType GetNetworkType() const
{
return m_netType;
}
private:
ServerData(const ServerData&);
ServerData& operator=(const ServerData&);
private:
bool m_bEnabled; //!< This member flags if this reciever is enabled for receiving incoming connections.
int m_Handle; //!< This member holds the handle for this receiver.
SOCKET m_Socket; //!< This member holds the socket information for this receiver.
unsigned long m_nIPAddress; //!< This member holds an IP address the socket is bound to.
deque < ReceivedData > m_receiveDataQueue;
CRITICAL_SECTION m_receiveQueLock;
CRITICAL_SECTION m_objectLock;
string m_sName;
NetworkType m_netType;
deque<OverlappedEx*> m_olps;
size_t m_ovlpIndex;
};
#endif
Your implementation of void Release() makes no sense: you decrement m_refCount, and then what? It must be
void Release()
{
    if (!InterlockedDecrement(&m_refCount)) delete this;
}
As a result you never free the OverlappedEx* data. That is just what I can see at first glance, and it gives you the memory leak.
Some more advice: WaitForSingleObject(CCommunicationManager::s_hShutdownEvent, 0) is a bad way to detect shutdown. Call only GetQueuedCompletionStatus, and to shut down, call PostQueuedCompletionStatus(s_IOCompletionPort, 0, 0, 0) several times (once per thread listening on s_IOCompletionPort); when a thread sees pOverlapped == 0, it should just exit.
Use
OverlappedEx* data = static_cast<OverlappedEx*>(pOverlapped);
instead of reinterpret_cast.
Make ~OverlappedEx() private; it must not be called directly, only via Release().
olp->Release();
m_olps.push_back(olp);
After you call Release() on an object you must not access it again, so call either olp->Release() or m_olps.push_back(olp), but not both; doing both defeats the whole logic of Release(). Perhaps you need to override operator delete of OverlappedEx (and of course operator new too) and call m_olps.push_back inside it.
Again, (OVERLAPPED*)olp: why the cast here? Because your struct inherits from OVERLAPPED, the compiler does the conversion automatically.
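(A hedged sketch of the shutdown pattern described above, using the same completion port; the helper names are mine:)
// Post one sentinel completion per worker instead of polling an event.
void ShutdownWorkers(HANDLE iocp, int numThreads)
{
    for (int i = 0; i < numThreads; ++i)
        PostQueuedCompletionStatus(iocp, 0, 0, nullptr);
}

DWORD NetworkThreadLoop(HANDLE iocp)
{
    for (;;)
    {
        DWORD dwBytesTransfered = 0;
        ULONG_PTR context = 0;
        OVERLAPPED* pOverlapped = nullptr;
        GetQueuedCompletionStatus(iocp, &dwBytesTransfered,
                                  &context, &pOverlapped, INFINITE);
        if (pOverlapped == nullptr)  // sentinel posted by ShutdownWorkers
            return 0;
        OverlappedEx* data = static_cast<OverlappedEx*>(pOverlapped);
        // ... dispatch on data->m_opType as in NetworkThread above ...
    }
}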

ZeroMQ PUB/SUB bind subscriber

I'm studying ZeroMQ on my own.
I tested PUB as a server (bind) with SUB as a client (connect), and it worked fine. The opposite (PUB as a client (connect), SUB as a server (bind)) also works fine.
But when I connect another SUB socket as a client, something goes wrong, without any exception or error.
Here's my example code.
#include <zmq.hpp>
#include <string>
#include <iostream>
#include <unistd.h>
#include <thread>
class ZMQSock
{
public:
ZMQSock(const char* addr)
{
if (addr != NULL)
{
mctx = new zmq::context_t(1);
mszAddr = new char[strlen(addr) + 1];
snprintf(mszAddr, strlen(addr) + 1, "%s", addr);
}
}
virtual ~ZMQSock()
{
if (msock != nullptr)
delete msock;
if (mctx != nullptr)
delete mctx;
if (mszAddr != nullptr)
delete [] mszAddr;
}
int zbind()
{
if (msock != nullptr)
msock->bind(mszAddr);
else return -1;
return 0;
}
int zconnect()
{
if (msock != nullptr)
msock->connect(mszAddr);
else return -1;
return 0;
}
void start()
{
if (mbthread != false)
return ;
mbthread = true;
mhthread = std::thread(std::bind(&ZMQSock::run, this));
}
virtual void stop()
{
if (mbthread == false)
return ;
mbthread = false;
if (mhthread.joinable())
mhthread.join();
}
virtual void run() = 0;
protected:
char* mszAddr{nullptr};
zmq::context_t* mctx{nullptr};
zmq::socket_t* msock{nullptr};
bool mbthread{false};
std::thread mhthread;
};
class ZPublisher : public ZMQSock
{
public:
ZPublisher(const char* addr) : ZMQSock(addr)
{
if (msock == nullptr)
{
msock = new zmq::socket_t(*mctx, ZMQ_PUB);
}
}
virtual ~ZPublisher()
{
}
bool zsend(const char* data, const unsigned int length, bool sendmore=false)
{
zmq::message_t msg(length);
memcpy(msg.data(), data, length);
if (sendmore)
return msock->send(msg, ZMQ_SNDMORE);
return msock->send(msg);
}
void run()
{
if (mszAddr == nullptr)
return ;
if (strlen(mszAddr) < 6)
return ;
const char* fdelim = "1";
const char* first = "it sends to first. two can not recv this sentence!\0";
const char* sdelim = "2";
const char* second = "it sends to second. one can not recv this sentence!\0";
while (mbthread)
{
zsend(fdelim, 1, true);
zsend(first, strlen(first));
zsend(sdelim, 1, true);
zsend(second, strlen(second));
usleep(1000 * 1000);
}
}
};
class ZSubscriber : public ZMQSock
{
public:
ZSubscriber(const char* addr) : ZMQSock(addr)
{
if (msock == nullptr)
{
msock = new zmq::socket_t(*mctx, ZMQ_SUB);
}
}
virtual ~ZSubscriber()
{
}
void setScriberDelim(const char* delim, const int length)
{
msock->setsockopt(ZMQ_SUBSCRIBE, delim, length);
mdelim = std::string(delim, length);
}
std::string zrecv()
{
zmq::message_t msg;
msock->recv(&msg);
return std::string(static_cast<char*>(msg.data()), msg.size());
}
void run()
{
if (mszAddr == nullptr)
return ;
if (strlen(mszAddr) < 6)
return ;
while (mbthread)
{
std::cout << "MY DELIM IS [" << mdelim << "] - MSG : ";
std::cout << zrecv() << std::endl;
usleep(1000 * 1000);
}
}
private:
std::string mdelim;
};
int main ()
{
ZPublisher pub("tcp://localhost:5252");
ZSubscriber sub1("tcp://localhost:5252");
ZSubscriber sub2("tcp://*:5252");
pub.zconnect();
sub1.zconnect();
sub2.zbind();
sub1.setScriberDelim("1", 1);
sub2.setScriberDelim("2", 1);
pub.start();
std::cout << "PUB Server has been started.." << std::endl;
usleep(1000 * 1000);
sub1.start();
std::cout << "SUB1 Start." << std::endl;
sub2.start();
std::cout << "SUB2 Start." << std::endl;
int i = 0;
std::cout << "< Press any key to exit program. >" << std::endl;
std::cin >> i;
std::cout << "SUB1 STOP START" << std::endl;
sub1.stop();
std::cout << "SUB2 STOP START" << std::endl;
sub2.stop();
std::cout << "PUB STOP START" << std::endl;
pub.stop();
std::cout << "ALL DONE" << std::endl;
return 0;
}
What causes this? Or am I using PUB/SUB incorrectly?
You are connecting a SUB socket to a SUB socket, which is an invalid connection. In your case the PUB should bind and the SUBs should connect.
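(A minimal sketch of the corrected wiring, reusing the classes from the question: the single PUB binds the endpoint and both SUBs connect to it.)
int main()
{
    ZPublisher pub("tcp://*:5252");           // bind side uses the wildcard address
    ZSubscriber sub1("tcp://localhost:5252"); // both subscribers connect
    ZSubscriber sub2("tcp://localhost:5252");
    pub.zbind();
    sub1.zconnect();
    sub2.zconnect();
    sub1.setScriberDelim("1", 1);
    sub2.setScriberDelim("2", 1);
    pub.start();
    sub1.start();
    sub2.start();
    // ... run, then stop() in the same order as the original main() ...
}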

boost::asio over SocketCAN

I was thinking of making use of Boost.Asio to read data from a SocketCAN socket.
There's nothing fancy going on in linux/can.h; the device should behave like the loopback interface and be used with a raw socket.
Looking at the basic_raw_socket interface, it seems that I can make use of basic_raw_socket::assign to adopt the native socket created with
socket( PF_CAN, SOCK_RAW, CAN_RAW );
This is what I have so far:
namespace can {
class CanSocket {
public:
    typedef boost::asio::ip::basic_endpoint<CanSocket> endpoint;
    typedef boost::asio::ip::basic_resolver_query<CanSocket> resolver_query;
    typedef boost::asio::ip::basic_resolver_iterator<CanSocket> resolver_iterator;
    typedef boost::asio::basic_raw_socket<CanSocket> socket;
    typedef boost::asio::ip::basic_resolver<CanSocket> resolver;

    CanSocket()
        : _protocol( CAN_RAW )
        , _family( PF_CAN )
    {
    }

    static CanSocket v4()
    {
        return CanSocket();
    }
    static CanSocket v6();

    int type() const;
    int protocol() const;
    int family() const;

    friend bool operator==(const CanSocket& p1, const CanSocket& p2)
    {
        return p1._protocol == p2._protocol && p1._family == p2._family;
    }
    friend bool operator!=(const CanSocket& p1, const CanSocket& p2)
    {
        return p1._protocol != p2._protocol || p1._family != p2._family;
    }
private:
    int _protocol;
    int _family;
};
}
And this is how I use it in my application
boost::asio::io_service ioserv;
CanSocket::socket s( ioserv );
int sock = socket( PF_CAN, SOCK_RAW, CAN_RAW );
s.assign(CanSocket::v4(), sock);
struct ifreq ifr;
strcpy(ifr.ifr_name, "vcan0");
ioctl(sock, SIOCGIFINDEX, &ifr); /* ifr.ifr_ifindex gets filled
* with that device's index */
/* Select that CAN interface, and bind the socket to it. */
/* this should be the endpoint */
struct sockaddr_can addr;
addr.can_family = AF_CAN;
addr.can_ifindex = ifr.ifr_ifindex;
/* s.bind (....) */
bind( sock, (struct sockaddr*)&addr, sizeof(addr) );
What I don't quite get is how to bind s to the local endpoint, since there are no IPs or ports involved.
Is there anything else that needs to be implemented besides the endpoint to get it going?
Here is a working example, assembled with the help of this thread:
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <iostream>
#include <net/if.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <linux/can.h>
#include <linux/can/raw.h>
#include <boost/asio.hpp>
#include <boost/bind.hpp>

void data_send(void) {
    std::cout << "omg sent" << std::endl;
}

void data_rec(struct can_frame &rec_frame,
              boost::asio::posix::basic_stream_descriptor<> &stream) {
    std::cout << std::hex << rec_frame.can_id << " ";
    for (int i = 0; i < rec_frame.can_dlc; i++) {
        std::cout << std::hex << int(rec_frame.data[i]) << " ";
    }
    std::cout << std::dec << std::endl;
    stream.async_read_some(
        boost::asio::buffer(&rec_frame, sizeof(rec_frame)),
        boost::bind(data_rec, boost::ref(rec_frame), boost::ref(stream)));
}

int main(void) {
    struct sockaddr_can addr;
    struct can_frame frame;
    struct can_frame rec_frame;
    struct ifreq ifr;

    int natsock = socket(PF_CAN, SOCK_RAW, CAN_RAW);

    strcpy(ifr.ifr_name, "vcan0");
    ioctl(natsock, SIOCGIFINDEX, &ifr);

    addr.can_family = AF_CAN;
    addr.can_ifindex = ifr.ifr_ifindex;
    if (bind(natsock, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
        perror("Error in socket bind");
        return -2;
    }

    frame.can_id = 0x123;
    frame.can_dlc = 2;
    frame.data[0] = 0x11;
    frame.data[1] = 0x23;

    boost::asio::io_service ios;
    boost::asio::posix::basic_stream_descriptor<> stream(ios);
    stream.assign(natsock);

    stream.async_write_some(boost::asio::buffer(&frame, sizeof(frame)),
                            boost::bind(data_send));
    stream.async_read_some(
        boost::asio::buffer(&rec_frame, sizeof(rec_frame)),
        boost::bind(data_rec, boost::ref(rec_frame), boost::ref(stream)));
    ios.run();
}
The solution is to use posix::stream_descriptor.
Just open the native socket, bind it, and then use posix::basic_stream_descriptor::assign (this assumes a virtual CAN interface such as vcan0 is already up).