C++ Cross-platform Socket with HTTP request

Currently, I am using a very simple code example which helps me work with cross-platform sockets.
Github: Socket c++ cross platform
The problem came when I tried to make an HTTP GET request.
When I try simple requests, it returns a correct value, but when I send this more complex request:
"GET /1.0/stock/aapl/batch?types=quote,news,chart&range=1m&last=10 HTTP/1.0\r\nHost: api.iextrading.com\r\nConnection: close\r\n\r\n"
it returns nothing!
//Main.cpp
#include <cstdio>   // getchar
#include <cstring>  // memset, strlen
#include <string>
#include <ActiveSocket.h>

#define SEND(a,b,c,d) send(a, (const char *)b, (int)c, d)
typedef unsigned char uint8;

int main(int argc, char **argv)
{
    CActiveSocket socket; // Instantiate active socket object (defaults to TCP).
    char time[50];
    memset(&time, 0, 50);
    char host[] = "api.iextrading.com";
    char message[] = "GET /1.0/stock/aapl/batch?types=quote,news,chart&range=1m&last=10 HTTP/1.0\r\nHost: api.iextrading.com\r\nConnection: close\r\n\r\n";

    socket.Initialize();
    if (socket.Open(host, 80))
    {
        if (socket.Send((const uint8 *)message, strlen(message)))
        {
            socket.Receive(4096); // size of the buffer
            socket.Close();
        }
    }
    getchar();
    return 1;
}
//CActiveSocket.h
int32 CSimpleSocket::Send(const uint8 *pBuf, size_t bytesToSend)
{
SetSocketError(SocketSuccess);
m_nBytesSent = 0;
switch(m_nSocketType)
{
case CSimpleSocket::SocketTypeTcp:
{
if (IsSocketValid())
{
if ((bytesToSend > 0) && (pBuf != NULL))
{
m_timer.Initialize();
m_timer.SetStartTime();
//---------------------------------------------------------
// Check error condition and attempt to resend if call
// was interrupted by a signal.
//---------------------------------------------------------
do
{
m_nBytesSent = SEND(m_socket, pBuf, bytesToSend, 0);
TranslateSocketError();
} while (GetSocketError() == CSimpleSocket::SocketInterrupted);
m_timer.SetEndTime();
}
}
break;
}
...
}
int32 CSimpleSocket::Receive(int32 nMaxBytes, uint8 * pBuffer )
{
m_nBytesReceived = 0;
if (IsSocketValid() == false)
{
return m_nBytesReceived;
}
uint8 * pWorkBuffer = pBuffer;
if ( pBuffer == NULL )
{
if ((m_pBuffer != NULL) && (nMaxBytes != m_nBufferSize))
{
delete [] m_pBuffer;
m_pBuffer = NULL;
}
if (m_pBuffer == NULL)
{
m_nBufferSize = nMaxBytes;
m_pBuffer = new uint8[nMaxBytes];
}
pWorkBuffer = m_pBuffer;
}
SetSocketError(SocketSuccess);
m_timer.Initialize();
m_timer.SetStartTime();
switch (m_nSocketType)
{
case CSimpleSocket::SocketTypeTcp:
{
do
{
m_nBytesReceived = RECV(m_socket, (pWorkBuffer +m_nBytesReceived), nMaxBytes, m_nFlags);
TranslateSocketError();
} while ((GetSocketError() == CSimpleSocket::SocketInterrupted));
break;
}
...
}
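With HTTP/1.0 and Connection: close the server keeps sending until it closes the socket, so a single Receive(4096) call only returns whatever has arrived at that moment. A minimal sketch of draining the whole reply, assuming the CSimpleSocket API shown above (Receive() returns the number of bytes read, and 0 once the peer closes the connection):

// Sketch: read until the server closes the connection (HTTP/1.0 + Connection: close).
std::string response;
uint8 chunk[4096];
int n;
while ((n = socket.Receive(sizeof(chunk), chunk)) > 0)
{
    response.append(reinterpret_cast<const char *>(chunk), n);
}
socket.Close();
// 'response' now holds the status line, the headers and whatever body the server sent.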

Function read() returns 0 when doing socket communications under Linux

The image linked above is the HTML that the browser shows. Every time I click a link, the server does not receive the correct HTTP information from the browser. Below is my code related to communicating over HTTP.
char buf[2048];
http_handle hh(connfd, buf, 2048);
read(connfd, buf, 2048);
hh.handle_http_request(&hh);
hh.response_http_request(&hh); //the first two calls work
read(connfd, buf, 2048); //this returns 0
hh.handle_http_request(&hh);
hh.response_http_request(&hh);
Below is the implementation of handle_http_request and response_http_request:
void* http_handle::handle_http_request(void* arg) {
http_handle* hp = (http_handle*)arg;
hp->_handle_http_request();
return hp;
}
void http_handle::_handle_http_request() {
int i, j;
for (i = 0; this->buf[i] != ' '; i++)
method[i] = this->buf[i];
method[i] = 0;
for (j = 0, ++i; this->buf[i] != ' '; i++, j++)
url[j] = this->buf[i];
url[j] = 0;
//method stores http operations like GET and POST
//url stores the url resource in the http start line
if (!strcasecmp(method, "GET")) {
//...
}
if (!strcasecmp(method, "POST")) {
//...
}
}
void* http_handle::response_http_request(void* arg) {
http_handle* hp = (http_handle*)arg;
hp->_response_http_request();
return hp;
}
void http_handle::_response_http_request() {
if (strcasecmp(method, "GET") && strcasecmp(method, "POST")) {
unimpelented();
return;
}
if (!strcasecmp(method, "GET")) {
if (strcmp(url, "/") == 0) {
char tmp_path[256];
http_handle::path = getcwd(tmp_path, 256);
trans_dir("src");
return;
}
std::string filepath = http_handle::path + "/" + url;
struct stat filestat;
if ((stat(filepath.c_str(), &filestat)) != 0) {
perror("_response_http_request stat error");
exit(1);
}
switch (filestat.st_mode & S_IFMT) {
case S_IFREG:
trans_file(filepath);
break;
case S_IFDIR:
trans_dir(filepath);
break;
default:
break;
}
return;
}
if (!strcasecmp(method, "POST")) { //to be implemented
return;
}
}
read() returns 0 when EOF is reached, i.e. when the peer has closed the TCP connection on its end.
The data you have shown is not larger than your buffer, so the first read() receives all of the data, and there is nothing left for the second read() because the peer closed the connection after sending the data.
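A short sketch of how the return value is usually handled (assuming a plain blocking socket, as in the code above):

/* Sketch: distinguish "peer closed" (0) from a real error (-1) when reading. */
ssize_t n = read(connfd, buf, sizeof(buf));
if (n > 0) {
    /* got n bytes of request data in buf */
} else if (n == 0) {
    /* EOF: the peer closed its end; stop reading and close connfd */
    close(connfd);
} else {
    perror("read");
}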

net-snmp v2c working well but v3 returns STAT_ERROR

I'm currently trying to develop an SNMP manager on my embedded device (Linux OS). This device should communicate with another device's SNMP agent. My code works well for SNMPv2c, and my SNMP packets show up in the Wireshark capture. But when I try SNMPv3, the snmp_synch_response(session, requestPdu, &responsePdu) function returns STAT_ERROR, whereas SNMPv2c was returning STAT_SUCCESS. My code is shown below; I create an SNMP session for v2 and v3.
snmp_session *snmptypes::fCreateSession(QString IP)
{
struct snmp_session session;
snmp_sess_init(&session);
session.peername = strdup(IP.toAscii());
static const char* v3_passphrase = "blablabl";
session.retries = 2;
session.timeout = 1000000;
session.engineBoots = 0;
session.engineTime = 0;
QString engineID = "0000000c000000007f000001";
session.contextEngineID = reinterpret_cast<uchar*>(strdup(engineID.toAscii()));
qDebug()<<"contextEngineID:"<<session.contextEngineID;
/*memory allocated by strdup() will be freed by calling snmp_close() */
if(SnmpMainWidget::activeWaveForm == SnmpMainWidget::snmpv2c)
{
session.version = SNMP_VERSION_2c;
const char *str;
QString path = "user";
QByteArray ba;
ba = path.toLatin1();
str = ba.data();
session.community = const_cast<unsigned char*>(reinterpret_cast<const unsigned char *>(str));
session.community_len = path.length();
session.retries = 2;
session.timeout = 1000000;
}
else if (SnmpMainWidget::activeWaveForm == SnmpMainWidget::snmpv3)
{
session.version = SNMP_VERSION_3;
session.securityName = strdup("blabl");
session.securityNameLen = strlen(session.securityName);
session.securityModel = USM_SEC_MODEL_NUMBER;
session.securityLevel = SNMP_SEC_LEVEL_AUTHNOPRIV;
session.securityAuthProto = usmHMACMD5AuthProtocol;
// session.securityAuthProtoLen = sizeof (usmHMACMD5AuthProtocol) / sizeof (oid);
session.securityAuthProtoLen = USM_AUTH_PROTO_MD5_LEN;
session.securityPrivProto = usmNoPrivProtocol;
session.securityPrivProtoLen = USM_PRIV_PROTO_NOPRIV_LEN;
session.securityAuthKeyLen =USM_AUTH_KU_LEN;
if (generate_Ku(session.securityAuthProto,session.securityAuthProtoLen,(u_char *) (v3_passphrase),
strlen(v3_passphrase),session.securityAuthKey,
&session.securityAuthKeyLen) != SNMPERR_SUCCESS)
{
qWarning()<<"ERROR KODU"<<generate_Ku(session.securityAuthProto,session.securityAuthProtoLen,(u_char *) (v3_passphrase),
strlen(v3_passphrase),session.securityAuthKey,
&session.securityAuthKeyLen);
snmp_log(LOG_ERR,"Error generating Ku from authentication pass phrase. \n");
exit(1);
}
}
struct snmp_session* openedSession = snmp_open(&session);
if(openedSession == nullptr)
{
snmp_sess_perror(reinterpret_cast<const char*>("No Ack!"), openedSession);
qWarning()<<"Error while Session opening! FILE:"<<__FILE__<<"LINE:"<<__LINE__;
}
else
qDebug()<<"creating session is succeed!";
return openedSession;
}
The session is created successfully, no problem so far. Next, I create the PDU as shown below:
snmp_pdu *snmptypes::fCreatePDU(tsPDUvariables PDUvariables)
{
// qDebug()<<"nrepeaters"<<PDUvariables.nrepeaters;
// qDebug()<<"mrepeaters"<<PDUvariables.mrepetitions;
qDebug()<<"setParam"<<PDUvariables.setParam;
qDebug()<<"dataType"<<PDUvariables.dataType;
struct snmp_pdu* requestPdu;
oid requestOid[MAX_OID_LEN];
size_t requestOidLength = MAX_OID_LEN;
snmp_parse_oid(static_cast<const char*>(PDUvariables.OID.toAscii()), requestOid, &requestOidLength);
switch (PDUvariables.msgType) {
case snmpGet:
requestPdu = snmp_pdu_create(SNMP_MSG_GET);
if (SnmpMainWidget::activeWaveForm == SnmpMainWidget::snmpv3){
requestPdu->version = SNMP_VERSION_3;
requestPdu->securityName = strdup("user");
requestPdu->securityNameLen = strlen(requestPdu->securityName);
requestPdu->securityLevel = SNMP_SEC_LEVEL_AUTHNOPRIV;
requestPdu->securityModel = USM_SEC_MODEL_NUMBER;
}
snmp_add_null_var(requestPdu, requestOid, requestOidLength);
break;
//there are some other cases like set and getbulk, but no need to focus on those; get is enough for now.
}
qDebug()<<"creating PDU is succeed!";
return requestPdu;
}
Finally, I use the SNMP GET function to send the SNMP packet to the dedicated IP.
The problem occurs right here: for SNMPv2 snmpStatus returns success, but for SNMPv3 it returns an error. Consequently the SNMPv3 packet doesn't appear in Wireshark and is never sent. I can't understand what I'm doing wrong in v3 while v2 works well.
bool snmptypes::fSnmpGet(QString IP,QString OID)
{
const char PACKET_HEADER_BYTE = 4;//4Byte = SOH + len/256 + len%256 + EOH
struct snmp_session* session = fCreateSession(IP);
tsPDUvariables PDUvariables;
PDUvariables.OID = OID;
PDUvariables.msgType = snmpGet;
struct snmp_pdu* requestPdu = fCreatePDU(PDUvariables);
qDebug()<<"PDUvariables.msgType"<<PDUvariables.msgType;
qDebug()<<"PDUvariables.dataType"<<PDUvariables.dataType;
qDebug()<<"PDUvariables.OID"<<PDUvariables.OID;
qDebug()<<"PDUvariables.setParam"<<PDUvariables.setParam;
struct snmp_pdu* responsePdu = nullptr;
if(requestPdu != nullptr && session != nullptr)
{
int snmpStatus = snmp_synch_response(session, requestPdu, &responsePdu);
qDebug()<<"snmpStatus"<<snmpStatus;
qDebug()<<"requestStatus::errStat"<<requestPdu->errstat;
qDebug()<<"session->s_errno"<<session->s_errno;
if(snmpStatus == STAT_SUCCESS)
{
if( responsePdu->errstat == SNMP_ERR_NOERROR)
{
/* SUCCESS: Print the result variables */
struct variable_list *snmpVariables;
for(snmpVariables = responsePdu->variables; snmpVariables; snmpVariables = snmpVariables->next_variable)
{ qDebug()<<"fSnmpGet"<<__LINE__;
print_variable(snmpVariables->name, snmpVariables->name_length, snmpVariables);
}
for(snmpVariables = responsePdu->variables; snmpVariables != nullptr; snmpVariables = snmpVariables->next_variable)
{
QString strOID;
for(size_t i =0; i<snmpVariables->name_length;i++)
{
qDebug()<<"snmpVariables->name[i]"<<snmpVariables->name[i];
strOID.append(".");
strOID.append(QString::number( snmpVariables->name[i]));
}
qDebug()<<"strOID"<<strOID;
unsigned int OIDLen = static_cast<unsigned int> (strOID.length());
qDebug()<<"OID_Length:"<< static_cast<unsigned int> (OIDLen);
unsigned int paramLen = static_cast<unsigned int> (snmpVariables->val_len);
qDebug()<<"paramLen"<<paramLen;
unsigned int txDataLen = 5+OIDLen + paramLen;
qDebug()<<"txDataLen"<<txDataLen;
int totalTxDataLen = txDataLen+PACKET_HEADER_BYTE;
unsigned char *outMessBuff = static_cast<unsigned char *>(malloc(totalTxDataLen));
unsigned char *dataBuff = static_cast<unsigned char *>(malloc(txDataLen));
dataBuff[0] = snmpReqResponse;
dataBuff[1] = static_cast<unsigned char>(OIDLen/256 );
dataBuff[2] = static_cast<unsigned char>(OIDLen%256) ;
dataBuff[3] = static_cast<unsigned char>(paramLen/256) ;
dataBuff[4] = static_cast<unsigned char>(paramLen%256 );
if(OIDLen > 0)
{
memcpy(dataBuff+5,strOID.toAscii(),OIDLen);
}
if(paramLen > 0)
{
memcpy(dataBuff+5+OIDLen,snmpVariables->val.string,paramLen);
}
totalTxDataLen = SnmpMainWidget::IPC.prepareIPCmessage(outMessBuff, dataBuff, static_cast<int>(txDataLen));
SnmpMainWidget::IPC.sendIpcMessageToQt(reinterpret_cast<const char *>(outMessBuff),static_cast<unsigned int>(totalTxDataLen)); // reinterpret_cast bit of dangerous should be good test !!
SnmpMainWidget::IPC.PrintPacketInHex("SNMP-->QT :",outMessBuff, static_cast<unsigned int>(totalTxDataLen));
free(dataBuff);
free(outMessBuff);
}
}
}
else
{
if(snmpStatus == STAT_SUCCESS)
{
fprintf(stderr, "Error in packet\nReason: %s\n", snmp_errstring(responsePdu->errstat));
}
else if(snmpStatus == STAT_TIMEOUT)
{
fprintf(stderr, "Timeout: No response from %s.\n", session->peername);
}
else
{
printf("snmp get requestPdu->command: %d\n", requestPdu->command);
printf("snmp get errstat: %s\n", snmp_errstring(requestPdu->errstat));
printf("snmp get errindex: %s\n", snmp_errstring(requestPdu->errindex));
fprintf(stderr, "session->s_errno %d,session->s_snmp_errno %d \n", session->s_errno, session->s_snmp_errno);
}
if(responsePdu)
{
snmp_free_pdu(responsePdu);
}
snmp_close(session);
}
}
else
return false;
return true;
}
I just solved the problem. When I searched the internet, I realized that a call to init_snmp("snmpapp"); was missing from my code. After adding init_snmp, the v3 set/get messages are sent perfectly.
snmp_session *snmptypes::fCreateSession(QString IP)
{
    struct snmp_session session;
    init_snmp("snmpapp");
    snmp_sess_init(&session);
    session.peername = strdup(IP.toAscii());
    .....
}
Maybe this will be a useful answer for people who run into the same problem.
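For reference, a minimal sketch of the usual net-snmp setup order; init_snmp only needs to run once per application, before any sessions are created, rather than inside every fCreateSession call:

#include <net-snmp/net-snmp-config.h>
#include <net-snmp/net-snmp-includes.h>

/* Call once at application startup: initializes the library, including the USM/v3 machinery. */
void snmp_app_startup()
{
    init_snmp("snmpapp");
}

/* Then per target, as in the question: */
snmp_session *open_session(const char *peer)
{
    struct snmp_session session;
    snmp_sess_init(&session);      /* fill in library defaults for this session */
    session.peername = strdup(peer);
    /* ... version/security settings as shown above ... */
    return snmp_open(&session);    /* returns a copy managed by the library */
}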

Interactive Brokers C++ Error: error: 'min' was not declared in this scope

I am trying to set up the Interactive Brokers API on Ubuntu (18.04). I have installed both the IB Gateway, which is used for communicating with exchanges, as well as other API software for developing trading algorithms in Java, C++, C# and Python (which you can find here). The API is written in both Java and C++, and as stated before it offers support for both. However, when attempting to compile an example from their source code, there is an error in the EReader.cpp file. I have dealt with several other C++ errors in their code, but this one I cannot figure out. Here is the code:
#include "StdAfx.h"
#include "shared_ptr.h"
#include "Contract.h"
#include "EDecoder.h"
#include "EMutex.h"
#include "EReader.h"
#include "EClientSocket.h"
#include "EPosixClientSocketPlatform.h"
#include "EReaderSignal.h"
#include "EMessage.h"
#include "DefaultEWrapper.h"
#define IN_BUF_SIZE_DEFAULT 8192
static DefaultEWrapper defaultWrapper;
EReader::EReader(EClientSocket *clientSocket, EReaderSignal *signal)
: processMsgsDecoder_(clientSocket->EClient::serverVersion(), clientSocket->getWrapper(), clientSocket) {
m_isAlive = true;
m_pClientSocket = clientSocket;
m_pEReaderSignal = signal;
m_needsWriteSelect = false;
m_nMaxBufSize = IN_BUF_SIZE_DEFAULT;
m_buf.reserve(IN_BUF_SIZE_DEFAULT);
}
EReader::~EReader(void) {
m_isAlive = false;
#if defined(IB_WIN32)
WaitForSingleObject(m_hReadThread, INFINITE);
#endif
}
void EReader::checkClient() {
m_needsWriteSelect = !m_pClientSocket->getTransport()->isOutBufferEmpty();
}
void EReader::start() {
#if defined(IB_POSIX)
pthread_t thread;
pthread_attr_t attr;
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
pthread_create( &thread, &attr, readToQueueThread, this );
pthread_attr_destroy(&attr);
#elif defined(IB_WIN32)
m_hReadThread = CreateThread(0, 0, readToQueueThread, this, 0, 0);
#else
# error "Not implemented on this platform"
#endif
}
#if defined(IB_POSIX)
void * EReader::readToQueueThread(void * lpParam)
#elif defined(IB_WIN32)
DWORD WINAPI EReader::readToQueueThread(LPVOID lpParam)
#else
# error "Not implemented on this platform"
#endif
{
EReader *pThis = reinterpret_cast<EReader *>(lpParam);
pThis->readToQueue();
return 0;
}
void EReader::readToQueue() {
EMessage *msg = 0;
while (m_isAlive) {
if (m_buf.size() == 0 && !processNonBlockingSelect() && m_pClientSocket->isSocketOK())
continue;
if (!putMessageToQueue())
break;
}
m_pClientSocket->handleSocketError();
m_pEReaderSignal->issueSignal(); //letting client know that socket was closed
}
bool EReader::putMessageToQueue() {
EMessage *msg = 0;
if (m_pClientSocket->isSocketOK())
msg = readSingleMsg();
if (msg == 0)
return false;
m_csMsgQueue.Enter();
m_msgQueue.push_back(ibapi::shared_ptr<EMessage>(msg));
m_csMsgQueue.Leave();
m_pEReaderSignal->issueSignal();
return true;
}
bool EReader::processNonBlockingSelect() {
fd_set readSet, writeSet, errorSet;
struct timeval tval;
tval.tv_usec = 100 * 1000; //100 ms
tval.tv_sec = 0;
if( m_pClientSocket->fd() >= 0 ) {
FD_ZERO( &readSet);
errorSet = writeSet = readSet;
FD_SET( m_pClientSocket->fd(), &readSet);
if (m_needsWriteSelect)
FD_SET( m_pClientSocket->fd(), &writeSet);
FD_SET( m_pClientSocket->fd(), &errorSet);
int ret = select( m_pClientSocket->fd() + 1, &readSet, &writeSet, &errorSet, &tval);
if( ret == 0) { // timeout
return false;
}
if( ret < 0) { // error
m_pClientSocket->eDisconnect();
return false;
}
if( m_pClientSocket->fd() < 0)
return false;
if( FD_ISSET( m_pClientSocket->fd(), &errorSet)) {
// error on socket
m_pClientSocket->onError();
}
if( m_pClientSocket->fd() < 0)
return false;
if( FD_ISSET( m_pClientSocket->fd(), &writeSet)) {
// socket is ready for writing
onSend();
}
if( m_pClientSocket->fd() < 0)
return false;
if( FD_ISSET( m_pClientSocket->fd(), &readSet)) {
// socket is ready for reading
onReceive();
}
return true;
}
return false;
}
void EReader::onSend() {
m_pEReaderSignal->issueSignal();
}
void EReader::onReceive() {
int nOffset = m_buf.size();
m_buf.resize(m_nMaxBufSize);
int nRes = m_pClientSocket->receive(m_buf.data() + nOffset, m_buf.size() - nOffset);
if (nRes <= 0)
return;
m_buf.resize(nRes + nOffset);
}
bool EReader::bufferedRead(char *buf, int size) {
while (size > 0) {
while (m_buf.size() < size && m_buf.size() < m_nMaxBufSize)
if (!processNonBlockingSelect() && !m_pClientSocket->isSocketOK())
return false;
int nBytes = (std::min<unsigned int>)(m_nMaxBufSize, size);
std::copy(m_buf.begin(), m_buf.begin() + nBytes, buf);
std::copy(m_buf.begin() + nBytes, m_buf.end(), m_buf.begin());
m_buf.resize(m_buf.size() - nBytes);
size -= nBytes;
buf += nBytes;
}
return true;
}
EMessage * EReader::readSingleMsg() {
if (m_pClientSocket->usingV100Plus()) {
int msgSize;
if (!bufferedRead((char *)&msgSize, sizeof(msgSize)))
return 0;
msgSize = htonl(msgSize);
if (msgSize <= 0 || msgSize > MAX_MSG_LEN)
return 0;
std::vector<char> buf = std::vector<char>(msgSize);
if (!bufferedRead(buf.data(), buf.size()))
return 0;
return new EMessage(buf);
}
else {
const char *pBegin = 0;
const char *pEnd = 0;
int msgSize = 0;
while (msgSize == 0)
{
if (m_buf.size() >= m_nMaxBufSize * 3/4)
m_nMaxBufSize *= 2;
if (!processNonBlockingSelect() && !m_pClientSocket->isSocketOK())
return 0;
pBegin = m_buf.data();
pEnd = pBegin + m_buf.size();
msgSize = EDecoder(m_pClientSocket->EClient::serverVersion(), &defaultWrapper).parseAndProcessMsg(pBegin, pEnd);
}
std::vector<char> msgData(msgSize);
if (!bufferedRead(msgData.data(), msgSize))
return 0;
if (m_buf.size() < IN_BUF_SIZE_DEFAULT && m_buf.capacity() > IN_BUF_SIZE_DEFAULT)
{
m_buf.resize(m_nMaxBufSize = IN_BUF_SIZE_DEFAULT);
m_buf.shrink_to_fit();
}
EMessage * msg = new EMessage(msgData);
return msg;
}
}
ibapi::shared_ptr<EMessage> EReader::getMsg(void) {
m_csMsgQueue.Enter();
if (m_msgQueue.size() == 0) {
m_csMsgQueue.Leave();
return ibapi::shared_ptr<EMessage>();
}
ibapi::shared_ptr<EMessage> msg = m_msgQueue.front();
m_msgQueue.pop_front();
m_csMsgQueue.Leave();
return msg;
}
void EReader::processMsgs(void) {
m_pClientSocket->onSend();
checkClient();
ibapi::shared_ptr<EMessage> msg = getMsg();
if (!msg.get())
return;
const char *pBegin = msg->begin();
while (processMsgsDecoder_.parseAndProcessMsg(pBegin, msg->end()) > 0)
{
msg = getMsg();
if (!msg.get())
break;
pBegin = msg->begin();
}
}
The error I get is the following:
error: 'min' was not declared in this scope
    int nBytes = min(m_nMaxBuffSize, size);
I have had to do other things, such as editing other source code and makefiles, but here I am stuck. Any insight would be appreciated.
In my version 973 source, at that line I have
int nBytes = (std::min<unsigned int>)(m_nMaxBufSize, size);
Make sure you are using the latest version. The problem may be an example of what is described in Why is "using namespace std" considered bad practice?
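If you prefer to patch an older source tree instead of upgrading, a minimal sketch of the local fix (the member name m_nMaxBufSize follows the code above) is to include <algorithm> and qualify the call:

#include <algorithm>   // declares std::min

// The extra parentheses around std::min<unsigned int> also protect the call
// from any min() macro that a platform header might define.
int nBytes = (std::min<unsigned int>)(m_nMaxBufSize, size);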

How to use libuv to accept TCP connections with multiple threads?

I wrote a C++ demo of a TCP server with libuv. When I checked the CPU usage, I found that the demo runs on a single thread; how can I implement it with multiple threads?
Currently, the demo can handle 100,000+ TCP requests per second, but it only uses one CPU.
Code:
#include <iostream>
#include <atomic>
#include "uv.h"
#include <thread>
#include <mutex>
#include <map>
using namespace std;
auto loop = uv_default_loop();
struct sockaddr_in addr;
typedef struct {
uv_write_t req;
uv_buf_t buf;
} write_req_t;
typedef struct {
uv_stream_t* client;
uv_alloc_cb alloc_cb;
uv_read_cb read_cb;
} begin_read_req;
void alloc_buffer(uv_handle_t *handle, size_t suggested_size, uv_buf_t *buf) {
buf->base = (char*)malloc(suggested_size);
buf->len = suggested_size;
}
void free_write_req(uv_write_t *req) {
write_req_t *wr = (write_req_t*)req;
free(wr->buf.base);
free(wr);
}
void echo_write(uv_write_t *req, int status) {
if (status) {
fprintf(stderr, "Write error %s\n", uv_strerror(status));
}
free_write_req(req);
}
void echo_read(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf) {
if (nread > 0) {
auto req = (write_req_t*)malloc(sizeof(write_req_t));
auto *aaa = (char*)malloc(5);
aaa[0] = '+';
aaa[1] = 'O';
aaa[2] = 'K';
aaa[3] = '\r';
aaa[4] = '\n';
req->buf = uv_buf_init(aaa, 5);
uv_write((uv_write_t*)req, client, &req->buf, 1, echo_write);
}
if (nread < 0) {
if (nread != UV_EOF)
fprintf(stderr, "Read error %s\n", uv_err_name(static_cast<unsigned int>(nread)));
uv_close((uv_handle_t*)client, nullptr);
}
free(buf->base);
}
void acceptClientRead(uv_work_t *req) {
begin_read_req *data = (begin_read_req *)req->data;
uv_read_start(data->client, data->alloc_cb, data->read_cb);
}
void on_new_connection(uv_stream_t *server, int status) {
if (status < 0) {
cout << "New connection error:" << uv_strerror(status);
return;
}
uv_tcp_t *client = (uv_tcp_t *)malloc(sizeof(uv_tcp_t));
uv_tcp_init(loop, client);
uv_work_t *req = (uv_work_t *)malloc(sizeof(uv_work_t));
begin_read_req *read_req = (begin_read_req *)malloc(sizeof(begin_read_req));
read_req->client = (uv_stream_t *)client;
read_req->read_cb = echo_read;
read_req->alloc_cb = alloc_buffer;
req->data = read_req;
if (uv_accept(server, (uv_stream_t *)client) == 0) {
uv_read_start((uv_stream_t *)client, alloc_buffer, echo_read);
// uv_queue_work(workloop[0], req, acceptClientRead, nullptr);
}
else {
uv_close((uv_handle_t *)client, nullptr);
}
}
void timer_callback(uv_timer_t* handle) {
cout << std::this_thread::get_id() << "---------" << "hello" << endl;
}
int main() {
uv_tcp_t server{};
uv_tcp_init(loop, &server);
uv_ip4_addr("0.0.0.0", 8790, &addr);
uv_tcp_bind(&server, (const struct sockaddr *) &addr, 0);
uv_listen((uv_stream_t *)&server, 511, on_new_connection);
uv_run(loop, UV_RUN_DEFAULT);
return 0;
}
Of course, I could make the write step asynchronous in the echo_read method, but I don't do any work before the write. Can I make the demo multi-threaded in another way to improve the throughput?
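libuv handles are not thread-safe, so uv_read_start/uv_write must stay on the thread that runs their loop. One commonly used pattern, sketched below under the assumption that the real per-request work is CPU-bound (work_ctx, dispatch and the "+OK" stand-in processing are hypothetical), is to keep the single accept/read loop and push only the request processing onto libuv's thread pool with uv_queue_work, writing the reply back from the after-work callback, which runs on the loop thread again:

// Sketch: offload per-request processing to libuv's thread pool (requires <string>).
struct work_ctx {
    uv_work_t req;
    uv_stream_t *client;
    std::string request;   // copied out of the read buffer
    std::string reply;     // produced by the worker
};

void on_work(uv_work_t *req) {                        // runs on a thread-pool thread
    auto *ctx = static_cast<work_ctx *>(req->data);
    ctx->reply = "+OK\r\n";                           // stand-in for real processing of ctx->request
}

void on_after_work(uv_work_t *req, int /*status*/) {  // runs on the loop thread
    auto *ctx = static_cast<work_ctx *>(req->data);
    uv_buf_t buf = uv_buf_init(const_cast<char *>(ctx->reply.data()),
                               static_cast<unsigned int>(ctx->reply.size()));
    auto *wr = new uv_write_t;
    wr->data = ctx;                                   // keep ctx (and the reply buffer) alive
    uv_write(wr, ctx->client, &buf, 1, [](uv_write_t *wr, int) {
        delete static_cast<work_ctx *>(wr->data);
        delete wr;
    });
}

// Called from echo_read() instead of writing immediately:
void dispatch(uv_stream_t *client, const char *data, ssize_t nread) {
    auto *ctx = new work_ctx{};
    ctx->req.data = ctx;
    ctx->client = client;
    ctx->request.assign(data, static_cast<size_t>(nread));
    uv_queue_work(loop, &ctx->req, on_work, on_after_work);
}

Another option is to run several independent loops, one per thread, each with its own listening socket; that requires either sharing the port (for example SO_REUSEPORT on Linux) or accepting on one loop and passing connections to the others over an IPC pipe, which is noticeably more involved.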

Memory usage with IOCP [closed]

I am converting our code to use IOCP and I have the communication relatively stable, but the memory usage of the application keeps increasing. It looks like I am getting back (in the completion function calls) far fewer OverlappedEx objects than I create. My code is below. What am I doing wrong?
#ifndef NETWORK_DATA
#define NETWORK_DATA
#include <afxwin.h>
#include <vector>
#include <string>
#include "CriticalSectionLocker.h"
using namespace std;
DWORD NetworkManager::NetworkThread(void* param)
{
bool bRun = true;
while (bRun)
{
DWORD wait = ::WaitForSingleObject(CCommunicationManager::s_hShutdownEvent, 0);
if (WAIT_OBJECT_0 == wait)
{
bRun = false;
DEBUG_LOG0("Shutdown event was signalled thread");
}
else
{
DWORD dwBytesTransfered = 0;
void* lpContext = nullptr;
OVERLAPPED* pOverlapped = nullptr;
BOOL bReturn = GetQueuedCompletionStatus(s_IOCompletionPort,
&dwBytesTransfered,
(LPDWORD)&lpContext,
&pOverlapped,
INFINITE);
if (nullptr == lpContext)
{
DEBUG_LOG0("invalid context");
/*continue;*/
}
else
{
if (bReturn && dwBytesTransfered > 0)
{
OverlappedEx* data = reinterpret_cast<OverlappedEx*>(pOverlapped);
ServerData* networkData = reinterpret_cast<ServerData*>(lpContext);
if (networkData && data)
{
switch(data->m_opType)
{
case OverlappedEx::OP_READ:
/*DEBUG_LOG4("device name: %s bytes received: %d socket: %d handle: %d",
networkData->Name().c_str(), dwBytesTransfered, networkData->Socket(), networkData->Handle());*/
networkData->CompleteReceive(dwBytesTransfered, data);
break;
case OverlappedEx::OP_WRITE:
/*DEBUG_LOG4("device name: %s bytes sent: %d socket: %d handle: %d",
networkData->Name().c_str(), dwBytesTransfered, networkData->Socket(), networkData->Handle());*/
networkData->CompleteSend(dwBytesTransfered, data);
break;
}
}
}
else
{
/*DEBUG_LOG2("GetQueuedCompletionStatus failed: bReturn: %d dwBytesTransferred: %u", bReturn, dwBytesTransfered);*/
}
}
}
}
return 0;
}
enum NetworkType
{
UDP,
TCP
};
struct OverlappedEx : public OVERLAPPED
{
enum OperationType
{
OP_READ,
OP_WRITE
};
const static int MAX_PACKET_SIZE = 2048;
WSABUF m_wBuf;
char m_buffer[MAX_PACKET_SIZE];
OperationType m_opType;
OverlappedEx()
{
Clear();
m_refCount = 1;
}
void AddRef()
{
::InterlockedIncrement(&m_refCount);
}
void Release()
{
::InterlockedDecrement(&m_refCount);
}
int Refcount() const
{
return InterlockedExchangeAdd((unsigned long*)&m_refCount, 0UL);
}
~OverlappedEx()
{
Clear();
}
void Clear()
{
memset(m_buffer, 0, MAX_PACKET_SIZE);
m_wBuf.buf = m_buffer;
m_wBuf.len = MAX_PACKET_SIZE;
Internal = 0;
InternalHigh = 0;
Offset = 0;
OffsetHigh = 0;
hEvent = nullptr;
m_opType = OP_READ;
}
private:
volatile LONG m_refCount;
};
class ServerData
{
public:
const static int MAX_REVEIVE_QUEUE_SIZE = 100;
const static int MAX_PACKET_SIZE = 2048;
const static int MAX_SEND_QUEUE_SIZE = 10;
const static int MAX_RECEIVE_QUEUE_SIZE = 100;
const static int MAX_OVERLAPPED_STRUCTS = 20;
ServerData(NetworkType netType, const string& sName, CCommunicationManager::CommHandle handle,
SOCKET sock, HANDLE IOPort) :
m_sName(sName)
{
InitializeCriticalSection(&m_receiveQueLock);
InitializeCriticalSection(&m_objectLock);
m_Handle = handle;
m_Socket = sock;
m_nIPAddress = 0;
m_netType = netType;
m_bEnabled = true;
m_ovlpIndex = 0;
for (int i = 0; i < MAX_OVERLAPPED_STRUCTS; ++i)
{
m_olps.push_back(new OverlappedEx);
}
/* Associate socket with completion handle */
if (m_Socket != 0)
{
CreateIoCompletionPort( reinterpret_cast<HANDLE>(m_Socket), IOPort, reinterpret_cast<ULONG_PTR>(this), 0 );
}
}
~ServerData()
{
CriticalSectionLocker lock(&m_receiveQueLock);
DeleteCriticalSection(&m_receiveQueLock);
DeleteCriticalSection(&m_objectLock);
closesocket(m_Socket);
}
const string& Name() const { return m_sName; }
bool Enabled() const { return m_bEnabled; }
void SetEnabled(bool bEnabled)
{
m_bEnabled = bEnabled;
}
int Handle() const { return m_Handle; }
void SetHandle(int handle)
{
m_Handle = handle;
}
unsigned long IPAddress() const { return m_nIPAddress; }
SOCKET Socket() const
{
return m_Socket;
}
void SetSocket(SOCKET sock)
{
m_Socket = sock;
}
void SetIPAddress(unsigned long nIP)
{
m_nIPAddress = nIP;
}
bool ValidTelegram(const vector<char>& telegram) const
{
return false;
}
OverlappedEx* GetBuffer()
{
OverlappedEx* ret = nullptr;
if (!m_olps.empty())
{
ret = m_olps.front();
m_olps.pop_front();
}
return ret;
}
void CompleteReceive(size_t numBytes, OverlappedEx* data)
{
//DEBUG_LOG1("%d buffers are available", AvailableBufferCount());
if (numBytes > 0)
{
vector<char> v(data->m_buffer, data->m_buffer + numBytes);
ReceivedData rd;
rd.SetData(v);
EnqueReceiveMessage(rd);
}
data->Release();
{
CriticalSectionLocker lock(&m_objectLock);
m_olps.push_back(data);
// DEBUG_LOG1("Queue size: %d", m_olps.size());
}
StartReceiving();
}
void CompleteSend(size_t numBytes, OverlappedEx* data)
{
data->Release();
{
CriticalSectionLocker lock(&m_objectLock);
m_olps.push_back(data);
//DEBUG_LOG1("Queue size: %d", m_olps.size());
}
//DEBUG_LOG2("Object: %s num sent: %d", Name().c_str(), numBytes);
}
void StartReceiving()
{
DWORD bytesRecv = 0;
sockaddr_in senderAddr;
DWORD flags = 0;
int senderAddrSize = sizeof(senderAddr);
int rc = 0;
CriticalSectionLocker lock(&m_objectLock);
auto olp = GetBuffer();
if (!olp)
{
if (...)
{
m_olps.push_back(new OverlappedEx);
olp = GetBuffer();
}
else
{
if (...)
{
DEBUG_LOG1("Name: %s ************* NO AVAILABLE BUFFERS - bailing ***************", Name().c_str());
}
return;
}
}
olp->Clear();
olp->m_opType = OverlappedEx::OP_READ;
olp->AddRef();
switch(GetNetworkType())
{
case UDP:
{
rc = WSARecvFrom(Socket(),
&olp->m_wBuf,
1,
&bytesRecv,
&flags,
(SOCKADDR *)&senderAddr,
&senderAddrSize, (OVERLAPPED*)olp, NULL);
}
break;
case TCP:
{
rc = WSARecv(Socket(),
&olp->m_wBuf,
1,
&bytesRecv,
&flags,
(OVERLAPPED*)olp, NULL);
}
break;
}
if (SOCKET_ERROR == rc)
{
DWORD err = WSAGetLastError();
if (err != WSA_IO_PENDING)
{
olp->Release();
m_olps.push_back(olp);
}
}
}
void SetWriteBuf(const SendData& msg, OverlappedEx* data)
{
int len = min(msg.Data().size(), MAX_PACKET_SIZE);
memcpy(data->m_buffer, &msg.Data()[0], len);
data->m_wBuf.buf = data->m_buffer;
data->m_wBuf.len = len;
}
void StartSending(const SendData& msg)
{
DEBUG_LOG1("device name: %s", Name().c_str());
int rc = 0;
DWORD bytesSent = 0;
DWORD flags = 0;
SOCKET sock = Socket();
int addrSize = sizeof(sockaddr_in);
CriticalSectionLocker lock(&m_objectLock);
//UpdateOverlapped(OverlappedEx::OP_WRITE);
auto olp = GetBuffer();
if (!olp)
{
if (...)
{
m_olps.push_back(new OverlappedEx);
olp = GetBuffer();
DEBUG_LOG2("name: %s ************* NO AVAILABLE BUFFERS new size: %d ***************", Name().c_str(), m_olps.size());
}
else
{
if (...)
{
DEBUG_LOG1("Name: %s ************* NO AVAILABLE BUFFERS - bailing ***************", Name().c_str());
}
return;
}
}
olp->Clear();
olp->m_opType = OverlappedEx::OP_WRITE;
olp->AddRef();
SetWriteBuf(msg, olp);
switch(GetNetworkType())
{
case UDP:
rc = WSASendTo(Socket(), &olp->m_wBuf, 1,
&bytesSent, flags, (sockaddr*)&msg.SendAddress(),
addrSize, (OVERLAPPED*)olp, NULL);
break;
case TCP:
rc = WSASend(Socket(), &olp->m_wBuf, 1,
&bytesSent, flags, (OVERLAPPED*)olp, NULL);
break;
}
if (SOCKET_ERROR == rc)
{
DWORD err = WSAGetLastError();
if (err != WSA_IO_PENDING)
{
olp->Release();
m_olps.push_back(olp);
}
}
}
size_t ReceiveQueueSize()
{
CriticalSectionLocker lock(&m_receiveQueLock);
return m_receiveDataQueue.size();
}
void GetAllData(vector <ReceivedData> & data)
{
CriticalSectionLocker lock(&m_receiveQueLock);
while (m_receiveDataQueue.size() > 0)
{
data.push_back(m_receiveDataQueue.front());
m_receiveDataQueue.pop_front();
}
}
void DequeReceiveMessage(ReceivedData& msg)
{
CriticalSectionLocker lock(&m_receiveQueLock);
if (m_receiveDataQueue.size() > 0)
{
msg = m_receiveDataQueue.front();
m_receiveDataQueue.pop_front();
}
}
template <class T>
void EnqueReceiveMessage(T&& data)
{
CriticalSectionLocker lock(&m_receiveQueLock);
if (m_receiveDataQueue.size() <= MAX_RECEIVE_QUEUE_SIZE)
{
m_receiveDataQueue.push_back(data);
}
else
{
static int s_nLogCount = 0;
if (s_nLogCount % 100 == 0)
{
DEBUG_LOG2("Max queue size was reached handle id: %d in %s", Handle(), Name().c_str());
}
s_nLogCount++;
}
}
NetworkType GetNetworkType() const
{
return m_netType;
}
private:
ServerData(const ServerData&);
ServerData& operator=(const ServerData&);
private:
bool m_bEnabled; //!< This member flags if this reciever is enabled for receiving incoming connections.
int m_Handle; //!< This member holds the handle for this receiver.
SOCKET m_Socket; //!< This member holds the socket information for this receiver.
unsigned long m_nIPAddress; //!< This member holds an IP address the socket is bound to.
deque < ReceivedData > m_receiveDataQueue;
CRITICAL_SECTION m_receiveQueLock;
CRITICAL_SECTION m_objectLock;
string m_sName;
NetworkType m_netType;
deque<OverlappedEx*> m_olps;
size_t m_ovlpIndex;
};
#endif
Your implementation of void Release() makes no sense - you decrement m_refCount, and then what? It must be:
void Release()
{
    if (!InterlockedDecrement(&m_refCount)) delete this;
}
As a result you never free the OverlappedEx* data - that is what I see at first glance, and it gives you the memory leak.
One more piece of advice: using WaitForSingleObject(CCommunicationManager::s_hShutdownEvent, 0) is a bad way to detect shutdown. Call only GetQueuedCompletionStatus, and for shutdown call PostQueuedCompletionStatus(s_IOCompletionPort, 0, 0, 0) several times (once for each thread listening on s_IOCompletionPort); when a thread sees pOverlapped == 0, it simply exits.
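A short sketch of that shutdown pattern, reusing the names from the code above (nWorkerThreads is a hypothetical count of the threads blocked on the port):

// Shutdown: wake every worker with a null completion packet.
for (int i = 0; i < nWorkerThreads; ++i)
    PostQueuedCompletionStatus(s_IOCompletionPort, 0, 0, nullptr);

// Worker loop: block on the port only, no separate shutdown event.
for (;;)
{
    DWORD dwBytesTransfered = 0;
    ULONG_PTR completionKey = 0;
    OVERLAPPED *pOverlapped = nullptr;
    BOOL ok = GetQueuedCompletionStatus(s_IOCompletionPort, &dwBytesTransfered,
                                        &completionKey, &pOverlapped, INFINITE);
    if (pOverlapped == nullptr)   // the sentinel posted above (or a fatal port error)
        break;
    // ok == FALSE with a non-null pOverlapped means a failed I/O;
    // its OverlappedEx must still be released.
    // ... handle the completion as before ...
}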
Use
OverlappedEx* data = static_cast<OverlappedEx*>(pOverlapped);
instead of reinterpret_cast.
Make ~OverlappedEx() private - it must not be called directly, only via Release().
Regarding
olp->Release();
m_olps.push_back(olp);
after you call Release() on an object you must not access it again, so call either olp->Release() or m_olps.push_back(olp), but not both; doing both breaks the whole logic of Release(). Maybe you should override operator delete of OverlappedEx and have it call m_olps.push_back(olp) - and of course override operator new too.
Again, about (OVERLAPPED*)olp - why an explicit cast here at all? Because you derive your own struct from OVERLAPPED, the compiler performs this conversion automatically.
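A hypothetical, simplified sketch of that last idea, using a static free list rather than the per-ServerData m_olps deque and with no locking, just to show the mechanism:

#include <cstddef>
#include <deque>
#include <winsock2.h>   // OVERLAPPED, WSABUF

struct OverlappedEx : OVERLAPPED
{
    // ... buffers, m_opType, m_refCount as in the question ...

    static std::deque<void*> s_pool;   // recycled blocks (needs a lock in real code)

    void* operator new(std::size_t sz)
    {
        if (!s_pool.empty())           // reuse a recycled block if one is available
        {
            void* p = s_pool.front();
            s_pool.pop_front();
            return p;
        }
        return ::operator new(sz);     // otherwise allocate normally
    }

    void operator delete(void* p)
    {
        s_pool.push_back(p);           // recycle instead of freeing, so "delete this"
    }                                  // in Release() returns the block to the pool
};

std::deque<void*> OverlappedEx::s_pool;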