I am converting our code to use IOCP, and I have the communication relatively stable, but the application's memory usage keeps increasing. It looks like far fewer OverlappedEx objects come back through the completion calls than I create. My code is below. What am I doing wrong?
#ifndef NETWORK_DATA
#define NETWORK_DATA
#include <afxwin.h>
#include <vector>
#include <string>
#include "CriticalSectionLocker.h"
using namespace std;
DWORD NetworkManager::NetworkThread(void* param)
{
bool bRun = true;
while (bRun)
{
DWORD wait = ::WaitForSingleObject(CCommunicationManager::s_hShutdownEvent, 0);
if (WAIT_OBJECT_0 == wait)
{
bRun = false;
DEBUG_LOG0("Shutdown event was signalled thread");
}
else
{
DWORD dwBytesTransfered = 0;
void* lpContext = nullptr;
OVERLAPPED* pOverlapped = nullptr;
BOOL bReturn = GetQueuedCompletionStatus(s_IOCompletionPort,
&dwBytesTransfered,
(LPDWORD)&lpContext,
&pOverlapped,
INFINITE);
if (nullptr == lpContext)
{
DEBUG_LOG0("invalid context");
/*continue;*/
}
else
{
if (bReturn && dwBytesTransfered > 0)
{
OverlappedEx* data = reinterpret_cast<OverlappedEx*>(pOverlapped);
ServerData* networkData = reinterpret_cast<ServerData*>(lpContext);
if (networkData && data)
{
switch(data->m_opType)
{
case OverlappedEx::OP_READ:
/*DEBUG_LOG4("device name: %s bytes received: %d socket: %d handle: %d",
networkData->Name().c_str(), dwBytesTransfered, networkData->Socket(), networkData->Handle());*/
networkData->CompleteReceive(dwBytesTransfered, data);
break;
case OverlappedEx::OP_WRITE:
/*DEBUG_LOG4("device name: %s bytes sent: %d socket: %d handle: %d",
networkData->Name().c_str(), dwBytesTransfered, networkData->Socket(), networkData->Handle());*/
networkData->CompleteSend(dwBytesTransfered, data);
break;
}
}
}
else
{
/*DEBUG_LOG2("GetQueuedCompletionStatus failed: bReturn: %d dwBytesTransferred: %u", bReturn, dwBytesTransfered);*/
}
}
}
}
return 0;
}
enum NetworkType
{
UDP,
TCP
};
struct OverlappedEx : public OVERLAPPED
{
enum OperationType
{
OP_READ,
OP_WRITE
};
const static int MAX_PACKET_SIZE = 2048;
WSABUF m_wBuf;
char m_buffer[MAX_PACKET_SIZE];
OperationType m_opType;
OverlappedEx()
{
Clear();
m_refCount = 1;
}
void AddRef()
{
::InterlockedIncrement(&m_refCount);
}
void Release()
{
::InterlockedDecrement(&m_refCount);
}
int Refcount() const
{
return InterlockedExchangeAdd((unsigned long*)&m_refCount, 0UL);
}
~OverlappedEx()
{
Clear();
}
void Clear()
{
memset(m_buffer, 0, MAX_PACKET_SIZE);
m_wBuf.buf = m_buffer;
m_wBuf.len = MAX_PACKET_SIZE;
Internal = 0;
InternalHigh = 0;
Offset = 0;
OffsetHigh = 0;
hEvent = nullptr;
m_opType = OP_READ;
}
private:
volatile LONG m_refCount;
};
class ServerData
{
public:
const static int MAX_REVEIVE_QUEUE_SIZE = 100;
const static int MAX_PACKET_SIZE = 2048;
const static int MAX_SEND_QUEUE_SIZE = 10;
const static int MAX_RECEIVE_QUEUE_SIZE = 100;
const static int MAX_OVERLAPPED_STRUCTS = 20;
ServerData(NetworkType netType, const string& sName, CCommunicationManager::CommHandle handle,
SOCKET sock, HANDLE IOPort) :
m_sName(sName)
{
InitializeCriticalSection(&m_receiveQueLock);
InitializeCriticalSection(&m_objectLock);
m_Handle = handle;
m_Socket = sock;
m_nIPAddress = 0;
m_netType = netType;
m_bEnabled = true;
m_ovlpIndex = 0;
for (int i = 0; i < MAX_OVERLAPPED_STRUCTS; ++i)
{
m_olps.push_back(new OverlappedEx);
}
/* Associate socket with completion handle */
if (m_Socket != 0)
{
CreateIoCompletionPort( reinterpret_cast<HANDLE>(m_Socket), IOPort, reinterpret_cast<ULONG_PTR>(this), 0 );
}
}
~ServerData()
{
CriticalSectionLocker lock(&m_receiveQueLock);
DeleteCriticalSection(&m_receiveQueLock);
DeleteCriticalSection(&m_objectLock);
closesocket(m_Socket);
}
const string& Name() const { return m_sName; }
bool Enabled() const { return m_bEnabled; }
void SetEnabled(bool bEnabled)
{
m_bEnabled = bEnabled;
}
int Handle() const { return m_Handle; }
void SetHandle(int handle)
{
m_Handle = handle;
}
unsigned long IPAddress() const { return m_nIPAddress; }
SOCKET Socket() const
{
return m_Socket;
}
void SetSocket(SOCKET sock)
{
m_Socket = sock;
}
void SetIPAddress(unsigned long nIP)
{
m_nIPAddress = nIP;
}
bool ValidTelegram(const vector<char>& telegram) const
{
return false;
}
OverlappedEx* GetBuffer()
{
OverlappedEx* ret = nullptr;
if (!m_olps.empty())
{
ret = m_olps.front();
m_olps.pop_front();
}
return ret;
}
void CompleteReceive(size_t numBytes, OverlappedEx* data)
{
//DEBUG_LOG1("%d buffers are available", AvailableBufferCount());
if (numBytes > 0)
{
vector<char> v(data->m_buffer, data->m_buffer + numBytes);
ReceivedData rd;
rd.SetData(v);
EnqueReceiveMessage(rd);
}
data->Release();
{
CriticalSectionLocker lock(&m_objectLock);
m_olps.push_back(data);
// DEBUG_LOG1("Queue size: %d", m_olps.size());
}
StartReceiving();
}
void CompleteSend(size_t numBytes, OverlappedEx* data)
{
data->Release();
{
CriticalSectionLocker lock(&m_objectLock);
m_olps.push_back(data);
//DEBUG_LOG1("Queue size: %d", m_olps.size());
}
//DEBUG_LOG2("Object: %s num sent: %d", Name().c_str(), numBytes);
}
void StartReceiving()
{
DWORD bytesRecv = 0;
sockaddr_in senderAddr;
DWORD flags = 0;
int senderAddrSize = sizeof(senderAddr);
int rc = 0;
CriticalSectionLocker lock(&m_objectLock);
auto olp = GetBuffer();
if (!olp)
{
if (...)
{
m_olps.push_back(new OverlappedEx);
olp = GetBuffer();
}
else
{
if (...)
{
DEBUG_LOG1("Name: %s ************* NO AVAILABLE BUFFERS - bailing ***************", Name().c_str());
}
return;
}
}
olp->Clear();
olp->m_opType = OverlappedEx::OP_READ;
olp->AddRef();
switch(GetNetworkType())
{
case UDP:
{
rc = WSARecvFrom(Socket(),
&olp->m_wBuf,
1,
&bytesRecv,
&flags,
(SOCKADDR *)&senderAddr,
&senderAddrSize, (OVERLAPPED*)olp, NULL);
}
break;
case TCP:
{
rc = WSARecv(Socket(),
&olp->m_wBuf,
1,
&bytesRecv,
&flags,
(OVERLAPPED*)olp, NULL);
}
break;
}
if (SOCKET_ERROR == rc)
{
DWORD err = WSAGetLastError();
if (err != WSA_IO_PENDING)
{
olp->Release();
m_olps.push_back(olp);
}
}
}
void SetWriteBuf(const SendData& msg, OverlappedEx* data)
{
int len = min(msg.Data().size(), MAX_PACKET_SIZE);
memcpy(data->m_buffer, &msg.Data()[0], len);
data->m_wBuf.buf = data->m_buffer;
data->m_wBuf.len = len;
}
void StartSending(const SendData& msg)
{
DEBUG_LOG1("device name: %s", Name().c_str());
int rc = 0;
DWORD bytesSent = 0;
DWORD flags = 0;
SOCKET sock = Socket();
int addrSize = sizeof(sockaddr_in);
CriticalSectionLocker lock(&m_objectLock);
//UpdateOverlapped(OverlappedEx::OP_WRITE);
auto olp = GetBuffer();
if (!olp)
{
if (...)
{
m_olps.push_back(new OverlappedEx);
olp = GetBuffer();
DEBUG_LOG2("name: %s ************* NO AVAILABLE BUFFERS new size: %d ***************", Name().c_str(), m_olps.size());
}
else
{
if (...)
{
DEBUG_LOG1("Name: %s ************* NO AVAILABLE BUFFERS - bailing ***************", Name().c_str());
}
return;
}
}
olp->Clear();
olp->m_opType = OverlappedEx::OP_WRITE;
olp->AddRef();
SetWriteBuf(msg, olp);
switch(GetNetworkType())
{
case UDP:
rc = WSASendTo(Socket(), &olp->m_wBuf, 1,
&bytesSent, flags, (sockaddr*)&msg.SendAddress(),
addrSize, (OVERLAPPED*)olp, NULL);
break;
case TCP:
rc = WSASend(Socket(), &olp->m_wBuf, 1,
&bytesSent, flags, (OVERLAPPED*)olp, NULL);
break;
}
if (SOCKET_ERROR == rc)
{
DWORD err = WSAGetLastError();
if (err != WSA_IO_PENDING)
{
olp->Release();
m_olps.push_back(olp);
}
}
}
size_t ReceiveQueueSize()
{
CriticalSectionLocker lock(&m_receiveQueLock);
return m_receiveDataQueue.size();
}
void GetAllData(vector <ReceivedData> & data)
{
CriticalSectionLocker lock(&m_receiveQueLock);
while (m_receiveDataQueue.size() > 0)
{
data.push_back(m_receiveDataQueue.front());
m_receiveDataQueue.pop_front();
}
}
void DequeReceiveMessage(ReceivedData& msg)
{
CriticalSectionLocker lock(&m_receiveQueLock);
if (m_receiveDataQueue.size() > 0)
{
msg = m_receiveDataQueue.front();
m_receiveDataQueue.pop_front();
}
}
template <class T>
void EnqueReceiveMessage(T&& data)
{
CriticalSectionLocker lock(&m_receiveQueLock);
if (m_receiveDataQueue.size() <= MAX_RECEIVE_QUEUE_SIZE)
{
m_receiveDataQueue.push_back(data);
}
else
{
static int s_nLogCount = 0;
if (s_nLogCount % 100 == 0)
{
DEBUG_LOG2("Max queue size was reached handle id: %d in %s", Handle(), Name().c_str());
}
s_nLogCount++;
}
}
NetworkType GetNetworkType() const
{
return m_netType;
}
private:
ServerData(const ServerData&);
ServerData& operator=(const ServerData&);
private:
bool m_bEnabled; //!< This member flags if this reciever is enabled for receiving incoming connections.
int m_Handle; //!< This member holds the handle for this receiver.
SOCKET m_Socket; //!< This member holds the socket information for this receiver.
unsigned long m_nIPAddress; //!< This member holds an IP address the socket is bound to.
deque < ReceivedData > m_receiveDataQueue;
CRITICAL_SECTION m_receiveQueLock;
CRITICAL_SECTION m_objectLock;
string m_sName;
NetworkType m_netType;
deque<OverlappedEx*> m_olps;
size_t m_ovlpIndex;
};
#endif
Your implementation of void Release() makes no sense: you decrement m_refCount, and then what? It must be
void Release()
{
if (!InterlockedDecrement(&m_refCount)) delete this;
}
As a result you never free OverlappedEx* data; that is what I see at first glance, and it is what gives you the memory leak.
Some more advice: WaitForSingleObject(CCommunicationManager::s_hShutdownEvent, 0) is a bad way to detect shutdown. Call only GetQueuedCompletionStatus, and to shut down call PostQueuedCompletionStatus(s_IOCompletionPort, 0, 0, 0) several times (once per thread listening on s_IOCompletionPort); when a thread sees pOverlapped == 0, it just exits.
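A minimal sketch of that shutdown pattern (the names and the worker signature are illustrative, not taken from the question's code):
#include <windows.h>
// Post one dummy completion per worker thread; a null OVERLAPPED is the exit signal.
void RequestShutdown(HANDLE iocp, int workerCount)
{
    for (int i = 0; i < workerCount; ++i)
        ::PostQueuedCompletionStatus(iocp, 0, 0, nullptr);
}
DWORD WINAPI WorkerThread(LPVOID param)
{
    HANDLE iocp = static_cast<HANDLE>(param);
    for (;;)
    {
        DWORD bytesTransferred = 0;
        ULONG_PTR completionKey = 0;
        OVERLAPPED* pOverlapped = nullptr;
        BOOL ok = ::GetQueuedCompletionStatus(iocp, &bytesTransferred,
                                              &completionKey, &pOverlapped, INFINITE);
        if (pOverlapped == nullptr)   // the shutdown marker posted above
            break;
        // ...otherwise dispatch the completion as before, using ok, bytesTransferred
        // and completionKey...
        (void)ok;
    }
    return 0;
}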
use
OverlappedEx* data = static_cast<OverlappedEx*>(pOverlapped);
instead of reinterpret_cast
Make ~OverlappedEx() private; it must not be called directly, only via Release().
olp->Release();
m_olps.push_back(olp);
After you call Release() on an object you must not access it again, so call either olp->Release() or m_olps.push_back(olp), but not both. Doing both defeats the whole point of Release(). Maybe you should overload operator delete for OverlappedEx and push the block back onto the pool inside it, and of course overload operator new too.
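One way that advice could look in code (a sketch only: the class-level free list, the mutex and the trimmed-down members are assumptions, and the buffer members and Clear() of the original struct are omitted for brevity):
#include <windows.h>
#include <cstddef>
#include <deque>
#include <mutex>
struct OverlappedEx : OVERLAPPED
{
    OverlappedEx() { ZeroMemory(static_cast<OVERLAPPED*>(this), sizeof(OVERLAPPED)); }
    void AddRef() { ::InterlockedIncrement(&m_refCount); }
    void Release()
    {
        if (::InterlockedDecrement(&m_refCount) == 0)
            delete this;                    // runs the private destructor, then operator delete
    }
    static void* operator new(std::size_t size)
    {
        std::lock_guard<std::mutex> lock(s_poolLock);
        if (!s_pool.empty())
        {
            void* p = s_pool.front();       // reuse a recycled block
            s_pool.pop_front();
            return p;
        }
        return ::operator new(size);
    }
    static void operator delete(void* p)
    {
        std::lock_guard<std::mutex> lock(s_poolLock);
        s_pool.push_back(p);                // recycle the block instead of freeing it
    }
private:
    ~OverlappedEx() {}                      // forces destruction to go through Release()
    volatile LONG m_refCount = 1;
    static std::deque<void*> s_pool;        // never shrinks in this sketch
    static std::mutex s_poolLock;
};
std::deque<void*> OverlappedEx::s_pool;
std::mutex OverlappedEx::s_poolLock;
With this shape, CompleteReceive()/CompleteSend() would only call data->Release() and never push the pointer back into a container themselves.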
Again, why the (OVERLAPPED*)olp cast? Because you derive your own struct from OVERLAPPED, the compiler performs that conversion automatically.
Related
I'm developing an MMORPG with IOCP sockets. When I test with my client simulator, after 70-80 connections the write operations in the socket worker thread slow down and users start to lag.
This is my worker thread:
typedef void(*OperationHandler)(Socket * s, uint32 len);
void HandleReadComplete(Socket * s, uint32 len);
void HandleWriteComplete(Socket * s, uint32 len);
void HandleShutdown(Socket * s, uint32 len);
static OperationHandler ophandlers[] =
{
&HandleReadComplete,
&HandleWriteComplete,
&HandleShutdown
};
uint32 THREADCALL SocketMgr::SocketWorkerThread(void * lpParam){
SocketMgr *socketMgr = (SocketMgr *)lpParam;
HANDLE cp = socketMgr->GetCompletionPort();
DWORD len;
Socket * s = nullptr;
OverlappedStruct * ov = nullptr;
LPOVERLAPPED ol_ptr;
while (socketMgr->m_bWorkerThreadsActive)
{
if (!GetQueuedCompletionStatus(cp, &len, (LPDWORD)&s, &ol_ptr, INFINITE))
{
if (s != nullptr)
s->Disconnect();
continue;
}
ov = CONTAINING_RECORD(ol_ptr, OverlappedStruct, m_overlap);
if (ov->m_event == SOCKET_IO_THREAD_SHUTDOWN)
{
delete ov;
return 0;
}
if (ov->m_event < NUM_SOCKET_IO_EVENTS)
ophandlers[ov->m_event](s, len);
}
return 0;}
This is the write-complete event handler:
void HandleWriteComplete(Socket * s, uint32 len)
{
if (s->IsDeleted()) {
return;
}
s->m_writeEvent.Unmark();
s->BurstBegin(); // Lock
s->GetWriteBuffer().Remove(len);
TRACE("SOCK = %d removed = %d",s->GetFd(),len);
if (s->GetWriteBuffer().GetContiguousBytes() > 0) {
s->WriteCallback();
}
else {
s->DecSendLock();
}
s->BurstEnd(); // Unlock
}
The WriteCallback function:
void Socket::WriteCallback()
{
if (IsDeleted() || !IsConnected()) {
return;
}
// We don't want any writes going on while this is happening.
Guard lock(m_writeMutex);
if(writeBuffer.GetContiguousBytes())
{
DWORD w_length = 0;
DWORD flags = 0;
// attempt to push all the data out in a non-blocking fashion.
WSABUF buf;
buf.len = (ULONG)writeBuffer.GetContiguousBytes();
buf.buf = (char*)writeBuffer.GetBufferStart();
m_writeEvent.Mark();
m_writeEvent.Reset(SOCKET_IO_EVENT_WRITE_END);
TRACE("\n SOCK = %d aslında giden = %X THREADID = %d", GetFd(), buf.buf[4],GetCurrentThreadId());
int r = WSASend(m_fd, &buf, 1, &w_length, flags, &m_writeEvent.m_overlap, 0);
if (r == SOCKET_ERROR && WSAGetLastError() != WSA_IO_PENDING)
{
m_writeEvent.Unmark();
DecSendLock();
Disconnect();
}
}
else
{
// Write operation is completed.
DecSendLock();
}
}
This is the relevant code for the write lock and mutexes:
public:
/* Atomic wrapper functions for increasing read/write locks */
INLINE void IncSendLock() { Guard lock(m_writeMutex); ++m_writeLock; }
INLINE void DecSendLock() { Guard lock(m_writeMutex);
--m_writeLock; }
INLINE bool HasSendLock() {Guard lock(m_writeMutex); return (m_writeLock != 0); }
INLINE bool AcquireSendLock()
{
Guard lock(m_writeMutex);
if (m_writeLock != 0)
return false;
++m_writeLock;
return true;
}
private:
// Write lock, stops multiple write events from being posted.
uint32 m_writeLock;
std::recursive_mutex m_writeLockMutex;
This is the read event handler:
void HandleReadComplete(Socket * s, uint32 len)
{
if (s->IsDeleted())
return;
s->m_readEvent.Unmark();
if (len)
{
s->GetReadBuffer().IncrementWritten(len);
s->OnRead();
s->SetupReadEvent();
}
else
{
// s->Delete(); // Queue deletion.
s->Disconnect();
}
}
The OnRead function:
void KOSocket::OnRead()
{
Packet pkt;
for (;;)
{
if (m_remaining == 0)
{
if (GetReadBuffer().GetSize() < 5) {
//TRACE("pkt returnzzz GetFd %d", GetFd());
return; //check for opcode as well
}
uint16 header = 0;
GetReadBuffer().Read(&header, 2);
//printf("header : %X", header);//derle at k
if (header != 0x55AA)
{
TRACE("%s: Got packet without header 0x55AA, got 0x%X\n", GetRemoteIP().c_str(), header);
goto error_handler;
}
GetReadBuffer().Read(&m_remaining, 2);
if (m_remaining == 0)
{
TRACE("%s: Got packet without an opcode, this should never happen.\n", GetRemoteIP().c_str());
goto error_handler;
}
}
if (m_remaining > GetReadBuffer().GetAllocatedSize())
{
TRACE("%s: Packet received which was %u bytes in size, maximum of %u.\n", GetRemoteIP().c_str(), m_remaining, GetReadBuffer().GetAllocatedSize());
goto error_handler;
}
if (m_remaining > GetReadBuffer().GetSize())
{
if (m_readTries > 4)
{
TRACE("%s: packet fragmentation count is over 4, disconnecting as they're probably up to something bad\n", GetRemoteIP().c_str());
goto error_handler;
}
m_readTries++;
return;
}
uint8 *in_stream = new uint8[m_remaining];
m_readTries = 0;
GetReadBuffer().Read(in_stream, m_remaining);
uint16 footer = 0;
GetReadBuffer().Read(&footer, 2);
if (footer != 0xAA55
|| !DecryptPacket(in_stream, pkt))
{
TRACE("%s: Footer invalid (%X) or failed to decrypt.\n", GetRemoteIP().c_str(), footer);
delete [] in_stream;
goto error_handler;
}
delete [] in_stream;
// Update the time of the last (valid) response from the client.
m_lastResponse = UNIXTIME2;
//TRACE("pkt:%d GetFd %d", pkt.GetOpcode(), GetFd());
if (!HandlePacket(pkt))
{
TRACE("%s: Handler for packet %X returned false\n", GetRemoteIP().c_str(), pkt.GetOpcode());
#ifndef _DEBUG
goto error_handler;
#endif
}
m_remaining = 0;
}
//TRACE("pkt return11 GetFd %d", GetFd());
return;
error_handler:
Disconnect();
}
And this is how my server sends packets to clients:
BurstBegin();
//TRACE("\n SOCK = %d FREE SPACE = %d ",GetFd(),GetWriteBuffer().GetSpace()/*, GetWriteBuffer().m_writeLock*/);
if (GetWriteBuffer().GetSpace() < size_t(len + 6))
{
size_t freespace = GetWriteBuffer().GetSpace();
BurstEnd();
Disconnect();
return false;
}
TRACE("\n SOCK = %d gitmesi gereken paket = %X THREADID = %d", GetFd(), out_stream[0],GetCurrentThreadId());
r = BurstSend((const uint8*)"\xaa\x55", 2);
if (r) r = BurstSend((const uint8*)&len, 2);
if (r) r = BurstSend((const uint8*)out_stream, len);
if (r) r = BurstSend((const uint8*)"\x55\xaa", 2);
if (r) BurstPush();
BurstEnd();
The number of worker threads is set according to the processor count:
for(int i = 0; i < numberOfWorkerThreads; i++)
{
m_thread[i] = new Thread(SocketWorkerThread, this);
}
The server I tested on has an Intel Xeon E5-2630 v3 2.40 GHz (2 processors).
Can you guys help me improve performance? For example, when a client moves on the map it sends a move packet to the server every 1.5 seconds, and if it succeeds the server sends that move packet to every client in that region.
As the client count increases, the server gets slower at sending packets back to the clients. My write buffer (capacity 16384 bytes) fills up completely, so the server cannot send the packets sitting in the write buffer.
I have a C++ project. After compiler support for C++20 was released I wanted to bump my makefile from -std=c++17 to -std=c++20, and at that point my compiler (GCC 10.2) gives me an error like this:
Error
In file included from /usr/local/lib/gcc10/include/c++/bits/node_handle.h:39,
from /usr/local/lib/gcc10/include/c++/bits/stl_tree.h:72,
from /usr/local/lib/gcc10/include/c++/map:60,
from AsyncSQL.h:10,
from AsyncSQL.cpp:4:
/usr/local/lib/gcc10/include/c++/optional: In function 'constexpr std::strong_ordering std::operator<=>(const std::optional<_Tp>&, std::nullopt_t)':
/usr/local/lib/gcc10/include/c++/optional:1052:24: error: invalid operands of types 'bool' and 'int' to binary 'operator<=>'
1052 | { return bool(__x) <=> false; }
| ~~~~~~~~~ ^~~
| |
| bool
gmake[2]: *** [Makefile:23: AsyncSQL.o] Error 1
This is my AsyncSQL.cpp:
#include <sys/time.h>
#include <cstdlib>
#include <cstring>
#include "AsyncSQL.h"
#define MUTEX_LOCK(mtx) pthread_mutex_lock(mtx)
#define MUTEX_UNLOCK(mtx) pthread_mutex_unlock(mtx)
CAsyncSQL::CAsyncSQL(): m_stHost (""), m_stUser (""), m_stPassword (""), m_stDB (""), m_stLocale (""), m_iMsgCount (0), m_iPort (0), m_bEnd (false), m_hThread (0), m_mtxQuery (NULL), m_mtxResult (NULL), m_iQueryFinished (0), m_ulThreadID (0), m_bConnected (false), m_iCopiedQuery (0)
{
memset (&m_hDB, 0, sizeof (m_hDB));
m_aiPipe[0] = 0;
m_aiPipe[1] = 0;
}
CAsyncSQL::~CAsyncSQL()
{
Quit();
Destroy();
}
void CAsyncSQL::Destroy()
{
if (m_hDB.host)
{
sys_log (0, "AsyncSQL: closing mysql connection.");
mysql_close (&m_hDB);
m_hDB.host = NULL;
}
if (m_mtxQuery)
{
pthread_mutex_destroy (m_mtxQuery);
delete m_mtxQuery;
m_mtxQuery = NULL;
}
if (m_mtxResult)
{
pthread_mutex_destroy (m_mtxResult);
delete m_mtxResult;
m_mtxQuery = NULL;
}
}
void* AsyncSQLThread (void* arg)
{
CAsyncSQL* pSQL = ((CAsyncSQL*) arg);
if (!pSQL->Connect())
{
return NULL;
}
pSQL->ChildLoop();
return NULL;
}
bool CAsyncSQL::QueryLocaleSet()
{
if (0 == m_stLocale.length())
{
sys_err ("m_stLocale == 0");
return true;
}
if (mysql_set_character_set (&m_hDB, m_stLocale.c_str()))
{
sys_err ("cannot set locale %s by 'mysql_set_character_set', errno %u %s", m_stLocale.c_str(), mysql_errno (&m_hDB) , mysql_error (&m_hDB));
return false;
}
sys_log (0, "\t--mysql_set_character_set(%s)", m_stLocale.c_str());
return true;
}
bool CAsyncSQL::Connect()
{
if (0 == mysql_init (&m_hDB))
{
fprintf (stderr, "mysql_init failed\n");
return false;
}
if (!m_stLocale.empty())
{
if (mysql_options (&m_hDB, MYSQL_SET_CHARSET_NAME, m_stLocale.c_str()) != 0)
{
fprintf (stderr, "mysql_option failed : MYSQL_SET_CHARSET_NAME %s ", mysql_error(&m_hDB));
}
}
if (!mysql_real_connect (&m_hDB, m_stHost.c_str(), m_stUser.c_str(), m_stPassword.c_str(), m_stDB.c_str(), m_iPort, NULL, CLIENT_MULTI_STATEMENTS))
{
fprintf (stderr, "mysql_real_connect: %s\n", mysql_error(&m_hDB));
return false;
}
my_bool reconnect = true;
if (0 != mysql_options (&m_hDB, MYSQL_OPT_RECONNECT, &reconnect))
{
fprintf (stderr, "mysql_option: %s\n", mysql_error(&m_hDB));
}
m_ulThreadID = mysql_thread_id (&m_hDB);
m_bConnected = true;
return true;
}
bool CAsyncSQL::Setup (CAsyncSQL* sql, bool bNoThread)
{
return Setup (sql->m_stHost.c_str(), sql->m_stUser.c_str(), sql->m_stPassword.c_str(), sql->m_stDB.c_str(), sql->m_stLocale.c_str(), bNoThread, sql->m_iPort);
}
bool CAsyncSQL::Setup (const char* c_pszHost, const char* c_pszUser, const char* c_pszPassword, const char* c_pszDB, const char* c_pszLocale, bool bNoThread, int iPort)
{
m_stHost = c_pszHost;
m_stUser = c_pszUser;
m_stPassword = c_pszPassword;
m_stDB = c_pszDB;
m_iPort = iPort;
if (c_pszLocale)
{
m_stLocale = c_pszLocale;
sys_log (0, "AsyncSQL: locale %s", m_stLocale.c_str());
}
if (!bNoThread)
{
m_mtxQuery = new pthread_mutex_t;
m_mtxResult = new pthread_mutex_t;
if (0 != pthread_mutex_init (m_mtxQuery, NULL))
{
perror ("pthread_mutex_init");
exit (0);
}
if (0 != pthread_mutex_init (m_mtxResult, NULL))
{
perror ("pthread_mutex_init");
exit (0);
}
pthread_create (&m_hThread, NULL, AsyncSQLThread, this);
return true;
}
else
{
return Connect();
}
}
void CAsyncSQL::Quit()
{
m_bEnd = true;
m_sem.Release();
if (m_hThread)
{
pthread_join (m_hThread, NULL);
m_hThread = NULL;
}
}
SQLMsg* CAsyncSQL::DirectQuery (const char* c_pszQuery)
{
if (m_ulThreadID != mysql_thread_id (&m_hDB))
{
sys_log (0, "MySQL connection was reconnected. querying locale set");
while (!QueryLocaleSet());
m_ulThreadID = mysql_thread_id (&m_hDB);
}
SQLMsg* p = new SQLMsg;
p->m_pkSQL = &m_hDB;
p->iID = ++m_iMsgCount;
p->stQuery = c_pszQuery;
if (mysql_real_query (&m_hDB, p->stQuery.c_str(), p->stQuery.length()))
{
char buf[1024];
snprintf (buf, sizeof(buf), "AsyncSQL::DirectQuery : mysql_query error: %s\nquery: %s", mysql_error (&m_hDB), p->stQuery.c_str());
sys_err (buf);
p->uiSQLErrno = mysql_errno (&m_hDB);
}
p->Store();
return p;
}
void CAsyncSQL::AsyncQuery (const char* c_pszQuery)
{
auto p = new SQLMsg;
p->m_pkSQL = &m_hDB;
p->iID = ++m_iMsgCount;
p->stQuery = c_pszQuery;
PushQuery (p);
}
void CAsyncSQL::ReturnQuery (const char* c_pszQuery, void* pvUserData)
{
auto p = new SQLMsg;
p->m_pkSQL = &m_hDB;
p->iID = ++m_iMsgCount;
p->stQuery = c_pszQuery;
p->bReturn = true;
p->pvUserData = pvUserData;
PushQuery (p);
}
void CAsyncSQL::PushResult (SQLMsg* p)
{
MUTEX_LOCK (m_mtxResult);
m_queue_result.push (p);
MUTEX_UNLOCK (m_mtxResult);
}
bool CAsyncSQL::PopResult(SQLMsg** pp)
{
MUTEX_LOCK (m_mtxResult);
if (m_queue_result.empty())
{
MUTEX_UNLOCK (m_mtxResult);
return false;
}
*pp = m_queue_result.front();
m_queue_result.pop();
MUTEX_UNLOCK (m_mtxResult);
return true;
}
void CAsyncSQL::PushQuery (SQLMsg* p)
{
MUTEX_LOCK (m_mtxQuery);
m_queue_query.push (p);
m_sem.Release();
MUTEX_UNLOCK (m_mtxQuery);
}
bool CAsyncSQL::PeekQuery (SQLMsg** pp)
{
MUTEX_LOCK (m_mtxQuery);
if (m_queue_query.empty())
{
MUTEX_UNLOCK (m_mtxQuery);
return false;
}
*pp = m_queue_query.front();
MUTEX_UNLOCK (m_mtxQuery);
return true;
}
bool CAsyncSQL::PopQuery (int iID)
{
MUTEX_LOCK (m_mtxQuery);
if (m_queue_query.empty())
{
MUTEX_UNLOCK (m_mtxQuery);
return false;
}
m_queue_query.pop();
MUTEX_UNLOCK (m_mtxQuery);
return true;
}
bool CAsyncSQL::PeekQueryFromCopyQueue (SQLMsg** pp)
{
if (m_queue_query_copy.empty())
{
return false;
}
*pp = m_queue_query_copy.front();
return true;
}
int CAsyncSQL::CopyQuery()
{
MUTEX_LOCK (m_mtxQuery);
if (m_queue_query.empty())
{
MUTEX_UNLOCK (m_mtxQuery);
return -1;
}
while (!m_queue_query.empty())
{
SQLMsg* p = m_queue_query.front();
m_queue_query_copy.push (p);
m_queue_query.pop();
}
int count = m_queue_query_copy.size();
MUTEX_UNLOCK (m_mtxQuery);
return count;
}
bool CAsyncSQL::PopQueryFromCopyQueue()
{
if (m_queue_query_copy.empty())
{
return false;
}
m_queue_query_copy.pop();
return true;
}
int CAsyncSQL::GetCopiedQueryCount()
{
return m_iCopiedQuery;
}
void CAsyncSQL::ResetCopiedQueryCount()
{
m_iCopiedQuery = 0;
}
void CAsyncSQL::AddCopiedQueryCount (int iCopiedQuery)
{
m_iCopiedQuery += iCopiedQuery;
}
DWORD CAsyncSQL::CountQuery()
{
return m_queue_query.size();
}
DWORD CAsyncSQL::CountResult()
{
return m_queue_result.size();
}
void __timediff (struct timeval* a, struct timeval* b, struct timeval* rslt)
{
if (a->tv_sec < b->tv_sec)
{
rslt->tv_sec = rslt->tv_usec = 0;
}
else if (a->tv_sec == b->tv_sec)
{
if (a->tv_usec < b->tv_usec)
{
rslt->tv_sec = rslt->tv_usec = 0;
}
else
{
rslt->tv_sec = 0;
rslt->tv_usec = a->tv_usec - b->tv_usec;
}
}
else
{
rslt->tv_sec = a->tv_sec - b->tv_sec;
if (a->tv_usec < b->tv_usec)
{
rslt->tv_usec = a->tv_usec + 1000000 - b->tv_usec;
rslt->tv_sec--;
}
else
{
rslt->tv_usec = a->tv_usec - b->tv_usec;
}
}
}
class cProfiler
{
public:
cProfiler()
{
m_nInterval = 0 ;
memset (&prev, 0, sizeof (prev));
memset (&now, 0, sizeof (now));
memset (&interval, 0, sizeof (interval));
Start();
}
cProfiler (int nInterval = 100000)
{
m_nInterval = nInterval;
memset (&prev, 0, sizeof (prev));
memset (&now, 0, sizeof (now));
memset (&interval, 0, sizeof (interval));
Start();
}
void Start()
{
gettimeofday (&prev , (struct timezone*) 0);
}
void Stop()
{
gettimeofday (&now, (struct timezone*) 0);
__timediff (&now, &prev, &interval);
}
bool IsOk()
{
if (interval.tv_sec > (m_nInterval / 1000000))
{
return false;
}
if (interval.tv_usec > m_nInterval)
{
return false;
}
return true;
}
struct timeval* GetResult()
{
return &interval;
}
long GetResultSec()
{
return interval.tv_sec;
}
long GetResultUSec()
{
return interval.tv_usec;
}
private:
int m_nInterval;
struct timeval prev;
struct timeval now;
struct timeval interval;
};
void CAsyncSQL::ChildLoop()
{
cProfiler profiler(500000);
while (!m_bEnd)
{
m_sem.Wait();
int count = CopyQuery();
if (count <= 0)
{
continue;
}
AddCopiedQueryCount (count);
SQLMsg* p;
while (count--)
{
profiler.Start();
if (!PeekQueryFromCopyQueue (&p))
{
continue;
}
if (m_ulThreadID != mysql_thread_id (&m_hDB))
{
sys_log (0, "MySQL connection was reconnected. querying locale set");
while (!QueryLocaleSet());
m_ulThreadID = mysql_thread_id (&m_hDB);
}
if (mysql_real_query (&m_hDB, p->stQuery.c_str(), p->stQuery.length()))
{
p->uiSQLErrno = mysql_errno (&m_hDB);
sys_err ("AsyncSQL: query failed: %s (query: %s errno: %d)", mysql_error (&m_hDB), p->stQuery.c_str(), p->uiSQLErrno);
switch (p->uiSQLErrno)
{
case CR_SOCKET_CREATE_ERROR:
case CR_CONNECTION_ERROR:
case CR_IPSOCK_ERROR:
case CR_UNKNOWN_HOST:
case CR_SERVER_GONE_ERROR:
case CR_CONN_HOST_ERROR:
case ER_NOT_KEYFILE:
case ER_CRASHED_ON_USAGE:
case ER_CANT_OPEN_FILE:
case ER_HOST_NOT_PRIVILEGED:
case ER_HOST_IS_BLOCKED:
case ER_PASSWORD_NOT_ALLOWED:
case ER_PASSWORD_NO_MATCH:
case ER_CANT_CREATE_THREAD:
case ER_INVALID_USE_OF_NULL:
m_sem.Release();
sys_err ("AsyncSQL: retrying");
continue;
}
}
profiler.Stop();
if (!profiler.IsOk())
{
sys_log (0, "[QUERY : LONG INTERVAL(OverSec %ld.%ld)] : %s", profiler.GetResultSec(), profiler.GetResultUSec(), p->stQuery.c_str());
}
PopQueryFromCopyQueue();
if (p->bReturn)
{
p->Store();
PushResult (p);
}
else
{
delete p;
}
++m_iQueryFinished;
}
}
SQLMsg* p;
while (PeekQuery (&p))
{
if (m_ulThreadID != mysql_thread_id (&m_hDB))
{
sys_log (0, "MySQL connection was reconnected. querying locale set");
while (!QueryLocaleSet());
m_ulThreadID = mysql_thread_id (&m_hDB);
}
if (mysql_real_query (&m_hDB, p->stQuery.c_str(), p->stQuery.length()))
{
p->uiSQLErrno = mysql_errno (&m_hDB);
sys_err ("AsyncSQL::ChildLoop : mysql_query error: %s:\nquery: %s", mysql_error (&m_hDB), p->stQuery.c_str());
switch (p->uiSQLErrno)
{
case CR_SOCKET_CREATE_ERROR:
case CR_CONNECTION_ERROR:
case CR_IPSOCK_ERROR:
case CR_UNKNOWN_HOST:
case CR_SERVER_GONE_ERROR:
case CR_CONN_HOST_ERROR:
case ER_NOT_KEYFILE:
case ER_CRASHED_ON_USAGE:
case ER_CANT_OPEN_FILE:
case ER_HOST_NOT_PRIVILEGED:
case ER_HOST_IS_BLOCKED:
case ER_PASSWORD_NOT_ALLOWED:
case ER_PASSWORD_NO_MATCH:
case ER_CANT_CREATE_THREAD:
case ER_INVALID_USE_OF_NULL:
continue;
}
}
sys_log (0, "QUERY_FLUSH: %s", p->stQuery.c_str());
PopQuery (p->iID);
if (p->bReturn)
{
p->Store();
PushResult (p);
}
else
{
delete p;
}
++m_iQueryFinished;
}
}
int CAsyncSQL::CountQueryFinished()
{
return m_iQueryFinished;
}
void CAsyncSQL::ResetQueryFinished()
{
m_iQueryFinished = 0;
}
MYSQL* CAsyncSQL::GetSQLHandle()
{
return &m_hDB;
}
size_t CAsyncSQL::EscapeString (char* dst, size_t dstSize, const char* src, size_t srcSize)
{
if (0 == srcSize)
{
memset (dst, 0, dstSize);
return 0;
}
if (0 == dstSize)
{
return 0;
}
if (dstSize < srcSize * 2 + 1)
{
char tmp[256];
size_t tmpLen = sizeof (tmp) > srcSize ? srcSize : sizeof (tmp);
strlcpy (tmp, src, tmpLen);
sys_err ("FATAL ERROR!! not enough buffer size (dstSize %u srcSize %u src%s: %s)", dstSize, srcSize, tmpLen != srcSize ? "(trimmed to 255 characters)" : "", tmp);
dst[0] = '\0';
return 0;
}
return mysql_real_escape_string (GetSQLHandle(), dst, src, srcSize);
}
void CAsyncSQL2::SetLocale (const std::string & stLocale)
{
m_stLocale = stLocale;
QueryLocaleSet();
}
This is my AsyncSQL.h
#ifndef __INC_METIN_II_ASYNCSQL_H__
#define __INC_METIN_II_ASYNCSQL_H__
#include "../../libthecore/src/stdafx.h"
#include "../../libthecore/src/log.h"
#include "../../Ayarlar.h"
#include <string>
#include <queue>
#include <vector>
#include <map>
#include <mysql/server/mysql.h>
#include <mysql/server/errmsg.h>
#include <mysql/server/mysqld_error.h>
#include "Semaphore.h"
typedef struct _SQLResult
{
_SQLResult(): pSQLResult (NULL), uiNumRows (0), uiAffectedRows (0), uiInsertID (0) {}
~_SQLResult()
{
if (pSQLResult)
{
mysql_free_result (pSQLResult);
pSQLResult = NULL;
}
}
MYSQL_RES* pSQLResult;
uint32_t uiNumRows;
uint32_t uiAffectedRows;
uint32_t uiInsertID;
} SQLResult;
typedef struct _SQLMsg
{
_SQLMsg() : m_pkSQL (NULL), iID (0), uiResultPos (0), pvUserData (NULL), bReturn (false), uiSQLErrno (0) {}
~_SQLMsg()
{
auto first = vec_pkResult.begin();
auto past = vec_pkResult.end();
while (first != past)
{
delete * (first++);
}
vec_pkResult.clear();
}
void Store()
{
do
{
SQLResult* pRes = new SQLResult;
pRes->pSQLResult = mysql_store_result (m_pkSQL);
pRes->uiInsertID = mysql_insert_id (m_pkSQL);
pRes->uiAffectedRows = mysql_affected_rows (m_pkSQL);
if (pRes->pSQLResult)
{
pRes->uiNumRows = mysql_num_rows (pRes->pSQLResult);
}
else
{
pRes->uiNumRows = 0;
}
vec_pkResult.push_back (pRes);
}
while (!mysql_next_result (m_pkSQL));
}
SQLResult* Get()
{
if (uiResultPos >= vec_pkResult.size())
{
return NULL;
}
return vec_pkResult[uiResultPos];
}
bool Next()
{
if (uiResultPos + 1 >= vec_pkResult.size())
{
return false;
}
++uiResultPos;
return true;
}
MYSQL* m_pkSQL;
int iID;
std::string stQuery;
std::vector<SQLResult *> vec_pkResult;
unsigned int uiResultPos;
void* pvUserData;
bool bReturn;
unsigned int uiSQLErrno;
} SQLMsg;
class CAsyncSQL
{
public:
CAsyncSQL();
virtual ~CAsyncSQL();
void Quit();
bool Setup (const char* c_pszHost, const char* c_pszUser, const char* c_pszPassword, const char* c_pszDB, const char* c_pszLocale, bool bNoThread = false, int iPort = 0);
bool Setup (CAsyncSQL* sql, bool bNoThread = false);
bool Connect();
bool IsConnected()
{
return m_bConnected;
}
bool QueryLocaleSet();
void AsyncQuery (const char* c_pszQuery);
void ReturnQuery (const char* c_pszQuery, void* pvUserData);
SQLMsg* DirectQuery (const char* c_pszQuery);
DWORD CountQuery();
DWORD CountResult();
void PushResult (SQLMsg* p);
bool PopResult (SQLMsg** pp);
void ChildLoop();
MYSQL* GetSQLHandle();
int CountQueryFinished();
void ResetQueryFinished();
size_t EscapeString (char* dst, size_t dstSize, const char* src, size_t srcSize);
protected:
void Destroy();
void PushQuery (SQLMsg* p);
bool PeekQuery (SQLMsg** pp);
bool PopQuery (int iID);
bool PeekQueryFromCopyQueue (SQLMsg** pp );
INT CopyQuery();
bool PopQueryFromCopyQueue();
public:
int GetCopiedQueryCount();
void ResetCopiedQueryCount();
void AddCopiedQueryCount (int iCopiedQuery);
protected:
MYSQL m_hDB;
std::string m_stHost;
std::string m_stUser;
std::string m_stPassword;
std::string m_stDB;
std::string m_stLocale;
int m_iMsgCount;
int m_aiPipe[2];
int m_iPort;
std::queue<SQLMsg*> m_queue_query;
std::queue<SQLMsg*> m_queue_query_copy;
std::queue<SQLMsg*> m_queue_result;
volatile bool m_bEnd;
pthread_t m_hThread;
pthread_mutex_t* m_mtxQuery;
pthread_mutex_t* m_mtxResult;
CSemaphore m_sem;
int m_iQueryFinished;
unsigned long m_ulThreadID;
bool m_bConnected;
int m_iCopiedQuery;
};
class CAsyncSQL2 : public CAsyncSQL
{
public:
void SetLocale (const std::string & stLocale);
};
#endif
And this is the function that causes the error, at optional:1052:
#ifdef __cpp_lib_three_way_comparison
template<typename _Tp>
constexpr strong_ordering
operator<=>(const optional<_Tp>& __x, nullopt_t) noexcept
{ return bool(__x) <=> false; }
#else
After seeing a document Microsoft released, I tried writing it as <= > false; and got an error again.
Best regards.
I have no idea why it treats bool(__x) <=> false as a comparison between bool and int.
My guess is that some strange macro in a file of yours, included before the standard header, is breaking the standard library code.
I would suggest moving the standard headers to the top and your 'user defined' headers below them:
#include <string>
#include <queue>
#include <vector>
#include <map>
#include <mysql/server/mysql.h>
#include <mysql/server/errmsg.h>
#include <mysql/server/mysqld_error.h>
#include "../../libthecore/src/stdafx.h"
#include "../../libthecore/src/log.h"
#include "../../Ayarlar.h"
#include "Semaphore.h"
EDIT:
I have found the cause of the problem: a macro defined in "libthecore/stdafx.h" (I have the same files the author is using; they are public).
#ifndef false
#define false 0
#define true (!false)
#endif
It causes false to be read as an int, which makes the spaceship operator fail with the error shown by the author. Move the standard headers up, or remove the macro, to fix the error.
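A minimal reproduction of the clash (a sketch; assuming GCC 10 with -std=c++20, merely including <optional>, directly or via <map>, should trip the same diagnostic, because the macro turns the library's bool(__x) <=> false into bool(__x) <=> 0, a bool/int comparison that operator<=> rejects):
// reproduce.cpp -- compile with: g++ -std=c++20 -c reproduce.cpp
#define false 0
#define true (!false)
#include <optional>   // error points at operator<=>(const optional<_Tp>&, nullopt_t)
int main() { return 0; }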
I am trying to set up the Interactive Brokers API on Ubuntu 18.04. I have installed both the IB Gateway, which is used for communicating with exchanges, and the other API software for developing trading algorithms in Java, C++, C# and Python (which you can find here). The API is written in both Java and C++ and, as stated, offers support for both. However, when attempting to compile an example from their source code there is an error in the EReader.cpp file. I have dealt with several other C++ errors in their code, but this one I cannot figure out. Here is the code:
#include "StdAfx.h"
#include "shared_ptr.h"
#include "Contract.h"
#include "EDecoder.h"
#include "EMutex.h"
#include "EReader.h"
#include "EClientSocket.h"
#include "EPosixClientSocketPlatform.h"
#include "EReaderSignal.h"
#include "EMessage.h"
#include "DefaultEWrapper.h"
#define IN_BUF_SIZE_DEFAULT 8192
static DefaultEWrapper defaultWrapper;
EReader::EReader(EClientSocket *clientSocket, EReaderSignal *signal)
: processMsgsDecoder_(clientSocket->EClient::serverVersion(), clientSocket->getWrapper(), clientSocket) {
m_isAlive = true;
m_pClientSocket = clientSocket;
m_pEReaderSignal = signal;
m_needsWriteSelect = false;
m_nMaxBufSize = IN_BUF_SIZE_DEFAULT;
m_buf.reserve(IN_BUF_SIZE_DEFAULT);
}
EReader::~EReader(void) {
m_isAlive = false;
#if defined(IB_WIN32)
WaitForSingleObject(m_hReadThread, INFINITE);
#endif
}
void EReader::checkClient() {
m_needsWriteSelect = !m_pClientSocket->getTransport()->isOutBufferEmpty();
}
void EReader::start() {
#if defined(IB_POSIX)
pthread_t thread;
pthread_attr_t attr;
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
pthread_create( &thread, &attr, readToQueueThread, this );
pthread_attr_destroy(&attr);
#elif defined(IB_WIN32)
m_hReadThread = CreateThread(0, 0, readToQueueThread, this, 0, 0);
#else
# error "Not implemented on this platform"
#endif
}
#if defined(IB_POSIX)
void * EReader::readToQueueThread(void * lpParam)
#elif defined(IB_WIN32)
DWORD WINAPI EReader::readToQueueThread(LPVOID lpParam)
#else
# error "Not implemented on this platform"
#endif
{
EReader *pThis = reinterpret_cast<EReader *>(lpParam);
pThis->readToQueue();
return 0;
}
void EReader::readToQueue() {
EMessage *msg = 0;
while (m_isAlive) {
if (m_buf.size() == 0 && !processNonBlockingSelect() && m_pClientSocket->isSocketOK())
continue;
if (!putMessageToQueue())
break;
}
m_pClientSocket->handleSocketError();
m_pEReaderSignal->issueSignal(); //letting client know that socket was closed
}
bool EReader::putMessageToQueue() {
EMessage *msg = 0;
if (m_pClientSocket->isSocketOK())
msg = readSingleMsg();
if (msg == 0)
return false;
m_csMsgQueue.Enter();
m_msgQueue.push_back(ibapi::shared_ptr<EMessage>(msg));
m_csMsgQueue.Leave();
m_pEReaderSignal->issueSignal();
return true;
}
bool EReader::processNonBlockingSelect() {
fd_set readSet, writeSet, errorSet;
struct timeval tval;
tval.tv_usec = 100 * 1000; //100 ms
tval.tv_sec = 0;
if( m_pClientSocket->fd() >= 0 ) {
FD_ZERO( &readSet);
errorSet = writeSet = readSet;
FD_SET( m_pClientSocket->fd(), &readSet);
if (m_needsWriteSelect)
FD_SET( m_pClientSocket->fd(), &writeSet);
FD_SET( m_pClientSocket->fd(), &errorSet);
int ret = select( m_pClientSocket->fd() + 1, &readSet, &writeSet, &errorSet, &tval);
if( ret == 0) { // timeout
return false;
}
if( ret < 0) { // error
m_pClientSocket->eDisconnect();
return false;
}
if( m_pClientSocket->fd() < 0)
return false;
if( FD_ISSET( m_pClientSocket->fd(), &errorSet)) {
// error on socket
m_pClientSocket->onError();
}
if( m_pClientSocket->fd() < 0)
return false;
if( FD_ISSET( m_pClientSocket->fd(), &writeSet)) {
// socket is ready for writing
onSend();
}
if( m_pClientSocket->fd() < 0)
return false;
if( FD_ISSET( m_pClientSocket->fd(), &readSet)) {
// socket is ready for reading
onReceive();
}
return true;
}
return false;
}
void EReader::onSend() {
m_pEReaderSignal->issueSignal();
}
void EReader::onReceive() {
int nOffset = m_buf.size();
m_buf.resize(m_nMaxBufSize);
int nRes = m_pClientSocket->receive(m_buf.data() + nOffset, m_buf.size() - nOffset);
if (nRes <= 0)
return;
m_buf.resize(nRes + nOffset);
}
bool EReader::bufferedRead(char *buf, int size) {
while (size > 0) {
while (m_buf.size() < size && m_buf.size() < m_nMaxBufSize)
if (!processNonBlockingSelect() && !m_pClientSocket->isSocketOK())
return false;
int nBytes = (std::min<unsigned int>)(m_nMaxBufSize, size);
std::copy(m_buf.begin(), m_buf.begin() + nBytes, buf);
std::copy(m_buf.begin() + nBytes, m_buf.end(), m_buf.begin());
m_buf.resize(m_buf.size() - nBytes);
size -= nBytes;
buf += nBytes;
}
return true;
}
EMessage * EReader::readSingleMsg() {
if (m_pClientSocket->usingV100Plus()) {
int msgSize;
if (!bufferedRead((char *)&msgSize, sizeof(msgSize)))
return 0;
msgSize = htonl(msgSize);
if (msgSize <= 0 || msgSize > MAX_MSG_LEN)
return 0;
std::vector<char> buf = std::vector<char>(msgSize);
if (!bufferedRead(buf.data(), buf.size()))
return 0;
return new EMessage(buf);
}
else {
const char *pBegin = 0;
const char *pEnd = 0;
int msgSize = 0;
while (msgSize == 0)
{
if (m_buf.size() >= m_nMaxBufSize * 3/4)
m_nMaxBufSize *= 2;
if (!processNonBlockingSelect() && !m_pClientSocket->isSocketOK())
return 0;
pBegin = m_buf.data();
pEnd = pBegin + m_buf.size();
msgSize = EDecoder(m_pClientSocket->EClient::serverVersion(), &defaultWrapper).parseAndProcessMsg(pBegin, pEnd);
}
std::vector<char> msgData(msgSize);
if (!bufferedRead(msgData.data(), msgSize))
return 0;
if (m_buf.size() < IN_BUF_SIZE_DEFAULT && m_buf.capacity() > IN_BUF_SIZE_DEFAULT)
{
m_buf.resize(m_nMaxBufSize = IN_BUF_SIZE_DEFAULT);
m_buf.shrink_to_fit();
}
EMessage * msg = new EMessage(msgData);
return msg;
}
}
ibapi::shared_ptr<EMessage> EReader::getMsg(void) {
m_csMsgQueue.Enter();
if (m_msgQueue.size() == 0) {
m_csMsgQueue.Leave();
return ibapi::shared_ptr<EMessage>();
}
ibapi::shared_ptr<EMessage> msg = m_msgQueue.front();
m_msgQueue.pop_front();
m_csMsgQueue.Leave();
return msg;
}
void EReader::processMsgs(void) {
m_pClientSocket->onSend();
checkClient();
ibapi::shared_ptr<EMessage> msg = getMsg();
if (!msg.get())
return;
const char *pBegin = msg->begin();
while (processMsgsDecoder_.parseAndProcessMsg(pBegin, msg->end()) > 0)
{
msg = getMsg();
if (!msg.get())
break;
pBegin = msg->begin();
}
}
The error I get is the following:
error: 'min' was not declared in this scope
int nBytes = min(m_nMaxBuffSize, size);
I have had to do other things, such as editing other source code and makefiles, but here I am stuck. Any insight would be appreciated.
In my version 973 source at that line I have
int nBytes = (std::min<unsigned int>)(m_nMaxBufSize, size);
Make sure you are using the latest version. The problem may be an example of what is described in "Why is 'using namespace std' considered bad practice?".
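If you would rather patch an older copy than upgrade, here is a sketch of the kind of change that line needs (the helper function below is hypothetical; only the std::min call mirrors the current source): include <algorithm>, qualify min, and keep the explicit <unsigned int> plus the parentheses, since m_nMaxBufSize is unsigned while size is int, and a min() function-like macro (e.g. from Windows headers) must not be allowed to expand.
#include <algorithm>
// Hypothetical stand-in for the surrounding EReader code: maxBufSize plays the role
// of m_nMaxBufSize (unsigned) and size is the requested byte count.
int bytesToCopy(unsigned int maxBufSize, int size)
{
    // Parenthesizing the name stops a min() macro from expanding, and the explicit
    // template argument avoids the signed/unsigned deduction conflict.
    return static_cast<int>((std::min<unsigned int>)(maxBufSize, static_cast<unsigned int>(size)));
}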
Currently I am using a very simple code example that helps me work with cross-platform sockets.
Github: Socket c++ cross platform
The problem comes when I try to make an HTTP GET request.
Simple requests return a correct value, but when I send this more complex request:
"GET /1.0/stock/aapl/batch?types=quote,news,chart&range=1m&last=10 HTTP/1.0\r\nHost: api.iextrading.com\r\nConnection: close\r\n\r\n"
It returns nothing!
//Main.cpp
#include <string>
#include <ActiveSocket.h>
#define SEND(a,b,c,d) send(a, (const char *)b, (int)c, d)
typedef unsigned char uint8;
int main(int argc, char **argv)
{
CActiveSocket socket; // Instantiate active socket object (defaults to TCP).
char time[50];
memset(&time, 0, 50);
char host[] = "api.iextrading.com";
char message[] = "GET /1.0/stock/aapl/batch?types=quote,news,chart&range=1m&last=10 HTTP/1.0\r\nHost: api.iextrading.com\r\nConnection: close\r\n\r\n";
socket.Initialize();
if (socket.Open(host, 80))
{
if (socket.Send((const uint8 *)message, strlen(message)))
{
socket.Receive(4096);//size of the buffer
socket.Close();
}
}
getchar();
return 1;
}
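For what it's worth, here is a sketch of how the receive side might loop, replacing the single Receive(4096) call inside the Send branch. It assumes the Receive(nMaxBytes, pBuffer) overload shown further down behaves like recv(), returning the byte count, 0 once the peer closes, and a negative value on error; an HTTP/1.0 response with Connection: close usually arrives in several chunks, so one keeps reading until the server closes the connection and then looks at what came back.
std::string response;
uint8 chunk[4096];
int32 bytesReceived;
while ((bytesReceived = socket.Receive(4096, chunk)) > 0)
{
    // append this chunk and go back for more until the server closes the socket
    response.append(reinterpret_cast<const char*>(chunk), bytesReceived);
}
printf("%s\n", response.c_str());
socket.Close();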
//CActiveSocket.h
int32 CSimpleSocket::Send(const uint8 *pBuf, size_t bytesToSend)
{
SetSocketError(SocketSuccess);
m_nBytesSent = 0;
switch(m_nSocketType)
{
case CSimpleSocket::SocketTypeTcp:
{
if (IsSocketValid())
{
if ((bytesToSend > 0) && (pBuf != NULL))
{
m_timer.Initialize();
m_timer.SetStartTime();
//---------------------------------------------------------
// Check error condition and attempt to resend if call
// was interrupted by a signal.
//---------------------------------------------------------
do
{
m_nBytesSent = SEND(m_socket, pBuf, bytesToSend, 0);
TranslateSocketError();
} while (GetSocketError() == CSimpleSocket::SocketInterrupted);
m_timer.SetEndTime();
}
}
break;
}
...
}
int32 CSimpleSocket::Receive(int32 nMaxBytes, uint8 * pBuffer )
{
m_nBytesReceived = 0;
if (IsSocketValid() == false)
{
return m_nBytesReceived;
}
uint8 * pWorkBuffer = pBuffer;
if ( pBuffer == NULL )
{
if ((m_pBuffer != NULL) && (nMaxBytes != m_nBufferSize))
{
delete [] m_pBuffer;
m_pBuffer = NULL;
}
if (m_pBuffer == NULL)
{
m_nBufferSize = nMaxBytes;
m_pBuffer = new uint8[nMaxBytes];
}
pWorkBuffer = m_pBuffer;
}
SetSocketError(SocketSuccess);
m_timer.Initialize();
m_timer.SetStartTime();
switch (m_nSocketType)
{
case CSimpleSocket::SocketTypeTcp:
{
do
{
m_nBytesReceived = RECV(m_socket, (pWorkBuffer +m_nBytesReceived), nMaxBytes, m_nFlags);
TranslateSocketError();
} while ((GetSocketError() == CSimpleSocket::SocketInterrupted));
break;
}
...
}
How do you copy one stream to another using dedicated read/write threads in C++?
Let's say I have these methods (not real, just to illustrate the point) for reading and writing data. These read/write functions could represent anything (network/file/USB/serial/etc).
// reads up to bufferSize bytes into buffer; the count comes back via bytesRead
void read(char* buffer, int bufferSize, int* bytesRead);
// writes up to bufferSize bytes from buffer; the count comes back via bytesWritten
void write(char* buffer, int bufferSize, int* bytesWritten);
The solution should also be portable.
NOTE: I am aware that Windows has a FILE_FLAG_OVERLAPPED feature, but this assumes that the read/write is file IO. Remember, these read/write methods could represent anything.
Here is the solution I came up with.
Header
#pragma once
#include <stdlib.h>
#include <queue>
#include <mutex>
#include <thread>
#include <chrono>
#include <list>
#include <thread>
#define ASYNC_COPY_READ_WRITE_SUCCESS 0
struct BufferBlock;
struct ReadStream
{
// read a stream to a buffer.
// return non-zero if error occured
virtual int read(char* buffer, int bufferSize, int* bytesRead) = 0;
};
struct WriteStream
{
// write a buffer to a stream.
// return non-zero if error occured
virtual int write(char* buffer, int bufferSize, int* bytesWritten) = 0;
};
class BufferBlockManager
{
public:
BufferBlockManager(int numberOfBlocks, int bufferSize);
~BufferBlockManager();
void enqueueBlockForRead(BufferBlock* block);
void dequeueBlockForRead(BufferBlock** block);
void enqueueBlockForWrite(BufferBlock* block);
void dequeueBlockForWrite(BufferBlock** block);
void resetState();
private:
std::list<BufferBlock*> blocks;
std::queue<BufferBlock*> blocksPendingRead;
std::queue<BufferBlock*> blocksPendingWrite;
std::mutex queueLock;
std::chrono::milliseconds dequeueSleepTime;
};
void AsyncCopyStream(BufferBlockManager* bufferBlockManager, ReadStream* readStream, WriteStream* writeStream, int* readResult, int* writeResult);
CPP
#include "AsyncReadWrite.h"
struct BufferBlock
{
BufferBlock(int bufferSize) : buffer(NULL)
{
this->bufferSize = bufferSize;
this->buffer = new char[bufferSize];
this->actualSize = 0;
this->isLastBlock = false;
}
~BufferBlock()
{
this->bufferSize = 0;
free(this->buffer);
this->buffer = NULL;
this->actualSize = 0;
}
char* buffer;
int bufferSize;
int actualSize;
bool isLastBlock;
};
BufferBlockManager::BufferBlockManager(int numberOfBlocks, int bufferSize)
{
dequeueSleepTime = std::chrono::milliseconds(100);
for (int x = 0; x < numberOfBlocks; x++)
{
BufferBlock* block = new BufferBlock(bufferSize);
blocks.push_front(block);
blocksPendingRead.push(block);
}
}
BufferBlockManager::~BufferBlockManager()
{
for (std::list<BufferBlock*>::const_iterator iterator = blocks.begin(), end = blocks.end(); iterator != end; ++iterator) {
delete (*iterator);
}
}
void BufferBlockManager::enqueueBlockForRead(BufferBlock* block)
{
queueLock.lock();
block->actualSize = 0;
block->isLastBlock = false;
blocksPendingRead.push(block);
queueLock.unlock();
}
void BufferBlockManager::dequeueBlockForRead(BufferBlock** block)
{
WAITFOR:
while (blocksPendingRead.size() == 0)
std::this_thread::sleep_for(dequeueSleepTime);
queueLock.lock();
if (blocksPendingRead.size() == 0)
{
queueLock.unlock();
goto WAITFOR;
}
*block = blocksPendingRead.front();
blocksPendingRead.pop();
queueLock.unlock();
}
void BufferBlockManager::enqueueBlockForWrite(BufferBlock* block)
{
queueLock.lock();
blocksPendingWrite.push(block);
queueLock.unlock();
}
void BufferBlockManager::dequeueBlockForWrite(BufferBlock** block)
{
WAITFOR:
while (blocksPendingWrite.size() == 0)
std::this_thread::sleep_for(dequeueSleepTime);
queueLock.lock();
if (blocksPendingWrite.size() == 0)
{
queueLock.unlock();
goto WAITFOR;
}
*block = blocksPendingWrite.front();
blocksPendingWrite.pop();
queueLock.unlock();
}
void BufferBlockManager::resetState()
{
queueLock.lock();
blocksPendingRead = std::queue<BufferBlock*>();
blocksPendingWrite = std::queue<BufferBlock*>();
for (std::list<BufferBlock*>::const_iterator iterator = blocks.begin(), end = blocks.end(); iterator != end; ++iterator) {
(*iterator)->actualSize = 0;
}
queueLock.unlock();
}
struct AsyncCopyContext
{
AsyncCopyContext(BufferBlockManager* bufferBlockManager, ReadStream* readStream, WriteStream* writeStream)
{
this->bufferBlockManager = bufferBlockManager;
this->readStream = readStream;
this->writeStream = writeStream;
this->readResult = ASYNC_COPY_READ_WRITE_SUCCESS;
this->writeResult = ASYNC_COPY_READ_WRITE_SUCCESS;
}
BufferBlockManager* bufferBlockManager;
ReadStream* readStream;
WriteStream* writeStream;
int readResult;
int writeResult;
};
void ReadStreamThread(AsyncCopyContext* asyncContext)
{
int bytesRead = 0;
BufferBlock* readBuffer = NULL;
int readResult = ASYNC_COPY_READ_WRITE_SUCCESS;
while (
// as long there hasn't been any write errors
asyncContext->writeResult == ASYNC_COPY_READ_WRITE_SUCCESS
// and we haven't had an error reading yet
&& readResult == ASYNC_COPY_READ_WRITE_SUCCESS)
{
// let's deque a block to read to!
asyncContext->bufferBlockManager->dequeueBlockForRead(&readBuffer);
readResult = asyncContext->readStream->read(readBuffer->buffer, readBuffer->bufferSize, &bytesRead);
readBuffer->actualSize = bytesRead;
readBuffer->isLastBlock = bytesRead == 0;
if (readResult == ASYNC_COPY_READ_WRITE_SUCCESS)
{
// this was a valid read, go ahead and queue it for writing
asyncContext->bufferBlockManager->enqueueBlockForWrite(readBuffer);
}
else
{
// an error occured reading
asyncContext->readResult = readResult;
// since an error occured, lets queue an block to write indicatiting we are done and there are no more bytes to read
readBuffer->isLastBlock = true;
readBuffer->actualSize = 0;
asyncContext->bufferBlockManager->enqueueBlockForWrite(readBuffer);
}
if (readBuffer->isLastBlock) return;
}
}
void WriteStreamThread(AsyncCopyContext* asyncContext)
{
int bytesWritten = 0;
BufferBlock* writeBuffer = NULL;
int writeResult = ASYNC_COPY_READ_WRITE_SUCCESS;
bool isLastWriteBlock = false;
while (
// as long as there are no errors during reading
asyncContext->readResult == ASYNC_COPY_READ_WRITE_SUCCESS
// and we haven't had an error writing yet
&& writeResult == ASYNC_COPY_READ_WRITE_SUCCESS)
{
// lets dequeue a block for writing!
asyncContext->bufferBlockManager->dequeueBlockForWrite(&writeBuffer);
isLastWriteBlock = writeBuffer->isLastBlock;
if (writeBuffer->actualSize > 0)
writeResult = asyncContext->writeStream->write(writeBuffer->buffer, writeBuffer->actualSize, &bytesWritten);
if (writeResult == ASYNC_COPY_READ_WRITE_SUCCESS)
{
asyncContext->bufferBlockManager->enqueueBlockForRead(writeBuffer);
if (isLastWriteBlock) return;
}
else
{
asyncContext->writeResult = writeResult;
asyncContext->bufferBlockManager->enqueueBlockForRead(writeBuffer);
return;
}
}
}
void AsyncCopyStream(BufferBlockManager* bufferBlockManager, ReadStream* readStream, WriteStream* writeStream, int* readResult, int* writeResult)
{
AsyncCopyContext asyncContext(bufferBlockManager, readStream, writeStream);
std::thread readThread(ReadStreamThread, &asyncContext);
std::thread writeThread(WriteStreamThread, &asyncContext);
readThread.join();
writeThread.join();
*readResult = asyncContext.readResult;
*writeResult = asyncContext.writeResult;
}
Usage
#include <stdio.h>
#include <tchar.h>
#include "AsyncReadWrite.h"
struct ReadTestStream : ReadStream
{
int readCount = 0;
int read(char* buffer, int bufferSize, int* bytesRead)
{
printf("Starting read...\n");
memset(buffer, 0, bufferSize);
if (readCount == 10)
{
*bytesRead = 0;
return 0;
}
// pretend this function takes a while!
std::this_thread::sleep_for(std::chrono::milliseconds(100));
char buff[100];
sprintf_s(buff, "This is read number %d\n", readCount);
strcpy_s(buffer, sizeof(buff), buff);
*bytesRead = strlen(buffer);
readCount++;
printf("Finished read...\n");
return 0;
}
};
struct WriteTestStream : WriteStream
{
int write(char* buffer, int bufferSize, int* bytesWritten)
{
printf("Starting write...\n");
// pretend this function takes a while!
std::this_thread::sleep_for(std::chrono::milliseconds(500));
printf("%s", buffer);
printf("Finished write...\n");
return 0;
}
};
int _tmain(int argc, _TCHAR* argv[])
{
BufferBlockManager bufferBlockManager(5, 4096);
ReadTestStream readStream;
WriteTestStream writeStream;
int readResult = 0;
int writeResult = 0;
printf("Starting copy...\n");
AsyncCopyStream(&bufferBlockManager, &readStream, &writeStream, &readResult, &writeResult);
printf("Finished copy... readResult=%d writeResult=%d \n", readResult, writeResult);
getchar();
return 0;
}
EDIT: I put my solution into a GitHub repository here. If you wish to use this code, refer to the repository, since it may be more up to date than this answer.
Typically, you would just have one thread for each direction that alternates between reads and writes.
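A minimal sketch of that simpler approach, using the read()/write() signatures from the question (the stubs and the convention that bytesRead == 0 means end of stream are assumptions for illustration):
#include <thread>
// Placeholder stubs standing in for the question's read()/write(); real code
// would wrap the actual streams here.
void read(char* buffer, int bufferSize, int* bytesRead)    { (void)buffer; (void)bufferSize; *bytesRead = 0; }
void write(char* buffer, int bufferSize, int* bytesWritten) { (void)buffer; *bytesWritten = bufferSize; }
// Copy one direction: read a block, then write it out completely, and repeat
// until read() reports 0 bytes.
void CopyDirection()
{
    char buffer[4096];
    for (;;)
    {
        int bytesRead = 0;
        read(buffer, static_cast<int>(sizeof(buffer)), &bytesRead);
        if (bytesRead <= 0)
            break;                          // end of stream
        int offset = 0;
        while (offset < bytesRead)          // handle short writes
        {
            int bytesWritten = 0;
            write(buffer + offset, bytesRead - offset, &bytesWritten);
            if (bytesWritten <= 0)
                return;                     // treat as a write error
            offset += bytesWritten;
        }
    }
}
int main()
{
    // For a bidirectional link, start one such thread per direction, each bound
    // to its own read/write pair.
    std::thread copier(CopyDirection);
    copier.join();
    return 0;
}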