Access Violation using simple Indy server code - C++

In the following code, I have a simple server that sends a message to all clients 2 times a second, and another message 8-10 times a minute.
The problem is I am getting an error at runtime:
access violation at 00479740 read address FFFFFFD0
But only on a few systems, and only 1 or 2 times a day; the software runs about 10 hours a day.
I have tried similar code with the ICS library and it seems to work well.
What's wrong with this code? Is there a better way to write it?
void __fastcall TDataNet::DataModuleCreate(TObject *Sender)
{
listaClient= new TThreadList();
psTx= new TStringList();
psRx= new TStringList();
}
void __fastcall TDataNet::DataModuleDestroy(TObject *Sender)
{
IdTCPServer1->Active= false;
listaClient->Free();
delete psTx;
delete psRx;
}
void __fastcall TDataNet::Send( TStrings *ps, TIdContext *AContext)
{
TList *lista;
static int cntSend= 0;
try
{
lista= listaClient->LockList();
if( AContext != NULL )
{
AContext->Connection->IOHandler->Write( ps, true, TIdTextEncoding_UTF8);
}
else
{
for( int i=0; i < lista->Count; i++ )
((TDatiClient*)lista->Items[i])->pThread->Connection->IOHandler->Write( ps, true, TIdTextEncoding_UTF8);
}
}
__finally
{
listaClient->UnlockList();
}
}
void __fastcall TDataNet::SetCambioPilota( void)
{
unsigned short hh, mm, ss, ms, hh1, mm1, ss1, ms1;
unsigned short hh2, mm2, ss2, ms2, hh3, mm3, ss3, ms3;
unsigned short hh4, mm4, ss4, ms4, dd4;
unsigned short hh5, mm5, ss5, ms5, dd5;
TStrings *ps;
UnicodeString s;
try
{
ps= psTx;
ps->Clear();
s= "<CAMBIO_PILOTA>";
ps->Add( s);
for( int i=0; i < MAX_PILOTI; i++ )
{
s.sprintf( L"<Pilota%02x= I%x,\"A%s\",\"C%s\",\"F%s\",f%x>",
i+1, gara.pilota[i].idnome,
gara.pilota[i].nome.c_str(), gara.pilota[i].nick.c_str(),
gara.pilota[i].nomeTeam.c_str(), gara.pilota[i].idPilotaT );
ps->Add( s);
}
s= "<END_CAMBIO_PILOTA>";
ps->Add( s);
Send( ps );
}
catch(...){}
}
void __fastcall TDataNet::SetDatiGara( void)
{
TStrings *ps;
UnicodeString s;
try
{
ps= psTx;
ps->Clear();
s= "<DATI_GARA>";
ps->Add( s);
s.sprintf( L"<eve=%d,A%x,B%x,C%x,D%x,E%x,F%x,G%x,H%x,I%x,J%x,K%x>", DataB->GetEventoInCorso().idEvento,
DataB->GetEventoInCorso().numEvento, DataB->GetEventoInCorso().subEvento,
DataB->GetNextEvento().idEvento, DataB->GetNextEvento().numEvento, DataB->GetNextEvento().subEvento,
gara.tkTempo, gara.tkDurata - gara.tkTempo,
gara.laps, gara.gDurata > 0 ? (gara.gDurata - gara.laps):0, gara.flInCorso ? (gara.gDurata > 0 ? 2:1):0,
gara.flFineGara );
ps->Add( s);
s= "<END_DATI_GARA>";
ps->Add( s);
Send( ps );
}
catch(...){}
}
void __fastcall TDataNet::Timer1Timer(TObject *Sender)
{
Timer1->Enabled= false;
SetDatiGara();
Timer1->Enabled= true;
}
void __fastcall TDataNet::IdTCPServer1Connect(TIdContext *AContext)
{
TDatiClient* dati;
dati= new TDatiClient;
dati->pThread= AContext;
AContext->Connection->IOHandler->ReadTimeout= 200;
AContext->Data= (TObject*)dati;
try
{
TList* lista;
lista= listaClient->LockList();
lista->Add( dati);
connessioni= lista->Count;
if( FmainWnd )
PostMessage( FmainWnd, WM_EVENTO_TCP, ID_CONNESSO, lista->Count);
int idEvento= DataB->GetEventoInCorso().idEvento;
if( idEvento )
SetCambioStato( idEvento, STATO_EVENTO_START, AContext);
}
__finally
{
listaClient->UnlockList();
}
}
void __fastcall TDataNet::IdTCPServer1Disconnect(TIdContext *AContext)
{
TDatiClient* dati;
dati= (TDatiClient*)AContext->Data;
AContext->Data= NULL;
try
{
listaClient->Remove( dati);
TList* lista;
lista= listaClient->LockList();
connessioni= lista->Count;
if( FmainWnd )
PostMessage( FmainWnd, WM_EVENTO_TCP, ID_DISCONNESSO, lista->Count);
}
__finally
{
listaClient->UnlockList();
}
delete dati;
}
void __fastcall TDataNet::IdTCPServer1Execute(TIdContext *AContext)
{
Sleep( 100);
try
{
AContext->Connection->IOHandler->ReadStrings( psRx, -1);
if( psRx->Count >= 2 && psRx->Strings[0] == "<LAST_MINUTE>" && psRx->Strings[psRx->Count-1] == "<END_LAST_MINUTE>" )
{
psRx->Delete(0);
psRx->Delete(psRx->Count-1);
if( FmainWnd )
SendMessage( FmainWnd, WM_EVENTO_TCP, ID_LAST_MINUTE, (unsigned int)psRx);
}
psRx->Clear();
}
catch( ...) {}
AContext->Connection->CheckForGracefulDisconnect();
}

The error message means you are accessing something that is -48 bytes offset from a NULL pointer. I see all kinds of problems with this code, not the least of which is that you are accessing things in a thread-unsafe manner, so you have race conditions that could corrupt memory, amongst other possible issues. For example, your OnExecute event handler is not protecting the psRx object from concurrent access, so multiple clients could populate it with data at the exact same time, corrupting its contents.
TIdTCPServer is a multi-threaded component, its events are fired in the context of worker threads, not the main UI thread, so your event handlers MUST use thread-safe coding.
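As a minimal illustration of that one fix, before the fuller rework below: give each OnExecute call its own local list instead of sharing the psRx member across all client threads. A sketch only, keeping the rest of the handler as it was:
void __fastcall TDataNet::IdTCPServer1Execute(TIdContext *AContext)
{
    // Sketch: a per-call TStringList removes the data race on the shared
    // psRx member, since every worker thread gets its own private list.
    TStringList *rx = new TStringList;
    try
    {
        AContext->Connection->IOHandler->ReadStrings(rx, -1);
        // ... validate and forward rx exactly as psRx was used before ...
    }
    __finally
    {
        delete rx;
    }
}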
Besides, what you are doing is not the safest way to handle asynchronous communications with TIdTCPServer anyway. I would suggest something more like the following instead:
class TDatiClient : public TIdServerContext
{
public:
TIdThreadSafeObjectList *Queue;
bool QueueHasObjects;
__fastcall TDatiClient(TIdTCPConnection *AConnection, TIdYarn *AYarn, TThreadList* AList = NULL)
: TIdServerContext(AConnection, AYarn, AList)
{
Queue = new TIdThreadSafeObjectList;
}
__fastcall ~TDatiClient()
{
delete Queue;
}
void __fastcall Send(TStrings *ps)
{
TStringList *toSend = new TStringList;
try
{
toSend->Assign(ps);
TList *lista = Queue->LockList();
try
{
lista->Add(toSend);
QueueHasObjects = true;
}
__finally
{
Queue->UnlockList();
}
}
catch (const Exception &)
{
delete toSend;
}
}
};
__fastcall TDataNet::TDataNet(TComponent *Owner)
: TDataModule(Owner)
{
// this must be set before you activate the server...
IdTCPServer1->ContextClass = __classid(TDatiClient);
// do this at runtime instead of design-time so
// ContextClass can be set first...
IdTCPServer1->Active = true;
}
__fastcall TDataNet::~TDataNet()
{
IdTCPServer1->Active = false;
}
void __fastcall TDataNet::Send(TStrings *ps, TIdContext *AContext)
{
static int cntSend = 0;
TList *lista = IdTCPServer1->Contexts->LockList();
try
{
if (AContext)
{
// make sure the client is still in the list...
if (lista->IndexOf(AContext) != -1)
static_cast<TDatiClient*>(AContext)->Send(ps);
}
else
{
for (int i = 0; i < lista->Count; ++i)
static_cast<TDatiClient*>(static_cast<TIdContext*>(lista->Items[i]))->Send(ps);
}
}
__finally
{
IdTCPServer1->Contexts->UnlockList();
}
}
void __fastcall TDataNet::SetCambioPilota()
{
UnicodeString s;
try
{
TStringList *ps = new TStringList;
try
{
s = _D("<CAMBIO_PILOTA>");
ps->Add(s);
for (int i = 0; i < MAX_PILOTI; ++i)
{
s.sprintf( _D("<Pilota%02x= I%x,\"A%s\",\"C%s\",\"F%s\",f%x>"),
// TODO: if SetCambioPilota() is ever called in a worker thread,
// make sure these values are accessed in a thread-safe manner!
i+1, gara.pilota[i].idnome,
gara.pilota[i].nome.c_str(), gara.pilota[i].nick.c_str(),
gara.pilota[i].nomeTeam.c_str(), gara.pilota[i].idPilotaT);
ps->Add(s);
}
s = _D("<END_CAMBIO_PILOTA>");
ps->Add(s);
Send(ps);
}
__finally
{
delete ps;
}
}
catch (const Exception &)
{
}
}
void __fastcall TDataNet::SetDatiGara()
{
UnicodeString s;
try
{
TStringList *ps = new TStringList;
try
{
s = _D("<DATI_GARA>");
ps->Add(s);
s.sprintf( _D("<eve=%d,A%x,B%x,C%x,D%x,E%x,F%x,G%x,H%x,I%x,J%x,K%x>"),
// TODO: if SetDatiGara() is ever called in a worker thread,
// make sure these values are accessed in a thread-safe manner!
DataB->GetEventoInCorso().idEvento,
DataB->GetEventoInCorso().numEvento, DataB->GetEventoInCorso().subEvento,
DataB->GetNextEvento().idEvento, DataB->GetNextEvento().numEvento, DataB->GetNextEvento().subEvento,
gara.tkTempo, gara.tkDurata - gara.tkTempo, gara.laps,
(gara.gDurata > 0) ? (gara.gDurata - gara.laps) : 0,
gara.flInCorso ? ((gara.gDurata > 0) ? 2 : 1) : 0,
gara.flFineGara);
ps->Add(s);
s = _D("<END_DATI_GARA>");
ps->Add(s);
Send(ps);
}
__finally
{
delete ps;
}
}
catch (const Exception &)
{
}
}
void __fastcall TDataNet::Timer1Timer(TObject *Sender)
{
Timer1->Enabled = false;
SetDatiGara();
Timer1->Enabled = true;
}
void __fastcall TDataNet::IdTCPServer1Connect(TIdContext *AContext)
{
TDatiClient* dati = static_cast<TDatiClient*>(AContext);
AContext->Connection->IOHandler->DefStringEncoding = TIdTextEncoding_UTF8;
TList* lista = IdTCPServer1->Contexts->LockList();
try
{
// TODO: this event is fired in a worker thread, so make sure
// that connessioni, DataB, and SetCambioStato() are all being
// accessed in a thread-safe manner!
int connessioni = lista->Count;
if (FmainWnd)
PostMessage(FmainWnd, WM_EVENTO_TCP, ID_CONNESSO, connessioni);
int idEvento = DataB->GetEventoInCorso().idEvento;
if (idEvento)
SetCambioStato(idEvento, STATO_EVENTO_START, AContext);
}
__finally
{
IdTCPServer1->Contexts->UnlockList();
}
}
void __fastcall TDataNet::IdTCPServer1Disconnect(TIdContext *AContext)
{
TDatiClient* dati = static_cast<TDatiClient*>(AContext);
TList* lista = IdTCPServer1->Contexts->LockList();
try
{
int connessioni = lista->Count - 1;
if (FmainWnd)
PostMessage(FmainWnd, WM_EVENTO_TCP, ID_DISCONNESSO, connessioni);
}
__finally
{
IdTCPServer1->Contexts->UnlockList();
}
}
void __fastcall TDataNet::IdTCPServer1Execute(TIdContext *AContext)
{
TDatiClient* dati = static_cast<TDatiClient*>(AContext);
TStringList *ps;
if (dati->QueueHasObjects)
{
TObjectList *objs = new TObjectList(false);
try
{
TList *lista = dati->Queue->LockList();
try
{
objs->Assign(lista);
lista->Clear();
objs->OwnsObjects = true;
}
__finally
{
dati->QueueHasObjects = (lista->Count > 0);
dati->Queue->UnlockList();
}
for (int i = 0; i < objs->Count; ++i)
{
ps = static_cast<TStringList*>(objs->Items[i]);
AContext->Connection->IOHandler->Write(ps, true);
}
}
__finally
{
delete objs;
}
}
if (AContext->Connection->IOHandler->InputBufferIsEmpty())
{
AContext->Connection->IOHandler->CheckForDataOnSource(200);
if (AContext->Connection->IOHandler->InputBufferIsEmpty())
{
AContext->Connection->IOHandler->CheckForDisconnect();
return;
}
}
ps = new TStringList;
try
{
AContext->Connection->IOHandler->ReadStrings(ps, -1);
if ((ps->Count >= 2) && (ps->Strings[0] == _D("<LAST_MINUTE>")) && (ps->Strings[ps->Count-1] == _D("<END_LAST_MINUTE>")))
{
ps->Delete(0);
ps->Delete(ps->Count-1);
if (FmainWnd)
SendMessage(FmainWnd, WM_EVENTO_TCP, ID_LAST_MINUTE, reinterpret_cast<LPARAM>(ps));
}
}
__finally
{
delete ps;
}
}
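The key design change above: each client's IOHandler is only ever touched by that client's own OnExecute thread, while other threads (the timer, the OnConnect handler) merely append copies of the outgoing strings to the context's thread-safe queue. That removes both the shared psTx/psRx races and the cross-thread Write() calls of the original code.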

Related

libpqxx freezes on connection::prepare

My program should synchronize with a remote Postgres database. My code:
db_handler::DB_Handler::DB_Handler(const char* host, unsigned int port, const char* db, const char* user, const char* password, int& error) {
...
this->password = reinterpret_cast<char*>(malloc(std::strlen(password) + 1));
strcpy(this->password, password);
this->connection = nullptr;
if (this->connect_to_db() != 0) {
error = 1;
}
else {
error = 0;
}
}
int db_handler::DB_Handler::connect_to_db() {
try {
std::string connection_params = "host=";
connection_params += std::string(host);
...
connection_params += " password=";
connection_params += password;
if (this->connection != nullptr) {
delete this->connection;
}
this->connection = new pqxx::connection(connection_params.c_str());
return 0;
}
catch (...) {
return 1;
}
}
int db_handler::DB_Handler::sync() {
for (int tries = 0; tries < 2; tries++) {
try {
this->connection->prepare("get_keys", "*query here*");
...
}
catch (const pqxx::broken_connection& e) {
this->connect_to_db();
std::this_thread::sleep_for(std::chrono::milliseconds(50));
}
catch (...) {
return 1;
}
}
return 1;
}
And on the line this->connection->prepare("get_keys", "*query here*"); my program freezes forever, until I restart it.
Where am I wrong, and how can I fix it?
Maybe I should add some timeouts? How can I do that?
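One avenue, sketched on the assumption that the freeze is a dead TCP connection that is never detected: libpq, which libpqxx wraps, accepts timeout and keepalive parameters directly in the connection string, so a broken link surfaces as an error instead of a hang. The parameter names below are standard libpq ones; the values are only illustrative:
std::string connection_params = "host=";
connection_params += std::string(host);
// ... port, dbname, user, password as before ...
connection_params += " connect_timeout=10";     // abort connection attempts after 10s
connection_params += " keepalives=1";           // enable TCP keepalives
connection_params += " keepalives_idle=30";     // first probe after 30s of silence
connection_params += " keepalives_interval=10"; // re-probe every 10s
connection_params += " keepalives_count=3";     // declare the link dead after 3 lost probes
this->connection = new pqxx::connection(connection_params.c_str());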

grpc server won't release memory, is there a memory leak?

I built a gRPC server in C++ and found that its memory is not released after several requests. Memory increases at first, and if I keep sending requests it stays at a peak value; after I stop sending requests, little or no memory is released. What's wrong with my code? Should the memory be released promptly, or is it kept as a cache buffer?
class BaseCallData {
public:
BaseCallData(XFRProcessor *processor)
: processor_(processor), status_(CallStatus::CREATE) {}
virtual ~BaseCallData() = default;
void Proceed() {
if (status_ == CallStatus::CREATE) {
status_ = CallStatus::PROCESS;
Request();
} else if (status_ == CallStatus::PROCESS) {
NewCallData();
//TODO
OnRequest();
Response();
status_ = CallStatus::FINISH;
} else if (status_ == CallStatus::FINISH) {
delete this;
} else {
LOGGER_ERROR(Log::GetLog(), "wrong grpc status");
}
}
template<class RpcReq, class RpcRes, class ReqData, class RspData>
void WorkFlow(RpcReq &grpc_request,
RpcRes &grpc_response,
ServerContext &ctx,
ReqData &request_data,
RspData &response_data) {
ErrorCode error_code = RequestReader::Instance()->ReadRequest(grpc_request, ctx, request_data);
ReqTimer req_timer(request_data.log_id_, request_data.request_type_);
if (error_code == OK) {
error_code = processor_->Proceed(&request_data, &response_data, &req_timer);
response_data.error_code = error_code;
grpc_response = ResponseAssigner::Instance()->AssignResponse(response_data);
if (OK == error_code) {
req_timer.SetStatus(true);
} else {
LOGGER_WARN(Log::GetLog(),
"[REQ:{}][LOG:{}] fail to run, err[{}]",
request_data.GetRequestType(),
request_data.GetLogId(),
error_code);
req_timer.SetStatus(false, std::to_string(error_code));
}
} else {
LOGGER_WARN(Log::GetLog(), "fail to read request, err[{}]", error_code);
req_timer.SetStatus(false, std::to_string(error_code));
grpc_response.set_error_code(error_code);
grpc_response.set_error_msg(GetErrorMsg(error_code));
}
}
XFRProcessor *GetProcessor() {
return processor_;
}
private:
virtual void NewCallData() = 0;
virtual void Request() = 0;
virtual void Response() = 0;
virtual void OnRequest() = 0;
enum class CallStatus { CREATE, PROCESS, FINISH };
CallStatus status_;
XFRProcessor *processor_;
};
class DetectCallData : public BaseCallData {
public:
DetectCallData(::xfr::XFRService::AsyncService *service, ServerCompletionQueue *cq, XFRProcessor *processor)
: BaseCallData(processor), p_service_(service), p_cq_(cq), responder_(&ctx_) {
Proceed();
}
void NewCallData() override {
new DetectCallData(p_service_, p_cq_, GetProcessor());
}
void Request() override {
p_service_->RequestDetect(&ctx_, &request_, &responder_, p_cq_, p_cq_, this);
}
void Response() override {
responder_.Finish(response_, Status::OK, this);
}
void OnRequest() override {
WorkFlow(request_, response_, ctx_, request_data_, response_data_);
}
private:
ServerContext ctx_;
::xfr::XFRService::AsyncService *p_service_;
ServerCompletionQueue *p_cq_;
::xfr::DetectRequest request_;
::xfr::DetectResponse response_;
DetectRequest request_data_;
DetectResponse response_data_;
ServerAsyncResponseWriter<::xfr::DetectResponse> responder_;
};
class CompareCallData : public BaseCallData {
...
};
class MatchCallData : public BaseCallData {
...
};
class XFRServer final {
public:
XFRServer(const XFRServer &) = delete;
XFRServer &operator=(const XFRServer &) = delete;
XFRServer() {
Init();
builder_.AddListeningPort(address_, InsecureServerCredentials());
builder_.RegisterService(&service_);
builder_.SetMaxReceiveMessageSize(max_receive_size_);
builder_.SetMaxSendMessageSize(max_send_size_);
for (int i = 0; i < thread_num_; ++i) {
auto p_cq = builder_.AddCompletionQueue();
v_cq_.push_back(std::move(p_cq));
}
}
void Init() {
auto grpc_config = hobot::vision::xfr::ServerConfig::GetConfig()->GetSubConfig("grpc");
address_ = grpc_config->GetSTDStringValue("server_address");
if (address_.empty()) {
LOGGER_ERROR(Log::GetLog(), "fail to get server address: {}", address_);
exit(0);
}
thread_num_ = grpc_config->GetIntValue("server_thread_count", 300);
max_receive_size_ = grpc_config->GetIntValue("max_receive_message_bytes", 20971520);
max_send_size_ = grpc_config->GetIntValue("max_send_message_bytes", 20971520);
}
~XFRServer() {
server_->Shutdown();
// the documentation says the cq should always be shut down after the server
for (auto &cq : v_cq_) {
cq->Shutdown();
}
}
void HandleRpcs(ServerCompletionQueue *cq) {
auto detect_processor = std::make_shared<DetectProcessor>();
auto compare_processor = std::make_shared<CompareProcessor>();
auto match_processor = std::make_shared<MatchProcessor>();
new DetectCallData(&service_, cq, detect_processor.get());
new CompareCallData(&service_, cq, compare_processor.get());
new MatchCallData(&service_, cq, match_processor.get());
void *tag{nullptr};
bool ok{false};
while (true) {
if (!cq->Next(&tag, &ok)) {
LOGGER_WARN(Log::GetLog(), "Server stream closed, quiting");
break;
}
if (ok) {
static_cast<BaseCallData *>(tag)->Proceed();
}
}
}
void run() {
server_ = builder_.BuildAndStart();
for (auto &cq:v_cq_) {
v_threads_.emplace_back(
std::thread([this, &cq] {
HandleRpcs(cq.get());
})
);
}
LOGGER_INFO(Log::GetLog(), "grpc server start working...");
v_threads_.emplace_back(
std::thread([&zk_register] {
zk_register.KeepPublished();
})
);
for (auto &t: v_threads_) {
t.join();
}
}
private:
std::string address_;
int thread_num_;
std::vector<std::thread> v_threads_;
std::vector<std::unique_ptr<ServerCompletionQueue>> v_cq_;
::xfr::XFRService::AsyncService service_;
std::unique_ptr<Server> server_;
ServerBuilder builder_;
int max_receive_size_;
int max_send_size_;
};
Here is what may be one answer: I didn't handle the state machine correctly.
while (true) {
if (!cq->Next(&tag, &ok)) {
LOGGER_WARN(Log::GetLog(), "Server stream closed, quiting");
break;
}
if (ok) {
static_cast<BaseCallData *>(tag)->Proceed();
}
}
When ok != true, the object is never deleted, so every time I shut down the client with Ctrl+C, memory leaks.
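A sketch of the corrected loop under that diagnosis: when Next() returns with ok == false, the tag will never receive another event, so the call data must be reclaimed instead of ignored:
while (true) {
    if (!cq->Next(&tag, &ok)) {
        LOGGER_WARN(Log::GetLog(), "Server stream closed, quiting");
        break;
    }
    BaseCallData *call = static_cast<BaseCallData *>(tag);
    if (ok) {
        call->Proceed();
    } else {
        // The operation was cancelled (client disconnect, server shutdown);
        // no further completions will arrive for this tag, so delete it.
        delete call;
    }
}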

Threads not running, why?

I wrote a simple test application to prove that the threads work:
// Test.cpp : Defines the entry point for the console application.
//
#include "stdafx.h"
class clsTest {
private:
uintptr_t muintHandle;
static unsigned int __stdcall fnThread(void* pData) {
while( 1 ) {
_sleep(1000);
printf("In fnThread, handle = %d\n", *(uintptr_t*)pData);
}
return 0;
}
public:
clsTest() {
muintHandle = _beginthreadex(0, 0, &clsTest::fnThread, (void*)&muintHandle, 0, 0);
printf("clsTest(), after beginthreadex, handle = %u\n", muintHandle);
}
};
int _tmain(int argc, _TCHAR* argv[]) {
clsTest* pT = NULL;
while(1) {
printf("From _tmain\n");
if ( pT == NULL ) {
pT = new clsTest();
}
_sleep(1000);
}
return 0;
}
The output from this application is:
From _tmain
clsTest(), after beginthreadex, handle = 112
In fnThread, handle = 112
From _tmain
In fnThread, handle = 112
From _tmain
In fnThread, handle = 112
From _tmain
In fnThread, handle = 112
From _tmain
In fnThread, handle = 112
From _tmain
In fnThread, handle = 112
...
This continues indefinitely, which is exactly what I would expect to see. Now, in a much larger project, I have a base class:
typedef enum {
eIdle = 0, //Thread is not working at all
eStarted, //Thread has been started but is not fully operational yet
eRunning, //Thread is working normally
ePausing, //Thread is requested to enter the paused state
ePaused, //Thread is paused
eTerminating //Termination has been requested but not completed yet
} eThreadStates;
class clsOpenLDVthread {
protected:
volatile eThreadStates meState;
CRITICAL_SECTION mCritControl; // critical section for thread control
char mszName[80];
HANDLE mhEvent, mhThread;
virtual bool blnStart() = 0;
public:
clsOpenLDVthread(LPCSTR pszName);
~clsOpenLDVthread();
bool inline blnIsRunning();
bool inline blnIsStopped();
bool inline blnIsStopping();
bool inline blnIsStarting();
bool inline blnIsPausing();
bool inline blnIsPaused();
bool blnPause(bool blnState);
virtual bool blnStop();
};
clsOpenLDVthread::clsOpenLDVthread(LPCSTR pszName) : meState(eIdle)
, mhThread(NULL) {
::InitializeCriticalSection(&mCritControl); //Get a critical section
//Get a unique name for signaling event
sprintf(mszName, "%s%d", pszName, ::GetCurrentProcessId());
//Get the event object
mhEvent = ::CreateEvent(NULL, FALSE, FALSE, mszName);
}
clsOpenLDVthread::~clsOpenLDVthread() {
if ( blnIsPaused() ) {
blnPause(false);
}
if ( blnIsRunning() ) {
blnStop();
}
if ( mhEvent ) {
::CloseHandle(mhEvent);
mhEvent = NULL;
}
::DeleteCriticalSection(&mCritControl);
}
bool clsOpenLDVthread::blnIsPaused() {
return meState == ePaused;
}
bool clsOpenLDVthread::blnIsPausing() {
return meState == ePausing;
}
bool clsOpenLDVthread::blnIsRunning() {
return meState == eRunning;
}
bool clsOpenLDVthread::blnIsStarting() {
return meState == eStarted;
}
bool clsOpenLDVthread::blnIsStopped() {
return meState == eIdle;
}
bool clsOpenLDVthread::blnIsStopping() {
return meState == eTerminating;
}
bool clsOpenLDVthread::blnPause(bool blnState) {
bool blnResult = mhThread != NULL;
if ( blnResult ) {
if ( blnState ) {
unsigned uintCountDown = 10u;
if ( blnIsRunning() || blnIsPausing() ) {
meState = ePausing;
while( blnIsPausing() && -- uintCountDown ) {
::SetEvent(mhEvent);
//Give thread chance to run and pause
_sleep(751);
}
blnResult = blnIsPaused();
}
} else {
if ( blnIsPaused() ) {
meState = eRunning;
//this will need replacing...mhThread->ResumeThread();
}
blnResult = true;
}
}
return blnResult;
}
bool clsOpenLDVthread::blnStop() {
bool blnResult = meState == eIdle;
unsigned uintCountDown = 100u;
if ( blnIsPaused() ) {
blnPause(false);
}
if ( blnIsRunning() ) {
meState = eTerminating;
while( !blnIsStopped() && --uintCountDown ) {
if ( mhEvent ) {
::SetEvent(mhEvent);
}
//Give thread a chance to run and terminate
_sleep(501);
}
blnResult = blnIsStopped();
mhThread = NULL;
}
return blnResult;
}
Finally a derived class that implements the thread class and provides the blnStart method:
class clsOpenLDVrdr : public clsOpenLDVthread {
public:
//Maximum size of uplink data per single transfer
static const unsigned mscuBuffersize;
private:
//The thread's main routine
static void msgReaderThread(LPVOID lpParam);
public:
clsOpenLDVrdr();
virtual ~clsOpenLDVrdr();
//Call this to start the thread, see clsOpenLDVthread for more operations
virtual bool blnStart();
};
const unsigned clsOpenLDVrdr::mscuBuffersize = MAX_OPENLDV_DATA;
clsOpenLDVrdr::clsOpenLDVrdr() : clsOpenLDVthread(_T("EvOpenLDVrdr")) {
}
clsOpenLDVrdr::~clsOpenLDVrdr() {
}
bool clsOpenLDVrdr::blnStart() {
bool blnResult = false;
if ( blnIsStopped() ) {
meState = eStarted;
//Create the thread
mhThread = (HANDLE)_beginthread(&clsOpenLDVrdr::msgReaderThread
,0, NULL);
blnResult = mhThread != NULL;
while( blnResult && (meState == eStarted) ) {
//Give the thread chance to start and initialize
_sleep(501);
}
}
return blnResult && (meState == eRunning);
}
void clsOpenLDVrdr::msgReaderThread(LPVOID lpParam) {
OutputDebugString("msgReaderThread\n");
}
An instance of the class clsOpenLDVrdr is created and the blnStart method called:
clsOpenLDVrdr* pobjReader = new clsOpenLDVrdr();
pobjReader->blnStart();
I can see in the debugger that "blnStart" is being called and stepping into it everything is executed...but the thread never runs.
Also tried using _beginthreadex instead of _beginthread:
mhThread = (HANDLE)_beginthreadex(0, 0, pfnThread, pobParam, 0, 0);
No difference. There is some kind of incompatibility problem here, as the simple example I created at the start of this post works, and there isn't much difference between the two versions, except maybe the way they are used: the first simple example was created as a Windows console application, while the project I'm having difficulty with is in a DLL.
I'm attaching to the DLL with the debugger and stepping through the code, which works until it gets to the loop after the _beginthread call; then it just loops forever and never gets into the thread.
I just tried the following, adding a standalone thread with a standard C function:
unsigned __stdcall threadTest(void* pobjData) {
OutputDebugString("threadTest\n");
return 0;
}
I then modify the "_beginthread" call as follows:
mhThread = (HANDLE)_beginthreadex(0, 0, threadTest, pobjParam, 0, 0);
Sadly the result is the same, the threadTest function is not called. But a valid handle is returned.
Found this:
unable to call a thread in dll file
Looks interesting and may explain the strange behaviour I'm experiencing.
Solved...I didn't realise at first but for some reason the existing DLL had a call to:
DisableThreadLibraryCalls(hInstance);
This prevents the threads from running. Having commented it out, everything now works.
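For what it's worth, this matches Microsoft's documented restriction that DisableThreadLibraryCalls should not be called from a DLL linked to the static C run-time library, because the CRT relies on DLL_THREAD_ATTACH/DETACH notifications for the per-thread state behind _beginthread/_beginthreadex. A sketch of the resulting (unremarkable) DllMain once the call is removed:
BOOL WINAPI DllMain(HINSTANCE hInstance, DWORD dwReason, LPVOID /*lpReserved*/)
{
    switch (dwReason)
    {
    case DLL_PROCESS_ATTACH:
        // No DisableThreadLibraryCalls(hInstance) here: with a static CRT
        // it would starve _beginthread(ex) of the notifications it needs.
        break;
    case DLL_THREAD_ATTACH:
    case DLL_THREAD_DETACH:
    case DLL_PROCESS_DETACH:
        break;
    }
    return TRUE;
}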

Memory usage with IOCP [closed]

I am converting our code to use IOCP and have the communication relatively stable, but the application's memory usage keeps increasing. It looks like I am getting back (in completion function calls) far fewer OverlappedEx objects than I create. My code is below. What am I doing wrong?
#ifndef NETWORK_DATA
#define NETWORK_DATA
#include <afxwin.h>
#include <vector>
#include <string>
#include "CriticalSectionLocker.h"
using namespace std;
DWORD NetworkManager::NetworkThread(void* param)
{
bool bRun = true;
while (bRun)
{
DWORD wait = ::WaitForSingleObject(CCommunicationManager::s_hShutdownEvent, 0);
if (WAIT_OBJECT_0 == wait)
{
bRun = false;
DEBUG_LOG0("Shutdown event was signalled thread");
}
else
{
DWORD dwBytesTransfered = 0;
void* lpContext = nullptr;
OVERLAPPED* pOverlapped = nullptr;
BOOL bReturn = GetQueuedCompletionStatus(s_IOCompletionPort,
&dwBytesTransfered,
(LPDWORD)&lpContext,
&pOverlapped,
INFINITE);
if (nullptr == lpContext)
{
DEBUG_LOG0("invalid context");
/*continue;*/
}
else
{
if (bReturn && dwBytesTransfered > 0)
{
OverlappedEx* data = reinterpret_cast<OverlappedEx*>(pOverlapped);
ServerData* networkData = reinterpret_cast<ServerData*>(lpContext);
if (networkData && data)
{
switch(data->m_opType)
{
case OverlappedEx::OP_READ:
/*DEBUG_LOG4("device name: %s bytes received: %d socket: %d handle: %d",
networkData->Name().c_str(), dwBytesTransfered, networkData->Socket(), networkData->Handle());*/
networkData->CompleteReceive(dwBytesTransfered, data);
break;
case OverlappedEx::OP_WRITE:
/*DEBUG_LOG4("device name: %s bytes sent: %d socket: %d handle: %d",
networkData->Name().c_str(), dwBytesTransfered, networkData->Socket(), networkData->Handle());*/
networkData->CompleteSend(dwBytesTransfered, data);
break;
}
}
}
else
{
/*DEBUG_LOG2("GetQueuedCompletionStatus failed: bReturn: %d dwBytesTransferred: %u", bReturn, dwBytesTransfered);*/
}
}
}
}
return 0;
}
enum NetworkType
{
UDP,
TCP
};
struct OverlappedEx : public OVERLAPPED
{
enum OperationType
{
OP_READ,
OP_WRITE
};
const static int MAX_PACKET_SIZE = 2048;
WSABUF m_wBuf;
char m_buffer[MAX_PACKET_SIZE];
OperationType m_opType;
OverlappedEx()
{
Clear();
m_refCount = 1;
}
void AddRef()
{
::InterlockedIncrement(&m_refCount);
}
void Release()
{
::InterlockedDecrement(&m_refCount);
}
int Refcount() const
{
return InterlockedExchangeAdd((unsigned long*)&m_refCount, 0UL);
}
~OverlappedEx()
{
Clear();
}
void Clear()
{
memset(m_buffer, 0, MAX_PACKET_SIZE);
m_wBuf.buf = m_buffer;
m_wBuf.len = MAX_PACKET_SIZE;
Internal = 0;
InternalHigh = 0;
Offset = 0;
OffsetHigh = 0;
hEvent = nullptr;
m_opType = OP_READ;
}
private:
volatile LONG m_refCount;
};
class ServerData
{
public:
const static int MAX_REVEIVE_QUEUE_SIZE = 100;
const static int MAX_PACKET_SIZE = 2048;
const static int MAX_SEND_QUEUE_SIZE = 10;
const static int MAX_RECEIVE_QUEUE_SIZE = 100;
const static int MAX_OVERLAPPED_STRUCTS = 20;
ServerData(NetworkType netType, const string& sName, CCommunicationManager::CommHandle handle,
SOCKET sock, HANDLE IOPort) :
m_sName(sName)
{
InitializeCriticalSection(&m_receiveQueLock);
InitializeCriticalSection(&m_objectLock);
m_Handle = handle;
m_Socket = sock;
m_nIPAddress = 0;
m_netType = netType;
m_bEnabled = true;
m_ovlpIndex = 0;
for (int i = 0; i < MAX_OVERLAPPED_STRUCTS; ++i)
{
m_olps.push_back(new OverlappedEx);
}
/* Associate socket with completion handle */
if (m_Socket != 0)
{
CreateIoCompletionPort( reinterpret_cast<HANDLE>(m_Socket), IOPort, reinterpret_cast<ULONG_PTR>(this), 0 );
}
}
~ServerData()
{
CriticalSectionLocker lock(&m_receiveQueLock);
DeleteCriticalSection(&m_receiveQueLock);
DeleteCriticalSection(&m_objectLock);
closesocket(m_Socket);
}
const string& Name() const { return m_sName; }
bool Enabled() const { return m_bEnabled; }
void SetEnabled(bool bEnabled)
{
m_bEnabled = bEnabled;
}
int Handle() const { return m_Handle; }
void SetHandle(int handle)
{
m_Handle = handle;
}
unsigned long IPAddress() const { return m_nIPAddress; }
SOCKET Socket() const
{
return m_Socket;
}
void SetSocket(SOCKET sock)
{
m_Socket = sock;
}
void SetIPAddress(unsigned long nIP)
{
m_nIPAddress = nIP;
}
bool ValidTelegram(const vector<char>& telegram) const
{
return false;
}
OverlappedEx* GetBuffer()
{
OverlappedEx* ret = nullptr;
if (!m_olps.empty())
{
ret = m_olps.front();
m_olps.pop_front();
}
return ret;
}
void CompleteReceive(size_t numBytes, OverlappedEx* data)
{
//DEBUG_LOG1("%d buffers are available", AvailableBufferCount());
if (numBytes > 0)
{
vector<char> v(data->m_buffer, data->m_buffer + numBytes);
ReceivedData rd;
rd.SetData(v);
EnqueReceiveMessage(rd);
}
data->Release();
{
CriticalSectionLocker lock(&m_objectLock);
m_olps.push_back(data);
// DEBUG_LOG1("Queue size: %d", m_olps.size());
}
StartReceiving();
}
void CompleteSend(size_t numBytes, OverlappedEx* data)
{
data->Release();
{
CriticalSectionLocker lock(&m_objectLock);
m_olps.push_back(data);
//DEBUG_LOG1("Queue size: %d", m_olps.size());
}
//DEBUG_LOG2("Object: %s num sent: %d", Name().c_str(), numBytes);
}
void StartReceiving()
{
DWORD bytesRecv = 0;
sockaddr_in senderAddr;
DWORD flags = 0;
int senderAddrSize = sizeof(senderAddr);
int rc = 0;
CriticalSectionLocker lock(&m_objectLock);
auto olp = GetBuffer();
if (!olp)
{
if (...)
{
m_olps.push_back(new OverlappedEx);
olp = GetBuffer();
}
else
{
if (...)
{
DEBUG_LOG1("Name: %s ************* NO AVAILABLE BUFFERS - bailing ***************", Name().c_str());
}
return;
}
}
olp->Clear();
olp->m_opType = OverlappedEx::OP_READ;
olp->AddRef();
switch(GetNetworkType())
{
case UDP:
{
rc = WSARecvFrom(Socket(),
&olp->m_wBuf,
1,
&bytesRecv,
&flags,
(SOCKADDR *)&senderAddr,
&senderAddrSize, (OVERLAPPED*)olp, NULL);
}
break;
case TCP:
{
rc = WSARecv(Socket(),
&olp->m_wBuf,
1,
&bytesRecv,
&flags,
(OVERLAPPED*)olp, NULL);
}
break;
}
if (SOCKET_ERROR == rc)
{
DWORD err = WSAGetLastError();
if (err != WSA_IO_PENDING)
{
olp->Release();
m_olps.push_back(olp);
}
}
}
void SetWriteBuf(const SendData& msg, OverlappedEx* data)
{
int len = min(msg.Data().size(), MAX_PACKET_SIZE);
memcpy(data->m_buffer, &msg.Data()[0], len);
data->m_wBuf.buf = data->m_buffer;
data->m_wBuf.len = len;
}
void StartSending(const SendData& msg)
{
DEBUG_LOG1("device name: %s", Name().c_str());
int rc = 0;
DWORD bytesSent = 0;
DWORD flags = 0;
SOCKET sock = Socket();
int addrSize = sizeof(sockaddr_in);
CriticalSectionLocker lock(&m_objectLock);
//UpdateOverlapped(OverlappedEx::OP_WRITE);
auto olp = GetBuffer();
if (!olp)
{
if (...)
{
m_olps.push_back(new OverlappedEx);
olp = GetBuffer();
DEBUG_LOG2("name: %s ************* NO AVAILABLE BUFFERS new size: %d ***************", Name().c_str(), m_olps.size());
}
else
{
if (...)
{
DEBUG_LOG1("Name: %s ************* NO AVAILABLE BUFFERS - bailing ***************", Name().c_str());
}
return;
}
}
olp->Clear();
olp->m_opType = OverlappedEx::OP_WRITE;
olp->AddRef();
SetWriteBuf(msg, olp);
switch(GetNetworkType())
{
case UDP:
rc = WSASendTo(Socket(), &olp->m_wBuf, 1,
&bytesSent, flags, (sockaddr*)&msg.SendAddress(),
addrSize, (OVERLAPPED*)olp, NULL);
break;
case TCP:
rc = WSASend(Socket(), &olp->m_wBuf, 1,
&bytesSent, flags, (OVERLAPPED*)olp, NULL);
break;
}
if (SOCKET_ERROR == rc)
{
DWORD err = WSAGetLastError();
if (err != WSA_IO_PENDING)
{
olp->Release();
m_olps.push_back(olp);
}
}
}
size_t ReceiveQueueSize()
{
CriticalSectionLocker lock(&m_receiveQueLock);
return m_receiveDataQueue.size();
}
void GetAllData(vector <ReceivedData> & data)
{
CriticalSectionLocker lock(&m_receiveQueLock);
while (m_receiveDataQueue.size() > 0)
{
data.push_back(m_receiveDataQueue.front());
m_receiveDataQueue.pop_front();
}
}
void DequeReceiveMessage(ReceivedData& msg)
{
CriticalSectionLocker lock(&m_receiveQueLock);
if (m_receiveDataQueue.size() > 0)
{
msg = m_receiveDataQueue.front();
m_receiveDataQueue.pop_front();
}
}
template <class T>
void EnqueReceiveMessage(T&& data)
{
CriticalSectionLocker lock(&m_receiveQueLock);
if (m_receiveDataQueue.size() <= MAX_RECEIVE_QUEUE_SIZE)
{
m_receiveDataQueue.push_back(data);
}
else
{
static int s_nLogCount = 0;
if (s_nLogCount % 100 == 0)
{
DEBUG_LOG2("Max queue size was reached handle id: %d in %s", Handle(), Name().c_str());
}
s_nLogCount++;
}
}
NetworkType GetNetworkType() const
{
return m_netType;
}
private:
ServerData(const ServerData&);
ServerData& operator=(const ServerData&);
private:
bool m_bEnabled; //!< This member flags if this reciever is enabled for receiving incoming connections.
int m_Handle; //!< This member holds the handle for this receiver.
SOCKET m_Socket; //!< This member holds the socket information for this receiver.
unsigned long m_nIPAddress; //!< This member holds an IP address the socket is bound to.
deque < ReceivedData > m_receiveDataQueue;
CRITICAL_SECTION m_receiveQueLock;
CRITICAL_SECTION m_objectLock;
string m_sName;
NetworkType m_netType;
deque<OverlappedEx*> m_olps;
size_t m_ovlpIndex;
};
#endif
Your implementation of Release() makes no sense: you decrement m_refCount, and then what? It must be
void Release()
{
if (!InterlockedDecrement(&m_refCount)) delete this;
}
As written, you never free the OverlappedEx objects; that alone gives a memory leak, and it is just what I can see at first glance.
I can also advise against WaitForSingleObject(CCommunicationManager::s_hShutdownEvent, 0); polling an event is a bad way to detect shutdown. Call only GetQueuedCompletionStatus, and for shutdown call PostQueuedCompletionStatus(s_IOCompletionPort, 0, 0, 0) several times (once per thread listening on s_IOCompletionPort); when a thread sees pOverlapped == 0, it should just exit.
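A sketch of that pattern, with numWorkerThreads as a placeholder for however many threads service the port:
// Request shutdown: wake every worker once with a null OVERLAPPED.
for (int i = 0; i < numWorkerThreads; ++i)
    PostQueuedCompletionStatus(s_IOCompletionPort, 0, 0, nullptr);

// In NetworkThread, drop the WaitForSingleObject poll and treat a null
// pOverlapped as the quit signal:
BOOL bReturn = GetQueuedCompletionStatus(s_IOCompletionPort,
                                         &dwBytesTransfered,
                                         (LPDWORD)&lpContext,
                                         &pOverlapped,
                                         INFINITE);
if (pOverlapped == nullptr)
    return 0; // shutdown was requested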
Use
OverlappedEx* data = static_cast<OverlappedEx*>(pOverlapped);
instead of reinterpret_cast.
Make ~OverlappedEx() private; it must not be called directly, only via Release().
olp->Release();
m_olps.push_back(olp);
After you call Release() on an object you must not access it again, so call either olp->Release() or m_olps.push_back(olp), but not both; doing both kills the whole logic of Release(). Perhaps you need to override operator delete of OverlappedEx so that it calls m_olps.push_back(olp) (and of course override operator new as well).
Again, the cast in (OVERLAPPED*)olp: what is it for? Because you inherit your own struct from OVERLAPPED, the compiler does the conversion automatically.

How to asynchronously read/write in C++?

How do you copy one stream to another using dedicated read/write threads in C++?
Let's say I have these methods (not real, but to illustrate the point) to read/write data from. These read/write functions could represent anything (network/file/USB/serial/etc).
// returns the number of bytes read
void read(char* buffer, int bufferSize, int* bytesRead);
// returns the number of bytes written
void write(char* buffer, int bufferSize, int* bytesWritten);
The solution should also be portable.
NOTE: I am aware that Windows has a FILE_FLAG_OVERLAPPED feature, but this assumes that the read/write is file IO. Remember, these read/write methods could represent anything.
Here is the solution I came up with.
Header
#pragma once
#include <stdlib.h>
#include <queue>
#include <mutex>
#include <thread>
#include <chrono>
#include <list>
#define ASYNC_COPY_READ_WRITE_SUCCESS 0
struct BufferBlock;
struct ReadStream
{
// read a stream to a buffer.
// return non-zero if error occured
virtual int read(char* buffer, int bufferSize, int* bytesRead) = 0;
};
struct WriteStream
{
// write a buffer to a stream.
// return non-zero if error occured
virtual int write(char* buffer, int bufferSize, int* bytesWritten) = 0;
};
class BufferBlockManager
{
public:
BufferBlockManager(int numberOfBlocks, int bufferSize);
~BufferBlockManager();
void enqueueBlockForRead(BufferBlock* block);
void dequeueBlockForRead(BufferBlock** block);
void enqueueBlockForWrite(BufferBlock* block);
void dequeueBlockForWrite(BufferBlock** block);
void resetState();
private:
std::list<BufferBlock*> blocks;
std::queue<BufferBlock*> blocksPendingRead;
std::queue<BufferBlock*> blocksPendingWrite;
std::mutex queueLock;
std::chrono::milliseconds dequeueSleepTime;
};
void AsyncCopyStream(BufferBlockManager* bufferBlockManager, ReadStream* readStream, WriteStream* writeStream, int* readResult, int* writeResult);
CPP
#include "AsyncReadWrite.h"
struct BufferBlock
{
BufferBlock(int bufferSize) : buffer(NULL)
{
this->bufferSize = bufferSize;
this->buffer = new char[bufferSize];
this->actualSize = 0;
this->isLastBlock = false;
}
~BufferBlock()
{
this->bufferSize = 0;
delete[] this->buffer; // allocated with new[], so delete[] rather than free()
this->buffer = NULL;
this->actualSize = 0;
}
char* buffer;
int bufferSize;
int actualSize;
bool isLastBlock;
};
BufferBlockManager::BufferBlockManager(int numberOfBlocks, int bufferSize)
{
dequeueSleepTime = std::chrono::milliseconds(100);
for (int x = 0; x < numberOfBlocks; x++)
{
BufferBlock* block = new BufferBlock(bufferSize);
blocks.push_front(block);
blocksPendingRead.push(block);
}
}
BufferBlockManager::~BufferBlockManager()
{
for (std::list<BufferBlock*>::const_iterator iterator = blocks.begin(), end = blocks.end(); iterator != end; ++iterator) {
delete (*iterator);
}
}
void BufferBlockManager::enqueueBlockForRead(BufferBlock* block)
{
queueLock.lock();
block->actualSize = 0;
block->isLastBlock = false;
blocksPendingRead.push(block);
queueLock.unlock();
}
void BufferBlockManager::dequeueBlockForRead(BufferBlock** block)
{
WAITFOR:
while (blocksPendingRead.size() == 0)
std::this_thread::sleep_for(dequeueSleepTime);
queueLock.lock();
if (blocksPendingRead.size() == 0)
{
queueLock.unlock();
goto WAITFOR;
}
*block = blocksPendingRead.front();
blocksPendingRead.pop();
queueLock.unlock();
}
void BufferBlockManager::enqueueBlockForWrite(BufferBlock* block)
{
queueLock.lock();
blocksPendingWrite.push(block);
queueLock.unlock();
}
void BufferBlockManager::dequeueBlockForWrite(BufferBlock** block)
{
WAITFOR:
while (blocksPendingWrite.size() == 0)
std::this_thread::sleep_for(dequeueSleepTime);
queueLock.lock();
if (blocksPendingWrite.size() == 0)
{
queueLock.unlock();
goto WAITFOR;
}
*block = blocksPendingWrite.front();
blocksPendingWrite.pop();
queueLock.unlock();
}
void BufferBlockManager::resetState()
{
queueLock.lock();
blocksPendingRead = std::queue<BufferBlock*>();
blocksPendingWrite = std::queue<BufferBlock*>();
for (std::list<BufferBlock*>::const_iterator iterator = blocks.begin(), end = blocks.end(); iterator != end; ++iterator) {
(*iterator)->actualSize = 0;
}
queueLock.unlock();
}
struct AsyncCopyContext
{
AsyncCopyContext(BufferBlockManager* bufferBlockManager, ReadStream* readStream, WriteStream* writeStream)
{
this->bufferBlockManager = bufferBlockManager;
this->readStream = readStream;
this->writeStream = writeStream;
this->readResult = ASYNC_COPY_READ_WRITE_SUCCESS;
this->writeResult = ASYNC_COPY_READ_WRITE_SUCCESS;
}
BufferBlockManager* bufferBlockManager;
ReadStream* readStream;
WriteStream* writeStream;
int readResult;
int writeResult;
};
void ReadStreamThread(AsyncCopyContext* asyncContext)
{
int bytesRead = 0;
BufferBlock* readBuffer = NULL;
int readResult = ASYNC_COPY_READ_WRITE_SUCCESS;
while (
// as long there hasn't been any write errors
asyncContext->writeResult == ASYNC_COPY_READ_WRITE_SUCCESS
// and we haven't had an error reading yet
&& readResult == ASYNC_COPY_READ_WRITE_SUCCESS)
{
// let's dequeue a block to read into!
asyncContext->bufferBlockManager->dequeueBlockForRead(&readBuffer);
readResult = asyncContext->readStream->read(readBuffer->buffer, readBuffer->bufferSize, &bytesRead);
readBuffer->actualSize = bytesRead;
readBuffer->isLastBlock = bytesRead == 0;
if (readResult == ASYNC_COPY_READ_WRITE_SUCCESS)
{
// this was a valid read, go ahead and queue it for writing
asyncContext->bufferBlockManager->enqueueBlockForWrite(readBuffer);
}
else
{
// an error occurred while reading
asyncContext->readResult = readResult;
// since an error occurred, queue a block for writing indicating we are done and there are no more bytes to read
readBuffer->isLastBlock = true;
readBuffer->actualSize = 0;
asyncContext->bufferBlockManager->enqueueBlockForWrite(readBuffer);
}
if (readBuffer->isLastBlock) return;
}
}
void WriteStreamThread(AsyncCopyContext* asyncContext)
{
int bytesWritten = 0;
BufferBlock* writeBuffer = NULL;
int writeResult = ASYNC_COPY_READ_WRITE_SUCCESS;
bool isLastWriteBlock = false;
while (
// as long as there are no errors during reading
asyncContext->readResult == ASYNC_COPY_READ_WRITE_SUCCESS
// and we haven't had an error writing yet
&& writeResult == ASYNC_COPY_READ_WRITE_SUCCESS)
{
// let's dequeue a block for writing!
asyncContext->bufferBlockManager->dequeueBlockForWrite(&writeBuffer);
isLastWriteBlock = writeBuffer->isLastBlock;
if (writeBuffer->actualSize > 0)
writeResult = asyncContext->writeStream->write(writeBuffer->buffer, writeBuffer->actualSize, &bytesWritten);
if (writeResult == ASYNC_COPY_READ_WRITE_SUCCESS)
{
asyncContext->bufferBlockManager->enqueueBlockForRead(writeBuffer);
if (isLastWriteBlock) return;
}
else
{
asyncContext->writeResult = writeResult;
asyncContext->bufferBlockManager->enqueueBlockForRead(writeBuffer);
return;
}
}
}
void AsyncCopyStream(BufferBlockManager* bufferBlockManager, ReadStream* readStream, WriteStream* writeStream, int* readResult, int* writeResult)
{
AsyncCopyContext asyncContext(bufferBlockManager, readStream, writeStream);
std::thread readThread(ReadStreamThread, &asyncContext);
std::thread writeThread(WriteStreamThread, &asyncContext);
readThread.join();
writeThread.join();
*readResult = asyncContext.readResult;
*writeResult = asyncContext.writeResult;
}
Usage
#include <stdio.h>
#include <tchar.h>
#include "AsyncReadWrite.h"
struct ReadTestStream : ReadStream
{
int readCount = 0;
int read(char* buffer, int bufferSize, int* bytesRead)
{
printf("Starting read...\n");
memset(buffer, 0, bufferSize);
if (readCount == 10)
{
*bytesRead = 0;
return 0;
}
// pretend this function takes a while!
std::this_thread::sleep_for(std::chrono::milliseconds(100));
char buff[100];
sprintf_s(buff, "This is read number %d\n", readCount);
strcpy_s(buffer, bufferSize, buff);
*bytesRead = strlen(buffer);
readCount++;
printf("Finished read...\n");
return 0;
}
};
struct WriteTestStream : WriteStream
{
int write(char* buffer, int bufferSize, int* bytesWritten)
{
printf("Starting write...\n");
// pretend this function takes a while!
std::this_thread::sleep_for(std::chrono::milliseconds(500));
printf("%s", buffer);
printf("Finished write...\n");
return 0;
}
};
int _tmain(int argc, _TCHAR* argv[])
{
BufferBlockManager bufferBlockManager(5, 4096);
ReadTestStream readStream;
WriteTestStream writeStream;
int readResult = 0;
int writeResult = 0;
printf("Starting copy...\n");
AsyncCopyStream(&bufferBlockManager, &readStream, &writeStream, &readResult, &writeResult);
printf("Finished copy... readResult=%d writeResult=%d \n", readResult, writeResult);
getchar();
return 0;
}
EDIT: I put my solution into a GitHub repository here. If you wish to use this code, refer to the repository, since it may be more up to date than this answer.
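One refinement to the solution above: the dequeue functions spin with sleep_for and a goto, so a block can sit in a queue for up to 100 ms before the other side notices it. A std::condition_variable lets the consumer block until a block is actually available. A sketch (the class name is mine, not from the repository):
#include <condition_variable>
#include <mutex>
#include <queue>

template <typename T>
class BlockingQueue
{
public:
    void push(T item)
    {
        {
            std::lock_guard<std::mutex> lock(m_);
            q_.push(std::move(item));
        }
        cv_.notify_one(); // wake one waiting consumer
    }
    T pop() // blocks until an item is available
    {
        std::unique_lock<std::mutex> lock(m_);
        cv_.wait(lock, [this] { return !q_.empty(); });
        T item = std::move(q_.front());
        q_.pop();
        return item;
    }
private:
    std::mutex m_;
    std::condition_variable cv_;
    std::queue<T> q_;
};
Each of dequeueBlockForRead/dequeueBlockForWrite then reduces to a single blocking pop() call, and the enqueue functions to a push().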
Typically, you would just have one thread for each direction that alternates between reads and writes.