Select function with non-blocking sockets - C++

I'm building an online game client, and when I try to connect to an offline server, my client freezes. So I wanted to use non-blocking sockets, which suit games, since there are other tasks that need to be done while connecting to the server.
With non-blocking sockets, the connect function always returns the same value regardless of the result, so people here recommended using the select function to find out the result of the connection request.
(setting the socket to non-blocking before connecting)
u_long iMode=1;
ioctlsocket(hSocket,FIONBIO,&iMode);
(setting up the socket sets)
FD_ZERO(&Write);
FD_ZERO(&Err);
FD_SET(hSocket, &Write);
FD_SET(hSocket, &Err);
TIMEVAL Timeout;
int TimeoutSec = 10; // timeout after 10 seconds
Timeout.tv_sec = TimeoutSec;
Timeout.tv_usec = 0;
int iResult = select(0, //ignored
NULL, //read
&(client.Write), //Write Check
&(client.Err), //Error Check
&Timeout);
if(iResult)
{
}
else
{
message_login("Error","Can't connect to the server");
}
The select function always returns -1. Why?

When select() returns -1 (SOCKET_ERROR), use WSAGetLastError() to find out why it failed.
If the socket is in the Err set when select() exits, use getsockopt(SOL_SOCKET, SO_ERROR) to retrieve the socket error code that tells you why connect() failed.
if(iResult) evaluates as true for any non-zero value, including -1. You need to use if(iResult > 0) instead, as iResult will report the number of sockets that are signaled in any fd_set, 0 on timeout, and -1 on failure.
Try something more like this instead:
u_long iMode = 1;
if (ioctlsocket(hSocket, FIONBIO, &iMode) == SOCKET_ERROR)
{
int errCode = WSAGetLastError();
// use errCode as needed...
message_login("Error", "Can't set socket to non-blocking, error: ..."); // however you supply a variable value to your message...
}
if (connect(client.hSocket, ...) == SOCKET_ERROR)
{
int errCode = WSAGetLastError();
if (errCode != WSAEWOULDBLOCK)
{
// use errCode as needed...
message_login("Error", "Can't connect to the server, error: ..."); // however you supply a variable value...
}
else
{
// only in this condition can you now use select() to wait for connect() to finish...
}
}
FD_ZERO(&(client.Write));
FD_ZERO(&(client.Err));
FD_SET(client.hSocket, &(client.Write));
FD_SET(client.hSocket, &(client.Err));
TIMEVAL Timeout;
int TimeoutSec = 10; // timeout after 10 seconds
Timeout.tv_sec = TimeoutSec;
Timeout.tv_usec = 0;
int iResult = select(0, //ignored
NULL, //read
&(client.Write), //Write Check
&(client.Err), //Error Check
&Timeout);
if (iResult > 0)
{
if (FD_ISSET(client.hSocket, &(client.Err)))
{
DWORD errCode = 0;
int len = sizeof(errCode);
if (getsockopt(client.hSocket, SOL_SOCKET, SO_ERROR, (char*)&errCode, &len) == 0)
{
// use errCode as needed...
message_login("Error", "Can't connect to the server, error: ..."); // however you supply a variable value to your message...
}
else
message_login("Error", "Can't connect to the server, unknown reason");
}
else
message_login("Success", "Connected to the server");
}
else if (iResult == 0)
{
message_login("Error", "Timeout connecting to the server");
}
else
{
int errCode = WSAGetLastError();
// use errCode as needed...
message_login("Error", "Can't connect to the server, error: ..."); // however you supply a variable value to your message...
}

Related

TCP Connect With Invalid Connection Blocks Port

I'm writing a TCP communication script in C++ to communicate between my computer and an Aldebaran Nao robot.
In general my script is working. However, the trouble I am having is that when I call connect from the client (when the server application is closed or the Ethernet connection is removed), I get an error that the operation is in progress.
However, once the server application is restarted or the Ethernet cable is reconnected, I still cannot call connect to successfully reestablish a connection; I still get an error that the operation is in progress.
As a note, whenever my client determines that a connection cannot be made, the socket descriptor is closed before reattempting a connection.
If there is any more information that would be useful, I would be happy to provide it. This project is relatively large, so I didn't want to include too much irrelevant information here.
Here is my code for connecting on the client side:
TCPStream* TCPConnector::connect(const char* serverIP, int port, int timeoutSec)
{
if (timeoutSec == 0)
{
return connect(serverIP, port);
}
struct sockaddr_in address;
// Store all zeros for address struct.
memset(&address, 0, sizeof(address));
// Configure address struct.
address.sin_family = AF_INET;
address.sin_port = htons(port); // Convert from host to TCP network byte order.
inet_pton(PF_INET, serverIP, &(address.sin_addr)); // Convert IP address to network byte order.
// Create a socket. The socket signature is as follows: socket(int domain, int type, int protocol)
int sd = socket(AF_INET, SOCK_STREAM, 0);
int optval = 1;
if (setsockopt(sd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof optval) == -1)
{
std::cout << "failed to set socket option" << std::endl;
}
// Set socket to be non-blocking.
int arg;
arg = fcntl(sd, F_GETFL, NULL);
arg |= O_NONBLOCK;
fcntl(sd, F_SETFL, arg);
// Connect with time limit.
fd_set set;
FD_ZERO(&set); // Clear the set.
FD_SET(sd, &set); // Add our file descriptor to the set.
struct timeval timeout;
timeout.tv_sec = timeoutSec;
timeout.tv_usec = 0;
// If the connect call returns 0, then the connection was established. Otherwise,
// check if the three-way handshake is underway.
if (::connect(sd, (struct sockaddr *)&address, sizeof(address)) < 0)
{
// If the handshake is underway.
if (errno == EINPROGRESS)
{
std::cout << "handshake in progress" << std::endl;
// Designate timeout period.
int ret = select(sd + 1, NULL, &set, NULL, &timeout);
std::cout << "return value from select : " << ret << std::endl;
// Check if timeout or an error occurred.
if (ret <= 0)
{
return NULL;
}
else
{
// Check if select returned 1 due to an error.
int valopt;
socklen_t len = sizeof(int);
getsockopt(sd, SOL_SOCKET, SO_ERROR, (void*)(&valopt), &len);
if (valopt)
{
char * errorMessage = strerror(errno); // get string message from errno
std::string msg (errorMessage);
std::cout << msg << std::endl;
return NULL;
}
}
}
else
{
return NULL;
}
}
// Return socket to blocking mode
arg = fcntl(sd, F_GETFL, NULL);
arg &= (~O_NONBLOCK);
fcntl(sd, F_SETFL, arg);
// Create stream object.
return new TCPStream(sd, &address);
}
Your socket is in non-blocking mode (you set it explicitly).
As a result, your connect will return immediately with 'connection is in progress'. When the socket is non-blocking, you then need to poll on it and wait for it to become readable and/or writable - that means the connection has completed (either successfully or not).
A better option in my view would be to use blocking sockets - I see no reason for you to use a non-blocking call here.
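For reference, the polling approach might look roughly like this (a sketch only, assuming a POSIX socket sd on which connect() has already returned EINPROGRESS, and a hypothetical wait_for_connect() helper):
#include <poll.h>
#include <sys/socket.h>

// Sketch: wait up to timeoutSec for a non-blocking connect() on 'sd' to finish,
// then read SO_ERROR to see whether it actually succeeded.
// Returns 0 on success, -1 on timeout or error.
int wait_for_connect(int sd, int timeoutSec)
{
    struct pollfd pfd;
    pfd.fd = sd;
    pfd.events = POLLOUT;               // connect completion makes the socket writable

    int ret = poll(&pfd, 1, timeoutSec * 1000);
    if (ret <= 0)
        return -1;                      // timeout (0) or poll error (-1)

    int err = 0;
    socklen_t len = sizeof(err);
    if (getsockopt(sd, SOL_SOCKET, SO_ERROR, &err, &len) < 0 || err != 0)
        return -1;                      // connect() failed; err holds the reason

    return 0;                           // connected
}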

C TCP socket non-blocking receive timeout

I'm trying to write a client which will try to receive data for up to 3 seconds. I have implemented the connect method using select with the code below.
//socket creation
m_hSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
m_stAddress.sin_family = AF_INET;
m_stAddress.sin_addr.S_un.S_addr = inet_addr(pchIP);
m_stAddress.sin_port = htons(iPort);
m_stTimeout.tv_sec = SOCK_TIMEOUT_SECONDS;
m_stTimeout.tv_usec = 0;
//connecting to server
long iMode = 1;
int iResult = ioctlsocket(m_hSocket, FIONBIO, &iMode);
connect(m_hSocket, (struct sockaddr *)&m_stAddress, sizeof(m_stAddress));
iMode = 0;
iResult = ioctlsocket(m_hSocket, FIONBIO, &iMode);
fd_set stWrite;
FD_ZERO(&stWrite);
FD_SET(m_hSocket, &stWrite);
iResult = select(0, NULL, &stWrite, NULL, &m_stTimeout);
if((iResult > 0) && (FD_ISSET(m_hSocket, &stWrite)))
return true;
But I cannot figure out what I am missing in the receive timeout code below. It doesn't wait if the server connection gets disconnected; it just returns instantly from the select method.
Also, how can I write a non-blocking socket call with a timeout for socket send?
long iMode = 1;
int iResult = ioctlsocket(m_hSocket, FIONBIO, &iMode);
fd_set stRead;
FD_ZERO(&stRead);
FD_SET(m_hSocket, &stRead);
int iRet = select(0, &stRead, NULL, NULL, &m_stTimeout);
if ((iRet > 0) && (FD_ISSET(m_hSocket, &stRead)))
{
while ((iBuffLen-1) > 0)
{
int iRcvLen = recv(m_hSocket, pchBuff, iBuffLen-1, 0);
if (iRcvLen == SOCKET_ERROR)
{
return false;
}
else if (iRcvLen == 0)
{
break;
}
pchBuff += iRcvLen;
iBuffLen -= iRcvLen;
}
}
The first parameter to select should not be 0.
Correct usage of select can be found here:
http://developerweb.net/viewtopic.php?id=2933
The first parameter should be the maximum value of your socket descriptors + 1, and you should take interrupted system calls into account if the socket is non-blocking:
/* Call select() */
do {
FD_ZERO(&readset);
FD_SET(socket_fd, &readset);
result = select(socket_fd + 1, &readset, NULL, NULL, NULL);
} while (result == -1 && errno == EINTR);
This is just example code; you probably need the timeout parameter as well.
If you can get EINTR, this will complicate the required logic, because after an EINTR you have to make the same call again, but with the remaining time left to wait.
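One way to handle that (a sketch only, assuming a POSIX socket and a hypothetical select_read_with_timeout() helper) is to measure the elapsed time and retry with whatever remains:
#include <errno.h>
#include <sys/select.h>
#include <time.h>

// Retry select() on EINTR, shrinking the timeout by the time already spent.
// Returns whatever select() returns: >0 ready, 0 timeout, -1 error.
int select_read_with_timeout(int fd, int timeout_sec)
{
    struct timespec start, now;
    clock_gettime(CLOCK_MONOTONIC, &start);
    long remaining_ms = timeout_sec * 1000L;

    for (;;)
    {
        fd_set readset;
        FD_ZERO(&readset);
        FD_SET(fd, &readset);

        struct timeval tv;
        tv.tv_sec = remaining_ms / 1000;
        tv.tv_usec = (remaining_ms % 1000) * 1000;

        int result = select(fd + 1, &readset, NULL, NULL, &tv);
        if (result != -1 || errno != EINTR)
            return result;              // ready, timeout, or a real error

        // Interrupted by a signal: recompute how much time is left and retry.
        clock_gettime(CLOCK_MONOTONIC, &now);
        long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000L
                        + (now.tv_nsec - start.tv_nsec) / 1000000L;
        remaining_ms = timeout_sec * 1000L - elapsed_ms;
        if (remaining_ms <= 0)
            return 0;                   // treat as a timeout
    }
}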
I think for non-blocking mode one needs to check for recv() failure along with a timeout value. That means select() first returns whether the socket is ready to receive data or not. If it is, control goes into the read loop; otherwise it waits until the timeout elapses on the select() call. But if the receive fails due to some unexpected situation while inside the read loop, we need to manually check for a socket error and the maximum timeout value. If the socket error persists and the timeout elapses, we need to break out.
I'm done with my receive timeout logic for non-blocking mode.
Please correct me if I am wrong.
bool bReturn = true;
SetNonBlockingMode(true);
//check whether the socket is ready to receive
fd_set stRead;
FD_ZERO(&stRead);
FD_SET(m_hSocket, &stRead);
int iRet = select(0, &stRead, NULL, NULL, &m_stTimeout);
DWORD dwStartTime = GetTickCount();
DWORD dwCurrentTime = 0;
//if socket is not ready this line will be hit after 3 sec timeout and go to the end
//if it is ready control will go inside the read loop and reads data until data ends or
//socket error is getting triggered continuously for more than 3 secs.
if ((iRet > 0) && (FD_ISSET(m_hSocket, &stRead)))
{
while ((iBuffLen-1) > 0)
{
int iRcvLen = recv(m_hSocket, pchBuff, iBuffLen-1, 0);
dwCurrentTime = GetTickCount();
if ((iRcvLen == SOCKET_ERROR) && ((dwCurrentTime - dwStartTime) >= SOCK_TIMEOUT_SECONDS * 1000))
{
bReturn = false;
break;
}
else if (iRcvLen == 0)
{
break;
}
pchBuff += iRcvLen;
iBuffLen -= iRcvLen;
}
}
SetNonBlockingMode(false);
return bReturn;

Non-blocking connect OpenSSL

I created a regular C socket. Upon connect, it returns EWOULDBLOCK/WSAEWOULDBLOCK as expected because I did:
unsigned long int mode = 0;
ioctlsocket(ssl_info->sock, FIONBIO, &mode);
setsockopt(ssl_info->sock, SOL_SOCKET, SO_RCVTIMEO, (char*)&tv, sizeof(tv));
setsockopt(ssl_info->sock, SOL_SOCKET, SO_SNDTIMEO, (char*)&tv, sizeof(tv));
to put the socket in non-blocking mode. After that I do:
ssl = SSL_new(ctx);
SSL_set_fd(ssl, sock);
return SSL_connect(ssl);
However, it returns -1.
I read online that it means I need to handle SSL_ERROR_WANT_READ and SSL_ERROR_WANT_WRITE.
So I did:
int res = -1;
while(res == -1)
{
res = SSL_connect(ssl);
switch (SSL_get_error(ssl, res))
{
case SSL_ERROR_WANT_CONNECT:
MessageBox(NULL, "Connect Error", "", 0);
break;
case SSL_ERROR_WANT_READ: //prints this every time..
MessageBox(NULL, "Read Error", "", 0);
break;
case SSL_ERROR_WANT_WRITE:
MessageBox(NULL, "Write Error", "", 0);
break;
}
SelectSocket(ssl);
}
std::cout<<"Connected!\n";
Where SelectSocket is defined as:
bool SelectSocket(SSL* ssl)
{
if (blockmode)
{
fd_set readfds;
fd_set writefds;
FD_ZERO(&readfds);
FD_ZERO (&writefds);
FD_SET(ssl_info->sock, &readfds);
FD_SET(ssl_info->sock, &writefds);
struct timeval tv = {0};
tv.tv_sec = timeout / 1000;
tv.tv_usec = timeout % 1000;
return select(sock + 1, &readfds, &writefds, NULL, &tv) >= 0;
}
return select(sock + 1, NULL, NULL, NULL, NULL) != SOCKET_ERROR;
}
So how exactly can I get it to connect? I can't seem to read or write anything when the socket is non-blocking.
Any ideas?
The (-1) returned by SSL_connect() indicates that the underlying BIO could not satisfy the needs of SSL_connect() to continue the handshake.
Generally, the calling process then must repeat the call after taking appropriate action to satisfy the needs of SSL_connect().
However, when using a non-blocking socket, nothing is to be done; but select() can be used to check for the required condition.
(When using a buffering BIO, like a BIO pair, data must be written into or retrieved out of the BIO before being able to continue.)
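For example, once the socket really is in non-blocking mode, a handshake loop along these lines (a sketch only, with a hypothetical ssl_connect_nonblocking() helper and minimal error handling) lets select() drive SSL_connect() to completion:
// Sketch: drive a non-blocking SSL_connect() with select().
// Assumes 'ssl' is attached to the non-blocking socket 'sock' via SSL_set_fd().
int ssl_connect_nonblocking(SSL* ssl, SOCKET sock, int timeoutSec)
{
    for (;;)
    {
        int ret = SSL_connect(ssl);
        if (ret == 1)
            return 1;                       // handshake finished

        int err = SSL_get_error(ssl, ret);

        fd_set readfds, writefds;
        FD_ZERO(&readfds);
        FD_ZERO(&writefds);
        if (err == SSL_ERROR_WANT_READ)
            FD_SET(sock, &readfds);         // wait until the socket is readable
        else if (err == SSL_ERROR_WANT_WRITE)
            FD_SET(sock, &writefds);        // wait until the socket is writable
        else
            return -1;                      // fatal handshake error

        timeval tv;
        tv.tv_sec = timeoutSec;
        tv.tv_usec = 0;
        int sel = select(0, &readfds, &writefds, NULL, &tv); // first arg ignored on Winsock
        if (sel <= 0)
            return -1;                      // timeout or select error
    }
}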
Your code actually disables non-blocking I/O, as you are passing 0 as the argument value for FIONBIO to ioctlsocket, which is documented as:
FIONBIO
The *argp parameter is a pointer to an unsigned long value. Set *argp to a nonzero value if the nonblocking mode should be enabled, or zero if the nonblocking mode should be disabled. [..]
https://msdn.microsoft.com/en-us/library/windows/desktop/ms738573%28v=vs.85%29.aspx
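Presumably the intent was the opposite; a minimal sketch of the fix, reusing the question's variable:
unsigned long int mode = 1;                  // nonzero enables non-blocking mode
ioctlsocket(ssl_info->sock, FIONBIO, &mode);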

blocking sockets timeout on receive issues

I'm writing a MFC\C++ program that needs to create a UDP socket to receive datagrams on.
I am using a blocking socket (for performance reasons) and have some errors (or a misunderstanding) when trying to set a timeout for receive calls.
When I set the receive timeout to 100 ms using setsockopt() and the receive does time out, it times out after about 600 ms.
When I set the receive timeout to 1000 ms using setsockopt() and the receive does time out, it times out after about 1600 ms.
Why is this ?
Am I doing something wrong ?
My code goes something like this:
WSADATA wsaData;
SOCKET ReceivingSocket;
SOCKADDR_IN ReceiverAddr;
int Port = 2345;
char ReceiveBuf[1024];
int BufLength = 1024;
SOCKADDR_IN SenderAddr;
int SenderAddrSize = sizeof(SenderAddr);
int Ret;
// 100 ms timeout
int RecvTimeout = 100 ;
// Initialize Winsock version 2.2
if ((Ret = WSAStartup(MAKEWORD(2,2), &wsaData)) != 0)
{
TRACE("ERROR: WSAStartup failed with error %d\n", Ret);
return;
}
// Create a new socket to receive datagrams
if ((ReceivingSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP))
== INVALID_SOCKET)
{
TRACE("ERROR: socket failed with error %d\n", WSAGetLastError());
WSACleanup();
return;
}
// receive datagrams from all interfaces using port 2345
ReceiverAddr.sin_family = AF_INET;
ReceiverAddr.sin_port = htons(Port);
ReceiverAddr.sin_addr.s_addr = htonl(INADDR_ANY);
// bind
if (bind(ReceivingSocket, (SOCKADDR *)&ReceiverAddr, sizeof(ReceiverAddr))
== SOCKET_ERROR)
{
TRACE("ERROR: bind failed with error %d\n", WSAGetLastError());
closesocket(ReceivingSocket);
WSACleanup();
return;
}
// set receive timeout
if ( setsockopt (
ReceivingSocket,
SOL_SOCKET,
SO_RCVTIMEO,
(const char*)&RecvTimeout,
sizeof(RecvTimeout) ) == SOCKET_ERROR )
{
TRACE("Error using setsockopt\n") ;
closesocket(ReceivingSocket);
WSACleanup();
return;
}
// Receive 10 messages - here the timeout comes to life...
for(int i = 0 ; i < 10 ; i++)
{
TRACE("Before Recv, Ticks=%d\n", GetTickCount()) ;
if ((Ret = recvfrom(ReceivingSocket, ReceiveBuf, BufLength, 0,
(SOCKADDR *)&SenderAddr, &SenderAddrSize)) == SOCKET_ERROR)
{
if(WSAETIMEDOUT == WSAGetLastError())
TRACE("After Recv, Ticks=%d\n\n", GetTickCount()) ;
else
{
TRACE("ERROR: receive failed with error %d\n", WSAGetLastError());
closesocket(ReceivingSocket);
WSACleanup();
return;
}
}
}
closesocket(ReceivingSocket);
WSACleanup();
and the output I get is this:
Before Recv, Ticks=1476485406
After Recv, Ticks=1476486031
Before Recv, Ticks=1476486031
After Recv, Ticks=1476486656
Before Recv, Ticks=1476486656
After Recv, Ticks=1476487281
.
.
.
In addition, when I looked at the MSDN to find out more about SO_RCVTIMEO I noticed the following:
If a send or receive operation times out on a socket, the socket state is indeterminate, and should not be used..."
So basically using SO_RCVTIMEO seems like a bad idea if I do get timeouts.
Am I missing something ?
Use a 'timeval' struct to initialize the timeout value.
struct timeval timeout;
timeout.tv_sec = SOCKET_READ_TIMEOUT_SEC; // in seconds
timeout.tv_usec = 0;

Unblocking WSAAccept for blocking TCP server sockets

I'm writing a TCP server (blocking socket model).
I'm having trouble implementing a valid normal program exit when the server is waiting (blocking) for new connection attempts on accept (I use WSAAccept).
The code for the server's listening socket is something like this (I omitted error handling and other irrelevant code):
int ErrCode = WSAStartup(MAKEWORD(2,2), &m_wsaData) ;
// Create a new socket to listen and accept new connection attempts
struct addrinfo hints, *res = NULL, *ptr = NULL ;
int rc, count = 0 ;
memset(&hints, 0, sizeof(hints)) ;
hints.ai_family = AF_UNSPEC ;
hints.ai_socktype = SOCK_STREAM ;
hints.ai_protocol = IPPROTO_TCP ;
hints.ai_flags = AI_PASSIVE ;
CString strPort ;
strPort.Format("%d", Port) ;
getaddrinfo(pLocalIp, strPort.GetBuffer(), &hints, &res) ;
strPort.ReleaseBuffer() ;
ptr = res ;
if ((m_Socket = WSASocket(res->ai_family, res->ai_socktype, res->ai_protocol, NULL, 0, 0)) == INVALID_SOCKET)
{
// some error
}
if(bind(m_Socket, (SOCKADDR *)res->ai_addr, res->ai_addrlen) == SOCKET_ERROR)
{
// some error
}
if (listen(m_Socket, SOMAXCONN) == SOCKET_ERROR)
{
// some error
}
So far so good... Then I implemented the WSAAccept call inside a thread like this:
SOCKADDR_IN ClientAddr ;
int ClientAddrLen = sizeof(ClientAddr) ;
SOCKET TempS = WSAAccept(m_Socket, (SOCKADDR*) &ClientAddr, &ClientAddrLen, NULL, NULL);
Of course, WSAAccept blocks until a new connection attempt is made, but if I wish to exit the program then I need some way to cause WSAAccept to exit. I have tried several different approaches:
Attempting to call shutdown and/or closesocket with m_Socket from within another thread failed (the program just hangs).
Using WSAEventSelect indeed solves this issue, but then WSAAccept delivers only non-blocking sockets - which is not my intention. (Is there a way to make the sockets blocking again?)
I read about APCs and tried to use something like QueueUserAPC(MyAPCProc, m_hThread, 1), but it didn't work either.
What am I doing wrong?
Is there a better way to cause this blocking WSAAccept call to exit?
Use select() with a timeout to detect when a client connection is actually pending before calling WSAAccept() to accept it. select() works with blocking sockets without putting them into non-blocking mode, and it gives your code regular opportunities to check whether the app is shutting down.
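A rough sketch of that approach (reusing m_Socket and the accept call from the question; IsShuttingDown() is a hypothetical shutdown flag):
// Sketch: poll the listening socket with a 1-second select() timeout so the
// accept thread can periodically check a shutdown flag. m_Socket stays blocking.
while (!IsShuttingDown())               // hypothetical shutdown check
{
    fd_set readfds;
    FD_ZERO(&readfds);
    FD_SET(m_Socket, &readfds);

    timeval tv;
    tv.tv_sec = 1;                      // wake up once a second to re-check the flag
    tv.tv_usec = 0;

    int ret = select(0, &readfds, NULL, NULL, &tv); // first arg ignored on Winsock
    if (ret == SOCKET_ERROR)
        break;                          // select failed, bail out
    if (ret == 0)
        continue;                       // timeout, loop around and check the flag

    SOCKADDR_IN ClientAddr;
    int ClientAddrLen = sizeof(ClientAddr);
    SOCKET TempS = WSAAccept(m_Socket, (SOCKADDR*)&ClientAddr, &ClientAddrLen, NULL, NULL);
    // TempS is a blocking socket here; hand it off to a worker thread...
}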
Go with a non-blocking accepting socket (WSAEventSelect, as you mentioned) and use non-blocking WSAAccept. You can turn the non-blocking socket that WSAAccept returns into a blocking socket with ioctlsocket (see MSDN).
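For example (a sketch, reusing TempS from the question; error handling omitted). Note that the accepted socket inherits the listener's WSAEventSelect association, which has to be cleared before the socket can be switched back to blocking mode:
// Sketch: clear the inherited event selection, then switch the socket to blocking.
WSAEventSelect(TempS, NULL, 0);         // remove the inherited event association
u_long mode = 0;                        // 0 = blocking mode
ioctlsocket(TempS, FIONBIO, &mode);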
Do all the other stuff you absolutely have to on shutdown (maybe you have DB connections to close, or files to flush?), and then call ExitProcess(0). That will stop your listening thread, no problem.
See the log4cplus source for my take on this issue. I basically wait on two event objects: one is signaled when a connection is being accepted (using WSAEventSelect()) and the other is there to interrupt the waiting. The most relevant parts of the source are below; see ServerSocket::accept().
namespace {
static
bool
setSocketBlocking (SOCKET_TYPE s)
{
u_long val = 0;
int ret = ioctlsocket (to_os_socket (s), FIONBIO, &val);
if (ret == SOCKET_ERROR)
{
set_last_socket_error (WSAGetLastError ());
return false;
}
else
return true;
}
static
bool
removeSocketEvents (SOCKET_TYPE s, HANDLE ev)
{
// Clean up socket events handling.
int ret = WSAEventSelect (to_os_socket (s), ev, 0);
if (ret == SOCKET_ERROR)
{
set_last_socket_error (WSAGetLastError ());
return false;
}
else
return true;
}
static
bool
socketEventHandlingCleanup (SOCKET_TYPE s, HANDLE ev)
{
bool ret = removeSocketEvents (s, ev);
ret = setSocketBlocking (s) && ret;
ret = WSACloseEvent (ev) && ret;
return ret;
}
} // namespace
ServerSocket::ServerSocket(unsigned short port)
{
sock = openSocket (port, state);
if (sock == INVALID_SOCKET_VALUE)
{
err = get_last_socket_error ();
return;
}
HANDLE ev = WSACreateEvent ();
if (ev == WSA_INVALID_EVENT)
{
err = WSAGetLastError ();
closeSocket (sock);
sock = INVALID_SOCKET_VALUE;
}
else
{
assert (sizeof (std::ptrdiff_t) >= sizeof (HANDLE));
interruptHandles[0] = reinterpret_cast<std::ptrdiff_t>(ev);
}
}
Socket
ServerSocket::accept ()
{
int const N_EVENTS = 2;
HANDLE events[N_EVENTS] = {
reinterpret_cast<HANDLE>(interruptHandles[0]) };
HANDLE & accept_ev = events[1];
int ret;
// Create event and prime socket to set the event on FD_ACCEPT.
accept_ev = WSACreateEvent ();
if (accept_ev == WSA_INVALID_EVENT)
{
set_last_socket_error (WSAGetLastError ());
goto error;
}
ret = WSAEventSelect (to_os_socket (sock), accept_ev, FD_ACCEPT);
if (ret == SOCKET_ERROR)
{
set_last_socket_error (WSAGetLastError ());
goto error;
}
do
{
// Wait either for interrupt event or actual connection coming in.
DWORD wsawfme = WSAWaitForMultipleEvents (N_EVENTS, events, FALSE,
WSA_INFINITE, TRUE);
switch (wsawfme)
{
case WSA_WAIT_TIMEOUT:
case WSA_WAIT_IO_COMPLETION:
// Retry after timeout or APC.
continue;
// This is interrupt signal/event.
case WSA_WAIT_EVENT_0:
{
// Reset the interrupt event back to non-signalled state.
ret = WSAResetEvent (reinterpret_cast<HANDLE>(interruptHandles[0]));
// Clean up socket events handling.
ret = socketEventHandlingCleanup (sock, accept_ev);
// Return Socket with state set to accept_interrupted.
return Socket (INVALID_SOCKET_VALUE, accept_interrupted, 0);
}
// This is accept_ev.
case WSA_WAIT_EVENT_0 + 1:
{
// Clean up socket events handling.
ret = socketEventHandlingCleanup (sock, accept_ev);
// Finally, call accept().
SocketState st = not_opened;
SOCKET_TYPE clientSock = acceptSocket (sock, st);
int eno = 0;
if (clientSock == INVALID_SOCKET_VALUE)
eno = get_last_socket_error ();
return Socket (clientSock, st, eno);
}
case WSA_WAIT_FAILED:
default:
set_last_socket_error (WSAGetLastError ());
goto error;
}
}
while (true);
error:;
DWORD eno = get_last_socket_error ();
// Clean up socket events handling.
if (sock != INVALID_SOCKET_VALUE)
{
(void) removeSocketEvents (sock, accept_ev);
(void) setSocketBlocking (sock);
}
if (accept_ev != WSA_INVALID_EVENT)
WSACloseEvent (accept_ev);
set_last_socket_error (eno);
return Socket (INVALID_SOCKET_VALUE, not_opened, eno);
}
void
ServerSocket::interruptAccept ()
{
(void) WSASetEvent (reinterpret_cast<HANDLE>(interruptHandles[0]));
}
A not-so-neat way of solving this problem is to issue a dummy WSAConnect request from the thread that needs to do the shutdown. If the dummy connect fails, you might resort to ExitProcess as suggested by Martin.
void Drain()
{
if (InterlockedIncrement(&drain) == 1)
{
// Make a dummy connection to unblock wsaaccept
SOCKET ConnectSocket = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL, 0, 0);
if (ConnectSocket != INVALID_SOCKET) {
int iResult = WSAConnect(ConnectSocket, result->ai_addr, result->ai_addrlen, 0, 0, 0, 0);
if (iResult != 0) {
printf("Unable to connect to server! %d\n", WSAGetLastError());
}
else
{
closesocket(ConnectSocket);
}
}
}
}