c tcp socket non blocking receive timeout - c++

I'm trying to write a client that will wait up to 3 seconds to receive data. I have implemented the connect with a timeout using select(), as in the code below.
//socket creation
m_hSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
m_stAddress.sin_family = AF_INET;
m_stAddress.sin_addr.S_un.S_addr = inet_addr(pchIP);
m_stAddress.sin_port = htons(iPort);
m_stTimeout.tv_sec = SOCK_TIMEOUT_SECONDS;
m_stTimeout.tv_usec = 0;
//connecting to server
u_long iMode = 1;
int iResult = ioctlsocket(m_hSocket, FIONBIO, &iMode);
connect(m_hSocket, (struct sockaddr *)&m_stAddress, sizeof(m_stAddress));
iMode = 0;
iResult = ioctlsocket(m_hSocket, FIONBIO, &iMode);
fd_set stWrite;
FD_ZERO(&stWrite);
FD_SET(m_hSocket, &stWrite);
iResult = select(0, NULL, &stWrite, NULL, &m_stTimeout);
if((iResult > 0) && (FD_ISSET(m_hSocket, &stWrite)))
return true;
But I cannot figure out what I am missing in the receive-timeout code below. It doesn't wait if the server connection gets disconnected; it just returns instantly from the select() call.
Also, how can I write a non-blocking socket call with a timeout for send?
u_long iMode = 1;
int iResult = ioctlsocket(m_hSocket, FIONBIO, &iMode);
fd_set stRead;
FD_ZERO(&stRead);
FD_SET(m_hSocket, &stRead);
int iRet = select(0, &stRead, NULL, NULL, &m_stTimeout);
if ((iRet > 0) && (FD_ISSET(m_hSocket, &stRead)))
{
    while ((iBuffLen-1) > 0)
    {
        int iRcvLen = recv(m_hSocket, pchBuff, iBuffLen-1, 0);
        if (iRcvLen == SOCKET_ERROR)
        {
            return false;
        }
        else if (iRcvLen == 0)
        {
            break;
        }
        pchBuff += iRcvLen;
        iBuffLen -= iRcvLen;
    }
}

The first parameter to select should not be 0.
Correct usage of select can be found here:
http://developerweb.net/viewtopic.php?id=2933
The first parameter should be the highest socket descriptor value plus one, and you should take interrupted system calls into account if the socket is non-blocking:
/* Call select() */
do {
    FD_ZERO(&readset);
    FD_SET(socket_fd, &readset);
    result = select(socket_fd + 1, &readset, NULL, NULL, NULL);
} while (result == -1 && errno == EINTR);
This is just example code; you probably need the timeout parameter as well.
Handling EINTR complicates the required logic, because after an EINTR you have to make the same call again, but with the remaining time left to wait.
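For illustration, here is a minimal sketch of that retry loop (my own addition, not code from the question): it assumes a POSIX-style descriptor socket_fd and a total budget of timeout_seconds, and recomputes the remaining time after every EINTR before calling select() again.
/* Sketch only: retry select() after EINTR using the time still remaining. */
#include <sys/select.h>
#include <sys/time.h>
#include <errno.h>

/* Returns >0 if readable, 0 on timeout, -1 on error. */
int wait_readable(int socket_fd, int timeout_seconds)
{
    struct timeval deadline, now, remaining;
    gettimeofday(&now, NULL);
    deadline = now;
    deadline.tv_sec += timeout_seconds;

    for (;;) {
        gettimeofday(&now, NULL);
        remaining.tv_sec = deadline.tv_sec - now.tv_sec;
        remaining.tv_usec = deadline.tv_usec - now.tv_usec;
        if (remaining.tv_usec < 0) {
            remaining.tv_sec -= 1;
            remaining.tv_usec += 1000000;
        }
        if (remaining.tv_sec < 0)
            return 0;                        /* whole budget used up: report timeout */

        fd_set readset;
        FD_ZERO(&readset);
        FD_SET(socket_fd, &readset);

        int result = select(socket_fd + 1, &readset, NULL, NULL, &remaining);
        if (result == -1 && errno == EINTR)
            continue;                        /* interrupted: retry with the remaining time */
        return result;
    }
}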

I think for non-blocking mode one needs to check for recv() failure together with a timeout value. That means select() first reports whether the socket is ready to receive data. If it is, control goes into the read loop; otherwise it sleeps until the timeout elapses on the select() call. But if a receive fails due to some transient condition inside the read loop, we need to manually check the socket error against the maximum timeout. If the socket error persists and the timeout elapses, we break out of the loop.
Here is my receive-timeout logic for non-blocking mode.
Please correct me if I am wrong.
bool bReturn = true;
SetNonBlockingMode(true);
//check whether the socket is ready to receive
fd_set stRead;
FD_ZERO(&stRead);
FD_SET(m_hSocket, &stRead);
int iRet = select(0, &stRead, NULL, NULL, &m_stTimeout);
DWORD dwStartTime = GetTickCount();
DWORD dwCurrentTime = 0;
//if the socket is not ready, this line is hit after the 3-second timeout and control goes to the end;
//if it is ready, control goes inside the read loop and reads data until the data ends or
//a socket error keeps being triggered for more than 3 seconds.
if ((iRet > 0) && (FD_ISSET(m_hSocket, &stRead)))
{
    while ((iBuffLen-1) > 0)
    {
        int iRcvLen = recv(m_hSocket, pchBuff, iBuffLen-1, 0);
        dwCurrentTime = GetTickCount();
        if ((iRcvLen == SOCKET_ERROR) && ((dwCurrentTime - dwStartTime) >= SOCK_TIMEOUT_SECONDS * 1000))
        {
            bReturn = false;
            break;
        }
        else if (iRcvLen == 0)
        {
            break;
        }
        pchBuff += iRcvLen;
        iBuffLen -= iRcvLen;
    }
}
SetNonBlockingMode(false);
return bReturn;

Related

ChromeOS TCP Connectivity with Windows - peer resets

I'm working on a server implementation on a Chromebook, using TCP connectivity between the Windows client and the ChromeOS server. When a connection is made, the server (Chromebook) side sends out 5 packets; the first one is the header, the next three are the information being sent, and the last one is the footer of the message.
We're using send and recv for sending and receiving the information, and after the header is sent, the rest of the packets are never received, because the client receives error code 10054, "connection reset by peer", before the rest arrive, even though they are sent.
The sizes of the packets are as follows: the header is 4 bytes, the second packet sent is 2 bytes, the next one is 1 byte, the next one is 8 bytes, and the footer is 4 bytes. Our suspicion was that perhaps the 2 bytes are too small for the OS to send, and it waits for more data before sending, unlike Windows, where it currently sends them immediately. So we tried using SO_LINGER on the socket, but it didn't help. We also tried using TCP_NODELAY, but it didn't help either. When attempting not to write to the socket's fd with the timeout (using select), the connection is broken after the first header is sent.
We know all the packets are sent, because logging the sent packets from the machine shows all packets as sent, and only the first one arrives.
The only socket flag used is this:
setsockopt(s, SOL_SOCKET, SO_REUSEADDR, (const char *) &n, sizeof(n));
Sending a message:
ret = write_timeout(fd, timeout);
if (ret != OK) {
    Logger::LogError(PROTOCOL_ERROR, "Write data to socket failed with error %d, while waiting timeout of %u\n", get_last_comm_error(), timeout);
    return PROTOCOL_ERROR;
}
while (size) {
    ret = send(fd, ptr, size, 0);
    ptr += ret;
    size -= ret;
    if (ret < 0) {
        Logger::LogError(PROTOCOL_ERROR, "Transport write failed: %d\n", get_last_comm_error());
        return PROTOCOL_ERROR;
    }
}
Write_timeout:
int write_timeout(int fd, unsigned int wait_useconds)
{
    Logger::LogInfo(__FUNCTION__);
    int ret = OK;
    if (wait_useconds > 0) {
        fd_set write_fdset;
        struct timeval timeout;
        FD_ZERO(&write_fdset);
        FD_SET(fd, &write_fdset);
        timeout.tv_sec = 0;
        timeout.tv_usec = wait_useconds;
        do {
            ret = select(fd + 1, NULL, &write_fdset, NULL, &timeout);
        } while (ret < 0 && errno == EINTR);
        if (ret == OK) {
            ret = -1;
            errno = ETIMEDOUT;
        } else if (ret == 1)
            return OK;
    }
    return ret;
}
The receiving end is similar:
ret = read_timeout(fd, timeout);
if (ret != OK) {
    Logger::LogError(PROTOCOL_ERROR, "Error while trying to receive data from the host - timeout\n");
    return TIMED_OUT;
}
while (size) {
    ret = recv(fd, ptr, size, 0);
    ptr += ret;
    size -= ret;
    if (ret == 0) {
        return FAILED_TRANSACTION;
    }
    if (ret < 0) {
        Logger::LogError(PROTOCOL_ERROR, "Transport read failed: %d\n", get_last_comm_error());
        return UNKNOWN_ERROR;
    }
}
return OK;
And timeout:
int read_timeout(int fd, unsigned int wait_useconds)
{
    Logger::LogInfo(__FUNCTION__);
    int ret = OK;
    if (wait_useconds > 0) {
        fd_set read_fdset;
        struct timeval timeout;
        FD_ZERO(&read_fdset);
        FD_SET(fd, &read_fdset);
        timeout.tv_sec = 0;
        timeout.tv_usec = wait_useconds;
        do {
            ret = select(fd + 1, &read_fdset, NULL, NULL, &timeout);
        } while (ret < 0 && errno == EINTR);
        if (ret == OK) {
            ret = -1;
            errno = ETIMEDOUT;
        } else if (ret == 1)
            return OK;
    }
    return ret;
}
Our code does work on Windows, but (after modifying it accordingly) using it on ChromeOS unfortunately does not seem to work.
We're running the server on a Chromebook with version 93 and building the code with that code base as well.
I did try making the second packet 4 bytes as well, but it still does not work, and the connection is reset by the peer after the first packet is received correctly.
Does anyone know if maybe ChromeOS waits for bigger packets before sending? Or if something else works a little differently with TCP on that OS and needs to be handled differently than on Windows?

Setting timeout to recv function

I read from a socket using the recv function. I have a problem when no data is available for reading: my program just stops. I found that I can set a timeout using the select function, but it looks like the timeout affects only the select call itself, and the recv that follows select still blocks indefinitely.
fd_set set;
struct timeval timeout;
FD_ZERO(&set); /* clear the set */
FD_SET(s, &set); /* add our file descriptor to the set */
timeout.tv_sec = SOCKET_READ_TIMEOUT_SEC;
timeout.tv_usec = 0;
int rv = select(s, &set, NULL, NULL, &timeout);
if((recv_size = recv(s , rx_tmp , bufSize ,0)) == SOCKET_ERROR)
{
...
}
How can I make the recv function return after some timeout?
Another way to set a timeout on recv() itself without using select() is to use setsockopt() to set the socket's SO_RCVTIMEO option (on platforms that support it).
On Windows, the code would look like this:
DWORD timeout = SOCKET_READ_TIMEOUT_SEC * 1000;
setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, (char*)&timeout, sizeof(timeout));
//...
recv_size = recv(s, rx_tmp, bufSize, 0);
if (recv_size == SOCKET_ERROR)
{
if (WSAGetLastError() != WSAETIMEDOUT)
//...
}
On other platforms, the code would look like this instead:
struct timeval timeout;
timeout.tv_sec = SOCKET_READ_TIMEOUT_SEC;
timeout.tv_usec = 0;
setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &timeout, sizeof(timeout));
//...
recv_size = recv(s, rx_tmp, bufSize, 0);
if (recv_size == -1)
{
if ((errno != EAGAIN) && (errno != EWOULDBLOCK))
//...
}
You should check the return value of select(). select() returns 0 if the timeout expired, so you should check for errors and call recv() only if select() returned a positive value:
On success, select() and pselect() return the number of file descriptors contained in the three returned descriptor sets (that is, the total number of bits that are set in readfds, writefds, exceptfds) which may be zero if the timeout expires before anything interesting happens.
int rv = select(s + 1, &set, NULL, NULL, &timeout);
if (rv == SOCKET_ERROR)
{
    // select error...
}
else if (rv == 0)
{
    // timeout, socket does not have anything to read
}
else
{
    // socket has something to read
    recv_size = recv(s, rx_tmp, bufSize, 0);
    if (recv_size == SOCKET_ERROR)
    {
        // read failed...
    }
    else if (recv_size == 0)
    {
        // peer disconnected...
    }
    else
    {
        // read successful...
    }
}
Use the FD_ISSET() macro to test whether there is data to read. If it returns false, don't do the read.
http://linux.die.net/man/3/fd_set
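As a rough sketch of that guard (reusing the s, rx_tmp, and bufSize names from the question), recv() is only called once both select() and FD_ISSET() say there is something to read:
fd_set set;
struct timeval timeout;
FD_ZERO(&set);
FD_SET(s, &set);
timeout.tv_sec = SOCKET_READ_TIMEOUT_SEC;
timeout.tv_usec = 0;

int rv = select(s + 1, &set, NULL, NULL, &timeout);   // first argument is ignored on Windows
if (rv > 0 && FD_ISSET(s, &set))
{
    // Data is pending, so this recv() will not block.
    recv_size = recv(s, rx_tmp, bufSize, 0);
}
else if (rv == 0)
{
    // Timed out with nothing to read; skip recv() entirely.
}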

Select function in non blocking sockets

I'm building an online game client, and when I try to connect to an offline server my client freezes, so I want to use non-blocking sockets, which suit games since there are other tasks to do while connecting to the server.
With non-blocking sockets, the connect function always returns the same value regardless of the result, so people here recommended using the select function to find out the result of the connection request.
(setting the non blocking socket before connection)
u_long iMode=1;
ioctlsocket(hSocket,FIONBIO,&iMode);
(setting the socket sets)
FD_ZERO(&Write);
FD_ZERO(&Err);
FD_SET(hSocket, &Write);
FD_SET(hSocket, &Err);
TIMEVAL Timeout;
int TimeoutSec = 10; // timeout after 10 seconds
Timeout.tv_sec = TimeoutSec;
Timeout.tv_usec = 0;
int iResult = select(0, //ignored
NULL, //read
&(client.Write), //Write Check
&(client.Err), //Error Check
&Timeout);
if(iResult)
{
}
else
{
message_login("Error","Can't connect to the server");
}
The select function always returns -1, why?
When select() returns -1 (SOCKET_ERROR), use WSAGetLastError() to find out why it failed.
If the socket is in the Err set when select() exits, use getsockopt(SOL_SOCKET, SO_ERROR) to retrieve the socket error code that tells you why connect() failed.
if(iResult) evaluates as true for any non-zero value, including -1. You need to use if(iResult > 0) instead, as iResult will report the number of sockets that are signaled in any fd_set, 0 on timeout, and -1 on failure.
Try something more like this instead:
u_long iMode = 1;
if (ioctlsocket(hSocket, FIONBIO, &iMode) == SOCKET_ERROR)
{
    int errCode = WSAGetLastError();
    // use errCode as needed...
    message_login("Error", "Can't set socket to non-blocking, error: ..."); // however you supply a variable value to your message...
}
if (connect(client.hSocket, ...) == SOCKET_ERROR)
{
    int errCode = WSAGetLastError();
    if (errCode != WSAEWOULDBLOCK)
    {
        // use errCode as needed...
        message_login("Error", "Can't connect to the server, error: ..."); // however you supply a variable value...
    }
    else
    {
        // only in this condition can you now use select() to wait for connect() to finish...
    }
}
TIMEVAL Timeout;
int TimeoutSec = 10; // timeout after 10 seconds
Timeout.tv_sec = TimeoutSec;
Timeout.tv_usec = 0;
int iResult = select(0,               //ignored
                     NULL,            //read
                     &(client.Write), //Write Check
                     &(client.Err),   //Error Check
                     &Timeout);
if (iResult > 0)
{
    if (FD_ISSET(client.hSocket, &(client.Err)))
    {
        DWORD errCode = 0;
        int len = sizeof(errCode);
        if (getsockopt(client.hSocket, SOL_SOCKET, SO_ERROR, (char*)&errCode, &len) == 0)
        {
            // use errCode as needed...
            message_login("Error", "Can't connect to the server, error: ..."); // however you supply a variable value to your message...
        }
        else
            message_login("Error", "Can't connect to the server, unknown reason");
    }
    else
        message_login("Success", "Connected to the server");
}
else if (iResult == 0)
{
    message_login("Error", "Timeout connecting to the server");
}
else
{
    int errCode = WSAGetLastError();
    // use errCode as needed...
    message_login("Error", "Can't connect to the server, error: ..."); // however you supply a variable value to your message...
}

Non-blocking connect OpenSSL

I created a regular C socket. Upon connect, it returns EWOULDBLOCK/WSAEWOULDBLOCK as expected because I did:
unsigned long int mode = 0;
ioctlsocket(ssl_info->sock, FIONBIO, &mode);
setsockopt(ssl_info->sock, SOL_SOCKET, SO_RCVTIMEO, (char*)&tv, sizeof(tv));
setsockopt(ssl_info->sock, SOL_SOCKET, SO_SNDTIMEO, (char*)&tv, sizeof(tv));
to put the socket in non-blocking mode. After that I do:
ssl = SSL_new(ctx);
SSL_set_fd(ssl, sock);
return SSL_connect(ssl);
However, it returns -1.
I read online that this means I need to handle SSL_ERROR_WANT_READ and SSL_ERROR_WANT_WRITE, so I did:
int res = -1;
while (res == -1)
{
    res = SSL_connect(ssl);
    switch (SSL_get_error(ssl, res))
    {
    case SSL_ERROR_WANT_CONNECT:
        MessageBox(NULL, "Connect Error", "", 0);
        break;
    case SSL_ERROR_WANT_READ: //prints this every time..
        MessageBox(NULL, "Read Error", "", 0);
        break;
    case SSL_ERROR_WANT_WRITE:
        MessageBox(NULL, "Write Error", "", 0);
        break;
    }
    SelectSocket(ssl);
}
std::cout << "Connected!\n";
Where SelectSocket is defined as:
bool SelectSocket(SSL* ssl)
{
    if (blockmode)
    {
        fd_set readfds;
        fd_set writefds;
        FD_ZERO(&readfds);
        FD_ZERO(&writefds);
        FD_SET(ssl_info->sock, &readfds);
        FD_SET(ssl_info->sock, &writefds);
        struct timeval tv = {0};
        tv.tv_sec = timeout / 1000;
        tv.tv_usec = timeout % 1000;
        return select(sock + 1, &readfds, &writefds, NULL, &tv) >= 0;
    }
    return select(sock + 1, NULL, NULL, NULL, NULL) != SOCKET_ERROR;
}
So how exactly can I get it to connect? I can't seem to read or write anything when the socket is non-blocking :S.
Any ideas?
The (-1) returned by SSL_connect() indicates that the underlying BIO could not satisfy the needs of SSL_connect() to continue the handshake.
Generally, the calling process then must repeat the call after taking appropriate action to satisfy the needs of SSL_connect().
However, when using a non-blocking socket, nothing is to be done; but select() can be used to check for the required condition.
(When using a buffering BIO, like a BIO pair, data must be written into or retrieved out of the BIO before being able to continue.)
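As an illustration of that advice, here is a minimal sketch (my own assumption, reusing the sock and ssl variables from the question, inside a function that returns bool): it keeps calling SSL_connect() and uses select() to wait for whichever direction SSL_get_error() says is needed.
// Sketch only: drive SSL_connect() on a non-blocking socket with select().
for (;;)
{
    int res = SSL_connect(ssl);
    if (res == 1)
        return true;                          // handshake completed

    int err = SSL_get_error(ssl, res);

    fd_set readfds, writefds;
    FD_ZERO(&readfds);
    FD_ZERO(&writefds);
    if (err == SSL_ERROR_WANT_READ)
        FD_SET(sock, &readfds);               // wait until the socket becomes readable
    else if (err == SSL_ERROR_WANT_WRITE)
        FD_SET(sock, &writefds);              // wait until the socket becomes writable
    else
        return false;                         // a real error, not a retryable condition

    struct timeval tv;
    tv.tv_sec = 10;                           // arbitrary example timeout
    tv.tv_usec = 0;
    if (select(sock + 1, &readfds, &writefds, NULL, &tv) <= 0)
        return false;                         // timeout or select() failure
}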
Your code actually disables non-blocking I/O, because you are passing 0 as the argument value for FIONBIO to ioctlsocket, which is documented as:
FIONBIO
The *argp parameter is a pointer to an unsigned long value. Set *argp to a nonzero value if the nonblocking mode should be enabled, or zero if the nonblocking mode should be disabled. [..]
https://msdn.microsoft.com/en-us/library/windows/desktop/ms738573%28v=vs.85%29.aspx
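So, to actually put the socket into non-blocking mode before calling SSL_connect(), the value passed for FIONBIO must be nonzero, for example:
u_long mode = 1;   // nonzero enables non-blocking mode
if (ioctlsocket(ssl_info->sock, FIONBIO, &mode) == SOCKET_ERROR)
{
    // inspect WSAGetLastError() and handle the failure
}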

Unblocking WSAAccept for blocking TCP server sockets

I'm writing a TCP server (blocking socket model).
I'm having trouble implementing a clean, normal program exit while the server is waiting (blocking) for new connection attempts in accept (I use WSAAccept).
The code for the server's listening socket is something like this (I omitted error handling and other irrelevant code):
int ErrCode = WSAStartup(MAKEWORD(2,2), &m_wsaData) ;
// Create a new socket to listen and accept new connection attempts
struct addrinfo hints, *res = NULL, *ptr = NULL ;
int rc, count = 0 ;
memset(&hints, 0, sizeof(hints)) ;
hints.ai_family = AF_UNSPEC ;
hints.ai_socktype = SOCK_STREAM ;
hints.ai_protocol = IPPROTO_TCP ;
hints.ai_flags = AI_PASSIVE ;
CString strPort ;
strPort.Format("%d", Port) ;
getaddrinfo(pLocalIp, strPort.GetBuffer(), &hints, &res) ;
strPort.ReleaseBuffer() ;
ptr = res ;
if ((m_Socket = WSASocket(res->ai_family, res->ai_socktype, res->ai_protocol, NULL, 0, 0)) == INVALID_SOCKET)
{
// some error
}
if(bind(m_Socket, (SOCKADDR *)res->ai_addr, res->ai_addrlen) == SOCKET_ERROR)
{
// some error
}
if (listen(m_Socket, SOMAXCONN) == SOCKET_ERROR)
{
// some error
}
So far so good... Then I implemented the WSAAccept call inside a thread like this:
SOCKADDR_IN ClientAddr ;
int ClientAddrLen = sizeof(ClientAddr) ;
SOCKET TempS = WSAAccept(m_Socket, (SOCKADDR*) &ClientAddr, &ClientAddrLen, NULL, NULL);
Of course WSAAccept blocks until a new connection attempt is made, but if I wish to exit
the program then I need some way to cause WSAAccept to return. I have tried several different approaches:
Attempting to call shutdown and/or closesocket on m_Socket from within another thread failed (the program just hangs).
Using WSAEventSelect indeed solves this issue, but then WSAAccept delivers only non-blocking sockets, which is not my intention. (Is there a way to make the sockets blocking?)
I read about APCs and tried something like QueueUserAPC(MyAPCProc, m_hThread, 1), but it didn't work either.
What am I doing wrong?
Is there a better way to cause this blocking WSAAccept to exit?
Use select() with a timeout to detect when a client connection is actually pending before calling WSAAccept() to accept it. This works with blocking sockets without putting them into non-blocking mode, and it gives your code regular opportunities to check whether the app is shutting down.
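A minimal sketch of that idea, reusing m_Socket and the accept code from the question, plus a hypothetical g_bShuttingDown flag that the shutdown path sets:
// Sketch only: wake up periodically so the thread can notice a shutdown request.
while (!g_bShuttingDown)
{
    fd_set readset;
    FD_ZERO(&readset);
    FD_SET(m_Socket, &readset);

    struct timeval tv;
    tv.tv_sec = 1;          // re-check the shutdown flag once per second
    tv.tv_usec = 0;

    int rv = select(0, &readset, NULL, NULL, &tv);   // first parameter is ignored on Windows
    if (rv == SOCKET_ERROR)
        break;              // listening socket went bad
    if (rv == 0)
        continue;           // timeout: loop and test g_bShuttingDown again

    // A connection is pending, so this WSAAccept() returns immediately.
    SOCKADDR_IN ClientAddr;
    int ClientAddrLen = sizeof(ClientAddr);
    SOCKET TempS = WSAAccept(m_Socket, (SOCKADDR*) &ClientAddr, &ClientAddrLen, NULL, NULL);
    // TempS is a normal blocking socket, since m_Socket was never switched to non-blocking mode.
}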
Go with a non-blocking accepting socket (WSAEventSelect, as you mentioned) and use non-blocking WSAAccept. You can turn the non-blocking socket that WSAAccept returns back into a blocking socket with ioctlsocket (see MSDN).
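For example (a sketch; AcceptedSock stands for whatever WSAAccept() returned), switching the accepted socket back to blocking mode mirrors the removeSocketEvents/setSocketBlocking pair in the log4cplus excerpt further down:
// Sketch only: the accepted socket inherits the listener's WSAEventSelect settings,
// so clear them before switching it back to blocking mode.
WSAEventSelect(AcceptedSock, NULL, 0);   // stop event-based notification on this socket
u_long blockingMode = 0;                 // zero puts the socket back into blocking mode
if (ioctlsocket(AcceptedSock, FIONBIO, &blockingMode) == SOCKET_ERROR)
{
    // inspect WSAGetLastError() and handle the failure
}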
Do all the other stuff you absolutely have to on shutdown (maybe you have DB connections to close, or files to flush?), and then call ExitProcess(0). That will stop your listening thread, no problem.
See the log4cplus source for my take on this issue. I basically wait on two event objects: one is signaled when a connection is being accepted (using WSAEventSelect()) and the other is there to interrupt the waiting. The most relevant parts of the source are below. See ServerSocket::accept().
namespace {
static
bool
setSocketBlocking (SOCKET_TYPE s)
{
u_long val = 0;
int ret = ioctlsocket (to_os_socket (s), FIONBIO, &val);
if (ret == SOCKET_ERROR)
{
set_last_socket_error (WSAGetLastError ());
return false;
}
else
return true;
}
static
bool
removeSocketEvents (SOCKET_TYPE s, HANDLE ev)
{
// Clean up socket events handling.
int ret = WSAEventSelect (to_os_socket (s), ev, 0);
if (ret == SOCKET_ERROR)
{
set_last_socket_error (WSAGetLastError ());
return false;
}
else
return true;
}
static
bool
socketEventHandlingCleanup (SOCKET_TYPE s, HANDLE ev)
{
bool ret = removeSocketEvents (s, ev);
ret = setSocketBlocking (s) && ret;
ret = WSACloseEvent (ev) && ret;
return ret;
}
} // namespace
ServerSocket::ServerSocket(unsigned short port)
{
sock = openSocket (port, state);
if (sock == INVALID_SOCKET_VALUE)
{
err = get_last_socket_error ();
return;
}
HANDLE ev = WSACreateEvent ();
if (ev == WSA_INVALID_EVENT)
{
err = WSAGetLastError ();
closeSocket (sock);
sock = INVALID_SOCKET_VALUE;
}
else
{
assert (sizeof (std::ptrdiff_t) >= sizeof (HANDLE));
interruptHandles[0] = reinterpret_cast<std::ptrdiff_t>(ev);
}
}
Socket
ServerSocket::accept ()
{
int const N_EVENTS = 2;
HANDLE events[N_EVENTS] = {
reinterpret_cast<HANDLE>(interruptHandles[0]) };
HANDLE & accept_ev = events[1];
int ret;
// Create event and prime socket to set the event on FD_ACCEPT.
accept_ev = WSACreateEvent ();
if (accept_ev == WSA_INVALID_EVENT)
{
set_last_socket_error (WSAGetLastError ());
goto error;
}
ret = WSAEventSelect (to_os_socket (sock), accept_ev, FD_ACCEPT);
if (ret == SOCKET_ERROR)
{
set_last_socket_error (WSAGetLastError ());
goto error;
}
do
{
// Wait either for interrupt event or actual connection coming in.
DWORD wsawfme = WSAWaitForMultipleEvents (N_EVENTS, events, FALSE,
WSA_INFINITE, TRUE);
switch (wsawfme)
{
case WSA_WAIT_TIMEOUT:
case WSA_WAIT_IO_COMPLETION:
// Retry after timeout or APC.
continue;
// This is interrupt signal/event.
case WSA_WAIT_EVENT_0:
{
// Reset the interrupt event back to non-signalled state.
ret = WSAResetEvent (reinterpret_cast<HANDLE>(interruptHandles[0]));
// Clean up socket events handling.
ret = socketEventHandlingCleanup (sock, accept_ev);
// Return Socket with state set to accept_interrupted.
return Socket (INVALID_SOCKET_VALUE, accept_interrupted, 0);
}
// This is accept_ev.
case WSA_WAIT_EVENT_0 + 1:
{
// Clean up socket events handling.
ret = socketEventHandlingCleanup (sock, accept_ev);
// Finally, call accept().
SocketState st = not_opened;
SOCKET_TYPE clientSock = acceptSocket (sock, st);
int eno = 0;
if (clientSock == INVALID_SOCKET_VALUE)
eno = get_last_socket_error ();
return Socket (clientSock, st, eno);
}
case WSA_WAIT_FAILED:
default:
set_last_socket_error (WSAGetLastError ());
goto error;
}
}
while (true);
error:;
DWORD eno = get_last_socket_error ();
// Clean up socket events handling.
if (sock != INVALID_SOCKET_VALUE)
{
(void) removeSocketEvents (sock, accept_ev);
(void) setSocketBlocking (sock);
}
if (accept_ev != WSA_INVALID_EVENT)
WSACloseEvent (accept_ev);
set_last_socket_error (eno);
return Socket (INVALID_SOCKET_VALUE, not_opened, eno);
}
void
ServerSocket::interruptAccept ()
{
(void) WSASetEvent (reinterpret_cast<HANDLE>(interruptHandles[0]));
}
A not-so-neat way of solving this problem is to issue a dummy WSAConnect request from the thread that needs to do the shutdown. If the dummy connect fails, you might resort to ExitProcess as suggested by Martin.
void Drain()
{
    if (InterlockedIncrement(&drain) == 1)
    {
        // Make a dummy connection to unblock wsaaccept
        SOCKET ConnectSocket = WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL, 0, 0);
        if (ConnectSocket != INVALID_SOCKET) {
            int iResult = WSAConnect(ConnectSocket, result->ai_addr, result->ai_addrlen, 0, 0, 0, 0);
            if (iResult != 0) {
                printf("Unable to connect to server! %d\n", WSAGetLastError());
            }
            else
            {
                closesocket(ConnectSocket);
            }
        }
    }
}