Setting a socket timeout? - C++

Using sockets, I am not sure how to set a timeout. Thanks.
int sock, connected, bytes_recieved;
char send_data [128] , recv_data[128];
SOCKADDR_IN server_addr,client_addr;
int sin_size;
int j = 0;
sock = ::socket(AF_INET, SOCK_STREAM, 0);
server_addr.sin_family = AF_INET;
server_addr.sin_port = htons(4000);
server_addr.sin_addr.s_addr = INADDR_ANY;
::bind(sock, (struct sockaddr *)&server_addr, sizeof(struct sockaddr));
::listen(sock, 5);
::fflush(stdout);
while(1)
{
sin_size = sizeof(struct sockaddr_in);
connected = ::accept(sock, (struct sockaddr *)&client_addr, &sin_size);
while (1)
{
j++;
::send(connected, send_data, strlen(send_data), 0);
//dealing with lost communication?
//and re-establishing communication
//set timeout and reset on timeout error
}
}
::closesocket(sock);

You need to use setsockopt to set the SO_SNDTIMEO and/or SO_RCVTIMEO options.
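For example, a minimal sketch (assuming a connected Winsock socket named connected; on Windows the option value is a DWORD in milliseconds, while on POSIX systems it is a struct timeval instead):
DWORD timeout_ms = 5000;  // 5-second timeout for both directions
if (::setsockopt(connected, SOL_SOCKET, SO_RCVTIMEO, (const char *)&timeout_ms, sizeof(timeout_ms)) == SOCKET_ERROR)
{
    // handle the error, e.g. log WSAGetLastError()
}
if (::setsockopt(connected, SOL_SOCKET, SO_SNDTIMEO, (const char *)&timeout_ms, sizeof(timeout_ms)) == SOCKET_ERROR)
{
    // handle the error
}
// After this, a blocking recv()/send() that exceeds the timeout fails with WSAETIMEDOUT.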

A socket is in blocking mode by default. If you switch it to non-blocking mode using ioctlsocket(FIONBIO), you can use select() to manage timeouts:
SOCKET sock, connected;
int bytes_recieved;
char send_data [128] , recv_data[128];
SOCKADDR_IN server_addr,client_addr;
int sin_size;
int j = 0, ret;
fd_set fd;
timeval tv;
sock = ::socket(AF_INET, SOCK_STREAM, 0);
server_addr.sin_family = AF_INET;
server_addr.sin_port = htons(4000);
server_addr.sin_addr.s_addr = INADDR_ANY;
::bind(sock, (struct sockaddr *)&server_addr, sizeof(struct sockaddr));
::listen(sock, 1);
::fflush(stdout);
u_long nbio = 1;
::ioctlsocket(sock, FIONBIO, &nbio);
while(1)
{
FD_ZERO(&fd);
FD_SET(sock, &fd);
tv.tv_sec = 5;
tv.tv_usec = 0;
if (select(0, &fd, NULL, NULL, &tv) > 0)
{
sin_size = sizeof(struct sockaddr_in);
connected = ::accept(sock, (struct sockaddr *)&client_addr, &sin_size);
nbio = 1;
::ioctlsocket(connected, FIONBIO, &nbio);
while (1)
{
j++;
if (::send(connected, send_data, strlen(send_data), 0) < 0)
{
//dealing with lost communication ?
//and re-establishing communication
//set timeout and reset on timeout error
if (WSAGetLastError() == WSAEWOULDBLOCK)
{
FD_ZERO(&fd);
FD_SET(connected, &fd);
tv.tv_sec = 5;
tv.tv_usec = 0;
if (select(0, NULL, &fd, NULL, &tv) > 0)
continue;
}
break;
}
}
closesocket(connected);
}
}

You can use:
fd_set fd;
timeval tv;
FD_ZERO(&fd);
FD_SET(sock, &fd);
tv.tv_sec = timeout_seconds; // your timeout, in whole seconds
tv.tv_usec = 0;
together with select() to set a timeout for sending/receiving data.
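For instance, a minimal sketch (assuming sock is already connected and recv_data is your receive buffer) that waits for readability before calling recv():
fd_set fd;
timeval tv;
FD_ZERO(&fd);
FD_SET(sock, &fd);
tv.tv_sec = 5;   // wait at most 5 seconds
tv.tv_usec = 0;
int ret = select(0, &fd, NULL, NULL, &tv);  // the first argument is ignored on Windows
if (ret > 0)
{
    recv(sock, recv_data, sizeof(recv_data), 0);  // data is available
}
else if (ret == 0)
{
    // timed out, decide whether to retry or give up
}
else
{
    // select() failed, check WSAGetLastError()
}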

Related

Winsock C++ connect timeout

I'm trying to set my own timeout for the connect() function.
My code works well with the default blocking connect, like this:
bool connectFUNC4(char * ipaddr) {
WSADATA wsa;
struct sockaddr_in server;
if (WSAStartup(MAKEWORD(2, 2), &wsa) != 0)
return false;
if ((sock = socket(AF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET)
return false;
server.sin_addr.s_addr = inet_addr(ipaddr);
server.sin_family = AF_INET;
server.sin_port = htons(5577);
if (connect(sock, (struct sockaddr *)&server, sizeof(server)) < 0)
return false;
return true;
}
I understand the idea of blocking versus non-blocking connections, and I've found a solution that switches the socket to non-blocking mode and uses a timeout. It always finishes successfully, but the communication does not work.
bool connectFUNC3(char * ipaddr) {
WSADATA wsa;
struct sockaddr_in server;
server.sin_addr.s_addr = inet_addr(ipaddr);
server.sin_family = AF_INET;
server.sin_port = htons(5577);
unsigned long block = 1;
ioctlsocket((unsigned int)sock, FIONBIO, &block);
WSAGetLastError();
int ret = connect(sock, (struct sockaddr *)&server, sizeof(server));
timeval time_out;
time_out.tv_sec = 5;
time_out.tv_usec = 0;
fd_set setW, setE;
FD_ZERO(&setW);
FD_SET(sock, &setW);
FD_ZERO(&setE);
FD_SET(sock, &setE);
select(0, NULL, &setW, &setE, &time_out);
bool flag;
if (FD_ISSET(sock, &setW))
{
// connection successful
flag = true;
}
else if (FD_ISSET(sock, &setE))
{
// connection fail
flag = false;
}
else
{
// connection timeout
flag = false;
}
block = 0;
ioctlsocket((unsigned int)sock, FIONBIO, &block);
return flag;
}
Please help me make it work, or suggest another solution (multithreading is not usable in my case). Thank you.
Neither of your functions is checking ANY return values for errors. And when using select() in non-blocking mode, call it only if connect() fails with a WSAEWOULDBLOCK error, and if select() then returns > 0 you should check setE before setW.
Try something more like this:
void closesock(SOCKET *s)
{
// preserve current error code
int err = WSAGetLastError();
closesocket(*s);
*s = INVALID_SOCKET;
WSASetLastError(err);
}
bool connectFUNC4(char * ipaddr)
{
// you really shouldn't be calling WSAStartup() here.
// Call it at app startup instead...
struct sockaddr_in server = {0};
server.sin_family = AF_INET;
server.sin_addr.s_addr = inet_addr(ipaddr);
server.sin_port = htons(5577);
// ipaddr valid?
if (server.sin_addr.s_addr == INADDR_NONE)
return false;
sock = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
if (sock == INVALID_SOCKET)
return false;
if (connect(sock, (struct sockaddr *)&server, sizeof(server)) == SOCKET_ERROR)
{
// connection failed
closesock(&sock);
return false;
}
// connection successful
return true;
}
bool connectFUNC3(char * ipaddr)
{
// you really shouldn't be calling WSAStartup() here.
// Call it at app startup instead...
struct sockaddr_in server = {0};
server.sin_family = AF_INET;
server.sin_addr.s_addr = inet_addr(ipaddr);
server.sin_port = htons(5577);
// ipaddr valid?
if (server.sin_addr.s_addr == INADDR_NONE)
return false;
sock = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
if (sock == INVALID_SOCKET)
return false;
// put socket in non-blocking mode...
u_long block = 1;
if (ioctlsocket(sock, FIONBIO, &block) == SOCKET_ERROR)
{
closesock(&sock);
return false;
}
if (connect(sock, (struct sockaddr *)&server, sizeof(server)) == SOCKET_ERROR)
{
if (WSAGetLastError() != WSAEWOULDBLOCK)
{
// connection failed
closesock(&sock);
return false;
}
// connection pending
fd_set setW, setE;
FD_ZERO(&setW);
FD_SET(sock, &setW);
FD_ZERO(&setE);
FD_SET(sock, &setE);
timeval time_out = {0};
time_out.tv_sec = 5;
time_out.tv_usec = 0;
int ret = select(0, NULL, &setW, &setE, &time_out);
if (ret <= 0)
{
// select() failed or connection timed out
closesock(&sock);
if (ret == 0)
WSASetLastError(WSAETIMEDOUT);
return false;
}
if (FD_ISSET(sock, &setE))
{
// connection failed
int err = 0;
int errlen = sizeof(err);
getsockopt(sock, SOL_SOCKET, SO_ERROR, (char *)&err, &errlen);
closesock(&sock);
WSASetLastError(err);
return false;
}
}
// connection successful
// put socket back in blocking mode...
block = 0;
if (ioctlsocket(sock, FIONBIO, &block) == SOCKET_ERROR)
{
closesock(&sock);
return false;
}
return true;
}

Linux sockets only receiving first chunk of data

I have two programs: one is a server that only listens, and the client does the talking. I send 1 MB of data in chunks of 64 bytes each. I get the first chunk of 64 bytes, but then my server exits because it fails to receive any further data. My client is sending all of the data.
void ServerLinux::Receive(){
int sock = 0;
struct sockaddr_in server;
char buffer[this->packageLength];
if ((sock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP)) < 0) {
this->Die("Failed to create socket");
}
memset(&server, 0, sizeof(server));
server.sin_family = AF_INET;
server.sin_addr.s_addr = htonl(INADDR_ANY);
server.sin_port = htons(this->port);
if(bind(sock, (struct sockaddr *) &server, sizeof(server)) < 0){
this->Die("Failed to bind the server socket");
}
if(listen(sock, 1) < 0){
this->Die("Failed to listen on server socket");
}
int clientSocket = 0;
struct sockaddr_in client;
socklen_t size = sizeof(client);
if((clientSocket = accept(sock, (struct sockaddr *) &client, &size)) < 0){
this->Die("Failed to accept client");
}
int received = -1;
//This is fine, data is received
if((received = recv(clientSocket, buffer, this->packageLength,0)) < 0){
this->Die("Failed to receive initial bytes from client");
}
std::cout << "Received!" << std::endl;
//Data is not received in this while loop
while(received > 0){
if((received = recv(sock, buffer, this->packageLength,0)) < 0){
this->Die("Failed to receive additional bytes frin client");
}
std::cout << "Received!" << std::endl;
}
}
Why don't you use the same arguments for recv()?
Works: recv(clientSocket, buffer, this->packageLength, 0)
Doesn't work: recv(sock, buffer, this->packageLength, 0)
Change sock to clientSocket.
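For illustration, a minimal sketch of the corrected receive loop (hypothetical, reusing the names from the question and assuming <unistd.h> for close()); it also treats a return value of 0 as the client closing the connection:
int received;
// Always read from the accepted client socket, not the listening socket.
while ((received = recv(clientSocket, buffer, this->packageLength, 0)) > 0) {
    std::cout << "Received " << received << " bytes" << std::endl;
}
if (received < 0) {
    this->Die("Failed to receive bytes from client");
}
// received == 0: the client closed the connection normally.
close(clientSocket);
close(sock);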

How to use 2 sockets PF_INET and PF_PACKET at the same time?

I have the following 2 functions
int listen_socket(unsigned int ip, int port, char *inf)
{
struct ifreq interface;
int fd;
struct sockaddr_in addr;
int n = 1;
DEBUG(LOG_INFO, "Opening listen socket on 0x%08x:%d %s\n", ip, port, inf);
if ((fd = socket(PF_INET, SOCK_DGRAM, IPPROTO_UDP)) < 0) {
DEBUG(LOG_ERR, "socket call failed: %s", strerror(errno));
return -1;
}
fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
memset(&addr, 0, sizeof(addr));
addr.sin_family = AF_INET;
addr.sin_port = htons(port);
addr.sin_addr.s_addr = ip;
if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (char *) &n, sizeof(n)) == -1) {
close(fd);
return -1;
}
if (setsockopt(fd, SOL_SOCKET, SO_BROADCAST, (char *) &n, sizeof(n)) == -1) {
close(fd);
return -1;
}
strncpy(interface.ifr_ifrn.ifrn_name, inf, IFNAMSIZ);
if (setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,(char *)&interface, sizeof(interface)) < 0) {
close(fd);
return -1;
}
if (bind(fd, (struct sockaddr *)&addr, sizeof(struct sockaddr)) == -1) {
close(fd);
return -1;
}
return fd;
}
int raw_socket(int ifindex)
{
int fd;
struct sockaddr_ll sock;
DEBUG(LOG_INFO, "Opening raw socket on ifindex %d\n", ifindex);
if ((fd = socket(PF_PACKET, SOCK_DGRAM, htons(ETH_P_IP))) < 0) {
DEBUG(LOG_ERR, "socket call failed: %s", strerror(errno));
return -1;
}
fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);
sock.sll_family = AF_PACKET;
sock.sll_protocol = htons(ETH_P_IP);
sock.sll_ifindex = ifindex;
if (bind(fd, (struct sockaddr *) &sock, sizeof(sock)) < 0) {
DEBUG(LOG_ERR, "bind call failed: %s", strerror(errno));
close(fd);
return -1;
}
return fd;
}
Both are socket listener functions.
I used these functions in my application in this way
fd = listen_socket(INADDR_ANY, 67, client_config.interface);
fd2 = raw_socket(client_config.ifindex);
Now if I send a packet to my application (with destination = IP of the interface and port = 67), which socket should catch my packet? Is it fd2, fd, or both?
And if I send a packet to my application (with destination = broadcast:255.255.255.0 and port = 67), which socket should catch my packet? Is it fd2, fd, or both?
Both sockets will receive that packet. As each packet arrives from the network driver to the kernel, it is duplicated and sent to all PF_PACKET (layer 2) sockets. The packet is also sent to the layer 3 (IP/TCP) kernel code and from there, to the addressed socket.
If this didn't happen, running a separate program doing raw packet captures (e.g. Wireshark) would prevent any other communications over the network.
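You can observe this with a small sketch (hypothetical, assuming the fd and fd2 returned above): select() on both descriptors, and a single UDP datagram to port 67 makes both readable. Note that the PF_PACKET socket delivers the packet starting at the IP header, while the PF_INET socket delivers only the UDP payload.
fd_set rfds;
char buf[1500];
FD_ZERO(&rfds);
FD_SET(fd, &rfds);
FD_SET(fd2, &rfds);
int maxfd = (fd > fd2) ? fd : fd2;
if (select(maxfd + 1, &rfds, NULL, NULL, NULL) > 0) {
    if (FD_ISSET(fd, &rfds))
        recv(fd, buf, sizeof(buf), 0);   /* layer 3 socket: UDP payload only */
    if (FD_ISSET(fd2, &rfds))
        recv(fd2, buf, sizeof(buf), 0);  /* layer 2 socket: IP header + payload */
}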

Socket does not accept connections

I have a server socket accepting client connections. The accept() call runs in a thread.
Socket creation:
int ServerSocket::CreateSocket(int port)
{
listenfd = 0;
struct sockaddr_in serv_addr;
unsigned long iMode = 1;
listenfd = socket(AF_INET, SOCK_STREAM, 0);
memset(&serv_addr, '0', sizeof(serv_addr));
serv_addr.sin_family = AF_INET;
serv_addr.sin_addr.s_addr = htonl(INADDR_ANY);
serv_addr.sin_port = htons(port);
ioctlsocket(listenfd, FIONBIO, &iMode);
if (bind(listenfd, (struct sockaddr*)&serv_addr, sizeof(serv_addr)) < 0)
{
return 0;
}
if (listen(listenfd, 20) < 0)
{
return 0;
}
return listenfd;
}
Socket Accept
void ServerSocket::AcceptClients_1(void * p)
{
struct sockaddr_in cli_addr;
// get a pointer to the ServerSocket object
ServerSocket * pThis = (ServerSocket *)p;
int iResult, cli_len;
cli_len = sizeof(cli_addr);
struct timeval tv = { 0, 1000 };
SOCKET s = pThis->GetSocket();
fd_set rfds;
FD_ZERO(&rfds);
FD_SET(s, &rfds);
while (!pThis->ShutDownRequested)
{
iResult = select(s+1, &rfds, (fd_set *) 0, (fd_set *) 0, &tv);
if(iResult > 0)
{
// never comes here
SOCKET sclient = accept(s, (struct sockaddr *)&cli_addr,
&cli_len);
}
else if (iResult == 0) /// timeout
{
continue;
}
// error comes here are going to accept 2nd time
DWORD dwError = GetLastError();
return;
}
}
The code gets to select(). It returns 0 the first time, but the second time it always returns -1 with error 10022. I don't understand why. Please help.
Make sure your pThis->GetSocket() is correctly returning the listenfd. Also, you should reinitialize cli_len = sizeof(cli_addr); before each call to accept (it's a value-result argument).
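Also worth noting (a guess based on the code shown): select() modifies the fd_set you pass in, so after the first timeout rfds is empty and the next call has nothing to wait on, which on Windows fails with WSAEINVAL (10022). A minimal sketch with rfds, the timeout, and cli_len re-initialized on every iteration:
while (!pThis->ShutDownRequested)
{
    fd_set rfds;
    FD_ZERO(&rfds);
    FD_SET(s, &rfds);                  // rebuild the set each pass, select() clears it
    struct timeval tv = { 0, 1000 };   // re-arm the timeout as well
    int iResult = select(0, &rfds, NULL, NULL, &tv);
    if (iResult > 0)
    {
        cli_len = sizeof(cli_addr);    // value-result argument, reset before accept()
        SOCKET sclient = accept(s, (struct sockaddr *)&cli_addr, &cli_len);
        // ... hand sclient off to whatever handles the client ...
    }
    else if (iResult == 0)
    {
        continue;                      // timeout, keep polling
    }
    else
    {
        // select() failed, inspect WSAGetLastError() and bail out
        return;
    }
}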
iResult = 0 does not always mean a timeout; for non-blocking sockets you need to check WSAGetLastError() and deal with certain error codes, for example WSAEWOULDBLOCK means you need to wait for the next event on this socket.
http://msdn.microsoft.com/en-us/library/windows/desktop/ms740668(v=vs.85).aspx

C++ TCP server (Windows OS) - recv() delays when there is more than 1 connection to the server

When I connect to the server with only 1 client, the recv() function on the server does not delay.
But when I start the client console more than once (something like 7 times), there is a delay of roughly 2000 ms between the client sending a packet with send() and the server printing that packet in its console.
Is there any solution without starting a thread for each client? (Windows limits the number of threads per process.)
The code is compiled with Visual Studio 2008, and this is the full server code:
#include <WinSock2.h>
#include <ws2tcpip.h>
#pragma comment(lib, "ws2_32.lib")
#include <Windows.h>
#include <stdio.h>
struct sslv3
{
#define max_clients 1024
private:
int cClient;
public:
SOCKET fd;
int CurrentClient()
{
return cClient;
}
struct client
{
client()
{
Valid = false;
}
bool Valid;
DWORD ip;
WORD port;
char ipstr[33];
char portstr[33];
SOCKET fd;
void StrGen()
{
wsprintf(ipstr, "%d.%d.%d.%d", ip & 0xFF, (ip & 0xFF00)/0x100, (ip & 0xFF0000)/0x10000, (ip & 0xFF000000)/0x1000000);
wsprintf(portstr, "%d", port);
}
} clients[max_clients];
//
sslv3(bool server_client)
{
WSADATA wsaData;
WSAStartup(MAKEWORD(2, 2), &wsaData);
cClient = 0;
fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
//
DWORD timeout = 1;
setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, (const char*)&timeout, sizeof(DWORD));
}
int Bind(WORD port)
{
int ret = 0;
sockaddr_in local;
local.sin_addr.s_addr = htonl(INADDR_ANY);
local.sin_family = AF_INET;
local.sin_port = htons(port);
if((ret = bind(fd, (struct sockaddr *)&local, sizeof(local)))
!= SOCKET_ERROR)
listen(fd, SOMAXCONN);
return ret;
}
int Accept()
{
SOCKET clientfd;
sockaddr_in client;
int addrlen = sizeof(client);
clientfd = accept(fd, (struct sockaddr *)&client, &addrlen);
if(clientfd == -1)
return -1;
clients[cClient].ip = client.sin_addr.S_un.S_addr;
clients[cClient].port = client.sin_port;
clients[cClient].StrGen();
clients[cClient].fd = clientfd;
clients[cClient].Valid = true;
//
DWORD timeout = 1;
setsockopt(clients[cClient].fd, SOL_SOCKET, SO_RCVTIMEO, (const char*)&timeout, sizeof(DWORD));
cClient++;
if(cClient >= max_clients)
{
cClient = 0;
return max_clients - 1;
}
return cClient - 1;
}
int Connect(char ip[], WORD port)
{
sockaddr_in addr;
addr.sin_family = AF_INET;
addr.sin_addr.s_addr = inet_addr(ip);
addr.sin_port = htons(port);
return connect(fd, (const struct sockaddr*)&addr, sizeof(addr));
}
int Send(SOCKET sfd, void* buffer, int length)
{
return send(sfd, (char*)buffer, length, 0);
}
int Read(SOCKET sfd, void* buffer, int length)
{
return recv(sfd, (char*)buffer, length, 0);
}
};
sslv3 cssl(true);
DWORD WINAPI ReadThread(void* args)
{
while(true)
{
for(int j = 0; j <= cssl.CurrentClient(); j++)
{
if(cssl.clients[j].Valid)
{
char rpack[1024];
for(int i = 0; i < sizeof(rpack); i++)
rpack[i] = 0;
if(cssl.Read(cssl.clients[j].fd, rpack, sizeof(rpack)) > 0){
printf("%s:%s says: %s\n", cssl.clients[j].ipstr, cssl.clients[j].portstr, rpack);
}
}
}
Sleep(1);
}
return TRUE;
}
int main()
{
cssl.Bind(1234);
CreateThread(0,0,ReadThread,0,0,0);
while(true)
{
Sleep(1);
int cid = cssl.Accept();
if(cid != -1){
printf("%s:%s connected!\n", cssl.clients[cid].ipstr, cssl.clients[cid].portstr);
}
}
return 0;
}
The following is a full client code:
#include <WinSock2.h>
#include <ws2tcpip.h>
#pragma comment(lib, "ws2_32.lib")
#include <Windows.h>
#include <stdio.h>
#include <iostream>
using namespace std;
struct sslv3
{
#define max_clients 1024
private:
int cClient;
public:
SOCKET fd;
int CurrentClient()
{
return cClient;
}
struct client
{
client()
{
Valid = false;
}
bool Valid;
DWORD ip;
WORD port;
char ipstr[33];
char portstr[33];
SOCKET fd;
void StrGen()
{
wsprintf(ipstr, "%d.%d.%d.%d", ip & 0xFF, (ip & 0xFF00)/0x100, (ip & 0xFF0000)/0x10000, (ip & 0xFF000000)/0x1000000);
wsprintf(portstr, "%d", port);
}
} clients[max_clients];
//
sslv3(bool server_client)
{
WSADATA wsaData;
WSAStartup(MAKEWORD(2, 2), &wsaData);
cClient = 0;
fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
//
DWORD timeout = 1;
setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, (const char*)&timeout, sizeof(DWORD));
}
int Bind(WORD port)
{
int ret = 0;
sockaddr_in local;
local.sin_addr.s_addr = htonl(INADDR_ANY);
local.sin_family = AF_INET;
local.sin_port = htons(port);
if((ret = bind(fd, (struct sockaddr *)&local, sizeof(local)))
!= SOCKET_ERROR)
listen(fd, SOMAXCONN);
return ret;
}
int Accept()
{
SOCKET clientfd;
sockaddr_in client;
int addrlen = sizeof(client);
clientfd = accept(fd, (struct sockaddr *)&client, &addrlen);
if(clientfd == -1)
return -1;
clients[cClient].ip = client.sin_addr.S_un.S_addr;
clients[cClient].port = client.sin_port;
clients[cClient].StrGen();
clients[cClient].fd = clientfd;
clients[cClient].Valid = true;
//
DWORD timeout = 1;
setsockopt(clients[cClient].fd, SOL_SOCKET, SO_RCVTIMEO, (const char*)&timeout, sizeof(DWORD));
cClient++;
if(cClient >= max_clients)
{
cClient = 0;
return max_clients - 1;
}
return cClient - 1;
}
int Connect(char ip[], WORD port)
{
sockaddr_in addr;
addr.sin_family = AF_INET;
addr.sin_addr.s_addr = inet_addr(ip);
addr.sin_port = htons(port);
return connect(fd, (const struct sockaddr*)&addr, sizeof(addr));
}
int Send(SOCKET sfd, void* buffer, int length)
{
return send(sfd, (char*)buffer, length, 0);
}
int Read(SOCKET sfd, void* buffer, int length)
{
return recv(sfd, (char*)buffer, length, 0);
}
};
sslv3 cssl(false);
int main()
{
cssl.Connect("127.0.0.1", 1234);
while(true)
{
printf("say: ");
char buf[1024];
for(int i = 0; i < sizeof(buf); i++)
buf[i] = 0;
cin >> buf;
int len = strlen(buf);
cssl.Send(cssl.fd, buf, len);
}
return 0;
}
The server appears 'idle' for 2 seconds because some clients are only handled after 2 sleeps of 1 second each.
This is clearly not the right way to handle more than one client on a server. You may want to look at select() - reference.
A very good tutorial for socket programming is Beej's guide.
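As a rough sketch (not a drop-in replacement for the code above), a single thread can instead wait on the listening socket and every accepted client with one select() call per pass, so recv() only runs when data is actually ready and no client waits out another client's receive timeout:
// Body of one pass of the main loop (hypothetical, using the cssl object above).
fd_set rfds;
FD_ZERO(&rfds);
FD_SET(cssl.fd, &rfds);                        // listening socket
for (int j = 0; j < cssl.CurrentClient(); j++)
    if (cssl.clients[j].Valid)
        FD_SET(cssl.clients[j].fd, &rfds);     // every accepted client
timeval tv = { 1, 0 };                         // wake up at least once per second
if (select(0, &rfds, NULL, NULL, &tv) > 0)
{
    if (FD_ISSET(cssl.fd, &rfds))
        cssl.Accept();                         // a new connection is pending
    for (int j = 0; j < cssl.CurrentClient(); j++)
    {
        if (cssl.clients[j].Valid && FD_ISSET(cssl.clients[j].fd, &rfds))
        {
            char rpack[1024] = {0};
            if (cssl.Read(cssl.clients[j].fd, rpack, sizeof(rpack)) > 0)
                printf("%s:%s says: %s\n", cssl.clients[j].ipstr, cssl.clients[j].portstr, rpack);
        }
    }
}
// Note: the default FD_SETSIZE on Windows is 64; define it larger before
// including WinSock2.h if more clients are expected.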