C++ recvfrom timeout

I need to implement the following behavior: when a server starts, it should check for existing servers using a broadcast, then wait for an answer.
But how do I set a timeout for the wait?
int optval = 1;
char buff[BUFF_SIZE];
SOCKADDR_IN addr;
int length = sizeof(addr);
if (setsockopt(s, SOL_SOCKET, SO_BROADCAST, (char*)&optval, sizeof(optval)) == SOCKET_ERROR)
    throw(errors::SETSOCKOPT);
addr.sin_family = AF_INET;               // addr is a struct, not a pointer
addr.sin_port = htons(this->serverPort);
addr.sin_addr.s_addr = INADDR_BROADCAST; // broadcast destination for the discovery packet
sendto(s, this->serverName.c_str(), this->serverName.length() + 1, 0, (SOCKADDR*)&addr, sizeof(addr));
memset(&addr, 0, sizeof(addr));
recvfrom(s, buff, BUFF_SIZE, 0, (SOCKADDR*)&addr, &length);

The common way is to use select() or poll() to wait for an event on a set of file descriptors. These functions also let you specify a timeout. In your case, add the following before the recvfrom() call:
struct pollfd pfd = {.fd = s, .events = POLLIN};
poll(&pfd, 1, 1000);
This waits up to 1000 milliseconds: the call returns as soon as a packet arrives on socket s, or after 1 second, whichever comes first. Check the return value of poll() to see whether it returned because of a packet or because of a timeout.
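For instance, a minimal sketch reusing s, buff, addr, and length from the question (requires <poll.h> on POSIX; on Windows the near-equivalent is WSAPoll() with the same arguments):
struct pollfd pfd = {.fd = s, .events = POLLIN};
int ret = poll(&pfd, 1, 1000);   // timeout in milliseconds
if (ret < 0) {
    perror("poll");              // error in poll() itself
} else if (ret == 0) {
    // timed out: no server answered within 1 second
} else if (pfd.revents & POLLIN) {
    // a datagram is waiting, so recvfrom() will not block
    recvfrom(s, buff, BUFF_SIZE, 0, (SOCKADDR*)&addr, &length);
}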

Set a read timeout with setsockopt() and SO_RCVTIMEO, and handle the EAGAIN/EWOULDBLOCK error that occurs when the timeout triggers.
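A minimal sketch of that approach with POSIX conventions (on Windows, SO_RCVTIMEO takes a DWORD in milliseconds instead, and the failure shows up as WSAETIMEDOUT):
struct timeval tv;
tv.tv_sec = 1;   // 1-second receive timeout
tv.tv_usec = 0;
if (setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv)) < 0)
    perror("setsockopt");
int n = recvfrom(s, buff, BUFF_SIZE, 0, (struct sockaddr*)&addr, &length);
if (n < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
    // the timeout expired and no datagram arrived
}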

Related

Simple HTTP server on Linux C++ sockets

I'm trying to write a simple Linux app that accepts HTTP requests. I want to be able to stop the listening thread, so I use select().
The call sequence is
socket() -> setsockopt() -> bind() -> listen() -> select() -> accept() -> read()
When I send a request from Chrome I get a 451-byte buffer. Then select() immediately returns with success, as does accept(), as if another request came in. And then read() blocks the thread waiting, because it has nothing to read.
socket() ok. listenSocket:3
bind() ok
listen() ok
select() got ready connection
accept() ok. socket: 4
reading...
read() ok. NRead: 451
listen() ok
select() got ready connection
accept() ok. socket: 5
reading...
What am I doing wrong?
What articles/books are worth reading to learn socket programming?
Best wishes,
Yura
Here is the simplified code:
void doServer()
{
    const int trueFlag = 1;
    int nRetVal;
    m_socketListen = socket(AF_INET, SOCK_STREAM, 0);
    // check m_socketListen
    nRetVal = setsockopt(m_socketListen, SOL_SOCKET, SO_REUSEADDR, &trueFlag, sizeof(int));
    // check
    struct sockaddr_in addrServer;
    bzero((char*)&addrServer, sizeof(addrServer));
    addrServer.sin_family = AF_INET;
    addrServer.sin_port = htons(DEFAULT_HTTP_PORT);
    addrServer.sin_addr.s_addr = INADDR_ANY;
    nRetVal = bind(m_socketListen, (const struct sockaddr*)&addrServer, sizeof(addrServer));
    // check
    struct sockaddr_in addrClient;
    while (true)
    {
        bzero((char*)&addrClient, sizeof(addrClient));
        socklen_t addrSize = sizeof(addrClient);
        nRetVal = listen(m_socketListen, SOMAXCONN);
        // check
        struct timeval timeout;
        timeout.tv_sec = 5;
        timeout.tv_usec = 0;
        fd_set set;
        FD_ZERO(&set);
        FD_SET(m_socketListen, &set);
        nRetVal = select(FD_SETSIZE, &set, NULL, NULL, &timeout);
        // check
        if (FD_ISSET(m_socketListen, &set))
        {
            int clientSocket = accept(m_socketListen,
                                      (struct sockaddr*)&addrClient,
                                      &addrSize);
            int NRead;
            char buffer[BUF_SIZE];
            bzero(&buffer, BUF_SIZE);
            NRead = read(clientSocket, &buffer, BUF_SIZE);
            std::cout << "read() ok. NRead: " << NRead << std::endl;
        }
    }
}

Set a timeout for recv from socket on Windows

Hi,
I think my code is correct, but it doesn't work :(
To set a timeout for the recv() function on Windows, I know I must use this code:
DWORD timeout = 2000;
if (setsockopt(listenSocket, SOL_SOCKET, SO_RCVTIMEO, (char*)&timeout, sizeof(DWORD)))
{
    perror("setsockopt");
    return -1;
}
But it doesn't work.
The code of my server is:
SOCKET listenSocket;
SOCKET remoteSocket = INVALID_SOCKET;
SOCKADDR_IN Server_addr;
SOCKADDR_IN Client_addr;
int sin_size;
short port;
int wsastartup;
int ls_result;
WORD wVersionRequested = 0x0202;
WSADATA wsaData;
wsastartup = WSAStartup(wVersionRequested, &wsaData);
if (wsastartup != NO_ERROR) cout << "WSAStartup() error" << endl;
listenSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
port = 4000;
Server_addr.sin_family = AF_INET;
Server_addr.sin_addr.s_addr = inet_addr("127.0.0.1");
Server_addr.sin_port = htons(port);
if (bind(listenSocket, (LPSOCKADDR)&Server_addr, sizeof(struct sockaddr)) < 0) {
    cout << "Server: error bind." << endl;
    closesocket(listenSocket);
    return -1;
}
ls_result = listen(listenSocket, SOMAXCONN);
sin_size = sizeof(struct sockaddr_in);
remoteSocket = accept(listenSocket, (struct sockaddr *)&Client_addr, &sin_size);
// SET THE TIMEOUT
DWORD timeout = 300;
if (setsockopt(remoteSocket, SOL_SOCKET, SO_RCVTIMEO, (char*)&timeout, sizeof(DWORD)))
{
    perror("setsockopt");
    return -1;
}
clock_t t_start, end;
int i = 0;
while (i < 50) {
    t_start = clock();
    // when the client receives the send below, it waits 3 seconds and then transmits the answer
    send(remoteSocket, "code of start transmission", sizeof("code of start transmission"), 0);
    recv_size = recv(remoteSocket, messaggio, sizeof(messaggio), 0);
    end = clock();   // missing in the original snippet; needed for the timing below
    printf("time for read= %f second \n", ((double)(end - t_start)) / CLOCKS_PER_SEC);
    i = i + 1;
}
When the client receives the message "code of start transmission" from the server, it waits 3 seconds and then answers.
I expected the time for the read to be 300 ms with recv_size < 0; instead recv_size < 0 but the time for the read is roughly 1.5 seconds (the server keeps waiting for the client's message). I don't understand why.
I'm on Windows, using Eclipse and MinGW-w64.
Can someone please help me?
Your code tries to use the socket after it has timed out. This is not a good idea because the socket is still somewhere in the middle of the failed blocking operation and in no shape to start a new operation. There's no way to unwind the portions of the operation that have previously completed and put the socket back where it was before the operation started.
Once a blocking socket operation times out, all you can safely do is close the socket. There is no support for undoing an operation that is partially completed and leaving the socket in any kind of sane state.
If a send or receive operation times out on a socket, the socket state is indeterminate, and should not be used. -- MSDN
The SO_RCVTIMEO socket option should never be used in code that's designed to work with sockets. It's a kludge to prevent infinite waits in code that wasn't designed to work with sockets natively. These aren't the droids you're looking for.
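What that leaves you with is a readiness wait before each blocking call. A sketch of the idea with select(), which bounds the wait without putting the socket itself into a timed-out state (my illustration, reusing the question's variables, not code from this answer):
fd_set readfds;
FD_ZERO(&readfds);
FD_SET(remoteSocket, &readfds);
struct timeval tv = { 0, 300000 };  // 300 ms
int ready = select(0, &readfds, NULL, NULL, &tv);  // first argument is ignored by Winsock
if (ready > 0) {
    recv_size = recv(remoteSocket, messaggio, sizeof(messaggio), 0);
} else if (ready == 0) {
    // timed out: no data yet, but the socket is still in a sane state
}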
To set a timeout for the recv() function on Windows, I know I must use this code:
DWORD timeout = 2000;
if (setsockopt(listenSocket, SOL_SOCKET, SO_RCVTIMEO, (char*)&timeout, sizeof(DWORD)))
{
    perror("setsockopt");
    return -1;
}
No. It should be an int, not a DWORD, but the main problem is that you are setting an accept() timeout here, since this is the listening socket. You need to set it on the accepted socket(s).
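A sketch of the corrected call, applied to the accepted socket (the timeout value and error handling are illustrative):
DWORD timeout = 300;  // milliseconds on Windows
if (setsockopt(remoteSocket, SOL_SOCKET, SO_RCVTIMEO, (char*)&timeout, sizeof(timeout)) != 0) {
    cout << "setsockopt failed: " << WSAGetLastError() << endl;
    return -1;
}
recv_size = recv(remoteSocket, messaggio, sizeof(messaggio), 0);
if (recv_size == SOCKET_ERROR && WSAGetLastError() == WSAETIMEDOUT) {
    // no data arrived within 300 ms
}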

Server socket finishes when client closes connection

I'm trying to create a server socket in C++ that accepts one client connection at a time. The program successfully creates the server socket and waits for incoming connections, but when a connection is closed by the client, the program loops endlessly. Otherwise, if the connection is interrupted, it keeps waiting for new connections as expected. Any idea why this is happening? Thanks
This is my C++ server code:
int listenfd, connfd, n;
struct sockaddr_in servaddr, cliaddr;
socklen_t clilen;
pid_t childpid;
char mesg[1000];
listenfd = socket(AF_INET, SOCK_STREAM, 0);
bzero(&servaddr, sizeof(servaddr));
servaddr.sin_family = AF_INET;
servaddr.sin_addr.s_addr = htonl(INADDR_ANY);
servaddr.sin_port = htons(32000);
bind(listenfd, (struct sockaddr *)&servaddr, sizeof(servaddr));
listen(listenfd, 1024);
while (true) {
    clilen = sizeof(cliaddr);
    connfd = accept(listenfd, (struct sockaddr *)&cliaddr, &clilen);
    if ((childpid = fork()) == 0) {
        close(listenfd);
        while (true) {
            n = recvfrom(connfd, mesg, 1000, 0, (struct sockaddr *)&cliaddr, &clilen);
            sendto(connfd, mesg, n, 0, (struct sockaddr *)&cliaddr, sizeof(cliaddr));
            mesg[n] = 0;
            printf("%d: %s \n", n, mesg);
            if (n <= 0) break;
        }
        close(connfd);
    }
}
For some reason, when the client closes the connection, the program keeps printing -1: even with the if-break clause.
You never close connfd in the parent process (when childpid != 0), and you don't properly terminate the child process, which will then try to loop. Your if block should look like:
if ((childpid = fork()) == 0) {
    ...
    close(connfd);
    exit(0);
}
else {
    close(connfd);
}
But since you say you want to accept one connection at a time, you can simply not fork.
And as seen in other answers:
do not use mesg[n] without first testing n >= 0
recvfrom and sendto are overkill for TCP; simply use recv and send (or even read and write)
mesg[n] = 0;
This breaks when n < 0, i.e. when the socket is closed.
The problem is your n and recvfrom. You have a TCP client, so recvfrom won't return the value you expect.
Have a look at:
How to send and receive data socket TCP (C/C++)
Edit 1:
Note that you do the binding, not connect(): http://www.beej.us/guide/bgnet/output/html/multipage/recvman.html
Per that page, a return of -1 means there was an error receiving data; errno will be set accordingly, so check the error flag.
You've written a TCP server, but you use recvfrom and sendto, which are specific to connection-less protocols (UDP).
Try recv and send instead; that might help.
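Putting these suggestions together, a minimal sketch of the child's loop (variable names reused from the question):
while (true) {
    int n = recv(connfd, mesg, sizeof(mesg) - 1, 0);
    if (n <= 0)        // 0 = peer closed the connection, -1 = error (check errno)
        break;
    mesg[n] = 0;       // safe here: n > 0
    printf("%d: %s\n", n, mesg);
    send(connfd, mesg, n, 0);
}
close(connfd);
exit(0);               // terminate the child so it never falls back into the accept loop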

Read from socket freeze

I have a problem reading data from the client on the server. The read() call always freezes (blocks) after all the data has been read, waiting for more data, which is undesirable for me.
Server program:
soc = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
struct sockaddr_in sin;
sin.sin_family = AF_INET;
sin.sin_port = htons(port);
sin.sin_addr.s_addr = INADDR_ANY;
bind(soc, (struct sockaddr*)&sin, sizeof(sin));
if (listen(soc, MAX))
    return;
int socc; // socket for client
socklen_t sinlen = sizeof(sin); // accept() needs a pointer to the length
while (1) {
    if ((socc = accept(soc, (struct sockaddr *)&sin, &sinlen)) < 0)
        break;
    while ((result = read(socc, pointer, SIZE)) > 0) {
        // after all the data is read, the next read() call blocks here
    }
    // do some stuff and write the reply to the client => never reached
}
Client program:
...
soc = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
struct sockaddr_in socketAddr;
socketAddr.sin_family = AF_INET;
socketAddr.sin_port = htons(port);
memcpy(&(socketAddr.sin_addr), host->h_addr, host->h_length);
if (connect(soc, (sockaddr *)&socketAddr, sizeof(socketAddr)) == -1)
    return;
if (write(soc, req.c_str(), req.size()) < 0)
    return;
The main problem is that I don't know how much data the client will send to the server, so the server should read all the data from the socket and leave the read loop once nothing more arrives. But the server reads the whole message (30 bytes, for example) and then waits for more (though no more is coming). The sockets are still open because the client is waiting for a reply from the server.
You will need to make your socket non-blocking. In that case read() returns immediately with a specific error (EAGAIN/EWOULDBLOCK) when there is nothing to be read.
Look at C - Unix Sockets - Non-blocking read
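A minimal sketch of that approach, reusing socc, pointer, and SIZE from the question (fcntl() and O_NONBLOCK come from <fcntl.h>, errno from <errno.h>):
int flags = fcntl(socc, F_GETFL, 0);
fcntl(socc, F_SETFL, flags | O_NONBLOCK);  // put the accepted socket in non-blocking mode
while (1) {
    result = read(socc, pointer, SIZE);
    if (result > 0) {
        // consume the data
    } else if (result == 0) {
        break;  // client closed the connection
    } else if (errno == EAGAIN || errno == EWOULDBLOCK) {
        break;  // nothing to read right now
    } else {
        break;  // a real error; inspect errno
    }
}
Be aware that EAGAIN only means no data has arrived yet, so with a slow client this loop can break too early; a receive timeout (as below) or an application-level length or terminator is more robust.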
As stated earlier, use non-blocking mode or add a receive timeout (SO_RCVTIMEO) to the socket.
struct timeval tv;
tv.tv_sec = 30; /* 30-second timeout */
setsockopt(sockid, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
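Note the platform difference: on Linux SO_RCVTIMEO takes a struct timeval as above, whereas on Windows (see the previous question) it takes a DWORD holding milliseconds.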

*nix & C++ writing a non-blocking socket server

I'm experiencing some issues rewriting my blocking socket server as a non-blocking version.
Actually, I can't seem to even get a socket connected anymore. I've been googling for most of today and trying different solutions I found here and there, but none of them seem to work properly...
Currently my server loop just keeps timing out in the select() call, with no new sockets accepted.
The client socket seems to connect on some level: if I start it, it blocks trying to write, and if I close the server, it reports that the connection was reset by peer.
Is the following a correct assumption?
With a non-blocking server I should open the socket as usual, set its flags to non-blocking, bind it, and then start calling select() for the read file descriptors and wait for the set to populate?
I need to remove the old blocking accept() call, which waited endlessly.
If I try calling accept() now, it returns -1...
Here is the relevant code I'm trying now:
fd_set incoming_sockets;
....
int listener_socket, newsockfd, portno;
socklen_t clilen;
struct sockaddr_in serv_addr, cli_addr;
....
listener_socket = socket(AF_INET, SOCK_STREAM, 0); // get socket handle
int flags = fcntl(listener_socket, F_GETFL, 0);
if (fcntl(listener_socket, F_SETFL, flags | O_NONBLOCK) < 0)
    log_writer->write_to_error_log("Error setting listening socket to non blocking", false);
memset(&serv_addr, 0, sizeof(struct sockaddr_in));
serv_addr.sin_family = AF_INET;
serv_addr.sin_addr.s_addr = INADDR_ANY;
serv_addr.sin_port = htons(portno);
....
if (bind(listener_socket, (struct sockaddr *) &serv_addr,
         sizeof(struct sockaddr_in)) < 0)
{
    log_writer->write_to_error_log("Unable to bind socket, aborting!", true);
}
....
struct timeval timeout;
timeout.tv_sec = 1;
timeout.tv_usec = 0;
int ready_sockets = 0;
listen(listener_socket, 1);
FD_ZERO(&incoming_sockets);
FD_SET(listener_socket, &incoming_sockets);
while (true)
{
    ready_sockets = select(listener_socket + 1, &incoming_sockets, (fd_set *) 0, (fd_set *) 0, &timeout);
    if (ready_sockets == 0)
    {
        // I loop here forever now
        std::cout << "no new sockets available, snooze 2\n";
        sleep(2);
    } else
    {
        std::cout << "connection received!\n";
Since you don't show the whole loop, I don't know if you do it later, but you should initialize the descriptor sets and timeout structure before every call to select.
You should move the FD_ZERO() and FD_SET() macros inside the loop; select() actually changes the bitmasks in the fd_sets (and the timeout value), so reinitialize them on every iteration. Also check for select() returning -1 and the associated errno (e.g. EINTR).
while (true)
{
    FD_ZERO(&incoming_sockets);
    FD_SET(listener_socket, &incoming_sockets);
    ready_sockets = select(listener_socket + 1, &incoming_sockets, (fd_set *) 0, (fd_set *) 0, &timeout);
    if (ready_sockets == 0)
    {
        ... }
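For completeness, a sketch of the whole corrected loop with the error and accept() handling filled in (the accept() part is my addition, not from the answer; errno comes from <errno.h>):
while (true)
{
    FD_ZERO(&incoming_sockets);                  // select() overwrites the set,
    FD_SET(listener_socket, &incoming_sockets);  // so rebuild it each iteration
    timeout.tv_sec = 1;                          // Linux select() also modifies the timeout
    timeout.tv_usec = 0;
    ready_sockets = select(listener_socket + 1, &incoming_sockets, NULL, NULL, &timeout);
    if (ready_sockets < 0)
    {
        if (errno == EINTR) continue;            // interrupted by a signal, just retry
        break;                                   // real error
    }
    if (ready_sockets == 0)
        continue;                                // timeout: no pending connection
    if (FD_ISSET(listener_socket, &incoming_sockets))
    {
        clilen = sizeof(cli_addr);
        newsockfd = accept(listener_socket, (struct sockaddr *)&cli_addr, &clilen);
        if (newsockfd >= 0)
        {
            // handle the new connection (on Linux it does not inherit O_NONBLOCK)
        }
    }
}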