UDP server consumes all processor time in multithreaded program - c++

I'm developing a client/server application. The client and server run on two different machines running Ubuntu 16.04. The client sends two variables that influence the flow of the server part, so I want to keep the packet loss rate as low as possible. My application is thread-based.
A UDP server runs in one of the threads. The project has a GUI implemented in Qt. When I implemented the UDP server as blocking, the whole program and the GUI froze until a packet was received, and sometimes the program didn't respond even after a packet arrived.
Therefore, I thought a non-blocking UDP socket was the best way to do it, and I managed to make it non-blocking using select(). Now comes the problem. If I set the select() timeout to 10 ms and let the thread run every 10 ms, almost no packets are lost, but the UDP thread apparently consumes so much processor time that the program freezes. If I increase the interval at which the thread runs, or reduce the timeout, around 80% of the packets are lost. I know that UDP is a connectionless protocol and TCP might be a better option, but I have to use UDP because the client side sends its packets over UDP.
The question is: how can I reduce the packet loss rate without blocking the other threads from executing efficiently?
Following is my code (based on a Stack Overflow answer which, at the moment, I can't find to link here).
void receiveUDP(void)
{
    fd_set readfds;
    static int fd;
    static struct timeval tv;
    tv.tv_sec = 0;
    tv.tv_usec = 10000;
    static char buffer[10];
    static int length;

    if ( (fd = socket(AF_INET, SOCK_DGRAM, 0)) < 0 ) {
        perror("socket failed");
        return;
    }

    struct sockaddr_in serveraddr;
    memset( &serveraddr, 0, sizeof(serveraddr) );
    serveraddr.sin_family = AF_INET;
    serveraddr.sin_port = htons( 50037 );
    serveraddr.sin_addr.s_addr = htonl( INADDR_ANY );

    if( bind(fd, (struct sockaddr *)&serveraddr, sizeof(serveraddr)) < 0)
    {
        perror("bind failed");
        return;
    }

    fcntl(fd, F_SETFL, O_NONBLOCK);

    FD_ZERO(&readfds);
    FD_SET(fd, &readfds);

    int rv = select(fd+1, &readfds, NULL, NULL, &tv);
    if(rv == -1)
    {
        printf("Error in Select\n");
        _exit(0);
    }
    else if(rv == 0)
    {
        printf("Timeout\n");
    }
    else
    {
        if(FD_ISSET(fd, &readfds))
        {
            length = recvfrom(fd, buffer, sizeof(buffer) - 1, 0, NULL, 0);
            if(length < 0)
            {
                perror("recvfrom failed");
            }
            else
            {
                printf("%d bytes received: \n", length);
            }
        }
    }
    close(fd);
}
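For reference, here is a minimal sketch (not from the original post) of the same receive path restructured so that the socket is created and bound once and the select()/recvfrom() loop lives entirely inside a dedicated thread; the pthread-style signature, the 100 ms timeout and the keep_running flag are illustrative assumptions, not values taken from the question.

/* Sketch only: the socket is opened once, and the thread blocks in select()
   with a modest timeout instead of being rescheduled every 10 ms. */
static volatile int keep_running = 1;   /* assumed stop flag, cleared by the GUI thread */

void *udpReceiverThread(void *arg)
{
    (void)arg;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);
    if (fd < 0) { perror("socket"); return NULL; }

    struct sockaddr_in serveraddr;
    memset(&serveraddr, 0, sizeof(serveraddr));
    serveraddr.sin_family = AF_INET;
    serveraddr.sin_port = htons(50037);
    serveraddr.sin_addr.s_addr = htonl(INADDR_ANY);
    if (bind(fd, (struct sockaddr *)&serveraddr, sizeof(serveraddr)) < 0) {
        perror("bind");
        close(fd);
        return NULL;
    }

    while (keep_running) {
        fd_set readfds;
        FD_ZERO(&readfds);
        FD_SET(fd, &readfds);
        struct timeval tv = { 0, 100000 };        /* 100 ms, illustrative value */

        int rv = select(fd + 1, &readfds, NULL, NULL, &tv);
        if (rv < 0) { perror("select"); break; }
        if (rv == 0) continue;                    /* timeout: just go around again */

        char buffer[10];
        ssize_t n = recvfrom(fd, buffer, sizeof(buffer) - 1, 0, NULL, NULL);
        if (n >= 0)
            printf("%zd bytes received\n", n);    /* hand the data to the GUI via a queue or queued Qt signal */
    }
    close(fd);
    return NULL;
}

Because the thread spends its idle time blocked inside select() rather than being woken every 10 ms, it no longer monopolizes a core, and the GUI thread is unaffected as long as received data is handed over through a queue or a queued Qt signal rather than touched directly.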

Related

C/C++: socket() creation fails in the loop, too many open files

I am implementing a client-server TCP socket application. The client is on an OpenWRT Linux router (C based) and writes some data to the socket repeatedly, in a loop, at some rate. The server is on a Linux Ubuntu machine (C/C++ based) and reads the data in a loop as it arrives.
Problem: Running the server and then the client, the server keeps reading new data. Both sides work well until the number of data deliveries (# of connections) reaches 1013. After that, the client gets stuck at socket(AF_INET,SOCK_STREAM,0) with socket creation failed...: Too many open files. Apparently, the number of open fds approaches ulimit -n = 1024 on the client.
Here are snippets of the code showing the loop structures for Server.cpp and Client.c:
Server.cpp:
// TCP Socket creation stuff over here (work as they should):
// int sock_ = socket() / bind() / listen()
while (1)
{
    socklen_t sizeOfserv_addr = sizeof(serv_addr_);
    fd_set set;
    struct timeval timeout;
    int connfd_;

    FD_ZERO(&set);
    FD_SET(sock_, &set);
    timeout.tv_sec = 10;
    timeout.tv_usec = 0;

    int rv_ = select(sock_ + 1, &set, NULL, NULL, &timeout);
    if(rv_ == -1){
        perror("select");
        return 1;
    }
    else if(rv_ == 0){
        printf("Client disconnected.."); /* a timeout occurred */
        close (connfd_);
        close (sock_);
    }
    else{
        connfd_ = accept (sock_,(struct sockaddr*)&serv_addr_,(socklen_t*)&sizeOfserv_addr);
        if (connfd_ >= 0) {
            int ret = read (connfd_, &payload, sizeof(payload)); /* some payload */
            if (ret > 0)
                printf("Received %d bytes !\n", ret);
            close (connfd_); /* Keep parent socket open (sock_) */
        }else{
            printf("Server accept failed..\n");
            close (connfd_);
            close (stcp.sock_);
            return 0;
        }
    }
}
Client.c:
while (payload_exist) /* assuming payload_exist is true */
{
    struct sockaddr_in servaddr;
    int sock;

    if (sock = socket(AF_INET, SOCK_STREAM, 0) == -1)
        perror("socket creation failed...\n");

    int one = 1;
    int idletime = 2;
    setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one));
    setsockopt(sock, IPPROTO_TCP, TCP_KEEPIDLE, &idletime, sizeof(idletime));
    setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));

    bzero(&servaddr, sizeof(servaddr));
    servaddr.sin_family = AF_INET;
    servaddr.sin_addr.s_addr = inet_addr("192.168.100.12");
    servaddr.sin_port = htons(PORT); /* some PORT */

    if (connect (sock, (struct sockaddr*)&servaddr, sizeof(servaddr)) != 0){
        perror("connect failed...");
        return 1;
    }

    write(sock, (struct sockaddr*)&payload, sizeof(payload)); /* some new payload */
    shutdown(sock, SHUT_WR);

    bool serverOff = false;
    while (!serverOff){
        if(read(sock, &res, sizeof(res)) < 0){
            serverOff = true;
            close(sock);
        }
    }
}
NOTE: The payload is 800 bytes and always gets fully transmitted per write. With both code snippets defined under int main(), the client keeps creating sockets and sending data; on the other side, the server receives everything and would automatically close() and leave if the client terminates, thanks to select(). If I don't terminate the client, however, the print logs show that the server successfully receives 1013 payloads before the client crashes with socket creation failed...: Too many open files.
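As an aside (not part of the original post), the limit the client is running into can be confirmed from inside the program; a minimal sketch using getrlimit, which reports the same value as ulimit -n:

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
    struct rlimit rl;
    /* RLIMIT_NOFILE is the per-process cap on open file descriptors */
    if (getrlimit(RLIMIT_NOFILE, &rl) == 0)
        printf("soft fd limit: %llu, hard fd limit: %llu\n",
               (unsigned long long)rl.rlim_cur, (unsigned long long)rl.rlim_max);
    return 0;
}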
Update:
Following the point mentioned by Steffen Ullrich, it turned out that the client socket fd itself was not leaked; a second fd in the original loop (which was left open) was what pushed the number of open fds over the ulimit.
if(read(sock, &res, sizeof(res)) < 0){
    serverOff = true;
    close(sock); /********* Not actually closing sock *********/
}
Your check for end of connection is wrong.
read returns 0 if the other side has shut down the connection and <0 only on error.
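In other words, a minimal corrected version of the client's wait-for-close loop (keeping the sock and res names from the question) would look something like this:

/* Sketch: read() == 0 means the server closed the connection; < 0 means error.
   Either way the socket is closed exactly once, so no fd is left open. */
bool serverOff = false;
while (!serverOff) {
    ssize_t n = read(sock, &res, sizeof(res));
    if (n > 0)
        continue;              /* more reply data, keep reading */
    if (n < 0)
        perror("read failed"); /* genuine error */
    serverOff = true;          /* n == 0: orderly shutdown by the server */
}
close(sock);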
if (sock = socket(AF_INET, SOCK_STREAM, 0) == -1)
perror("socket creation failed...\n");
Given the precedence of operators in C this basically says:
sock = ( socket(AF_INET, SOCK_STREAM, 0) == -1 );
if (sock) ...
Assuming that socket(...) does not return an error but a file descriptor (i.e. >= 0), the comparison will be false, so this essentially says sock = 0, while leaking a file descriptor whenever the fd returned by socket was > 0.
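A corrected version of that line, with the assignment done before the comparison so sock actually receives the descriptor, would be:

int sock = socket(AF_INET, SOCK_STREAM, 0);   /* assign the descriptor first */
if (sock == -1) {                             /* then test it */
    perror("socket creation failed");
    return 1;
}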

Set a timeout for recv from socket on Windows

Hi,
I think my code is correct but it doesn't work :(
To set a timeout for the recv function on Windows, I know I must use this code:
DWORD timeout = 2000;
if (setsockopt(listenSocket, SOL_SOCKET, SO_RCVTIMEO, (char*)&timeout, sizeof(DWORD)))
{
    perror("setsockopt");
    return -1;
}
But it doesn't work.
The code of my server is:
SOCKET listenSocket;
SOCKET remoteSocket = INVALID_SOCKET;
SOCKADDR_IN Server_addr;
SOCKADDR_IN Client_addr;
int sin_size;
short port;
int wsastartup;
int ls_result;

WORD wVersionRequested = 0x0202;
WSADATA wsaData;
wsastartup = WSAStartup(wVersionRequested, &wsaData);
if (wsastartup != NO_ERROR) cout << "Error WSAStartup()" << endl;

listenSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
port = 4000;
Server_addr.sin_family = AF_INET;
Server_addr.sin_addr.s_addr = inet_addr("127.0.0.1");
Server_addr.sin_port = htons(port);

if (bind(listenSocket, (LPSOCKADDR) &Server_addr, sizeof(struct sockaddr)) < 0) {
    cout << "Server: error bind." << endl;
    closesocket(listenSocket);
    return -1;
}

ls_result = listen(listenSocket, SOMAXCONN);

sin_size = sizeof(struct sockaddr_in);
remoteSocket = accept(listenSocket, (struct sockaddr *) &Client_addr, &sin_size);

// SET THE TIME OUT
DWORD timeout = 300;
if (setsockopt(remoteSocket, SOL_SOCKET, SO_RCVTIMEO, (char*)&timeout, sizeof(DWORD)))
{
    perror("setsockopt");
    return -1;
}

int i = 0;
while (i < 50) {
    t_start = clock();
    // when the client receives the send below it waits 3 seconds and then transmits the answer
    send(remoteSocket, "code of start transmission", sizeof("code of start transmission"), 0);
    recv_size = recv(remoteSocket, messaggio, sizeof(messaggio), 0);
    end = clock();   // presumably missing from the original snippet
    printf("time for read= %f second \n", ((double)(end - t_start)) / CLOCKS_PER_SEC);
    i = i + 1;
}
When the client receives the message "code of start transmission" from the server, it waits 3 seconds and then answers the server.
I expect the time for the read to be 300 ms and recv_size < 0; recv_size is indeed < 0, but the time for the read is more or less 1.5 seconds (the server waits for the client's message). I don't understand why.
I'm on Windows and I'm using Eclipse and mingw-w64.
Can someone please help me?
Your code tries to use the socket after it has timed out. This is not a good idea because the socket is still somewhere in the middle of the failed blocking operation and in no shape to start a new operation. There's no way to unwind the portions of the operation that have previously completed and put the socket back where it was before the operation started.
Once a blocking socket operation times out, all you can safely do is close the socket. There is no support for undoing an operation that is partially completed and leaving the socket in any kind of sane state.
If a send or receive operation times out on a socket, the socket state is indeterminate, and should not be used[.] -- MSDN
The SO_RCVTIMEO socket option should never be used in code that's designed to work with sockets. It's a kludge to prevent infinite waits in code that wasn't designed to work with sockets natively. These aren't the droids you're looking for.
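For illustration only (this is not part of the answer above), one way to get a receive timeout that leaves the socket in a well-defined state is to wait with select() before calling recv(); the 300 ms value and the recv_size/messaggio names simply mirror the question:

// Sketch: poll for readability first, so recv() is only called when data is waiting.
fd_set rfds;
FD_ZERO(&rfds);
FD_SET(remoteSocket, &rfds);

timeval tv;
tv.tv_sec  = 0;
tv.tv_usec = 300 * 1000;                          // 300 ms

int r = select(0, &rfds, NULL, NULL, &tv);        // first argument is ignored by Winsock
if (r > 0 && FD_ISSET(remoteSocket, &rfds))
    recv_size = recv(remoteSocket, messaggio, sizeof(messaggio), 0);
else if (r == 0)
    printf("timed out, socket still usable\n");   // no data yet; decide what to do next
else
    printf("select failed: %d\n", WSAGetLastError());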
To set a timeout for recv function on windows i know i must use this code:
DWORD timeout = 2000;
if (setsockopt(listenSocket, SOL_SOCKET, SO_RCVTIMEO, (char*)&timeout, sizeof(DWORD)))
{ perror("setsockopt");
return -1;
}
No. It should be an int, not a DWORD, but the main problem is that you are setting an accept() timeout here, as this is the listening socket. You need to set it on the accepted socket(s).

How to use both TCP and UDP in one application in c++

I'm developing a server-client project based on Winsock in c++. I have designed the server and the client sides so that they can send and receive text messages and files as well.
Then I decided to go for audio communication between the server and the client. I've actually implemented that; however, I've figured out that I did everything using the TCP protocol, and for audio communication it is better to use UDP.
Then I searched the internet and found out that it is possible to use TCP and UDP alongside each other.
I tried to use the UDP protocol but didn't make any major progress.
My problem is that I use both recv() and recvfrom() in a while loop like this:
while (true)
{
    buflen = recv(clientS, buffer, 1024, NULL);
    if (buflen > 0)
    {
        // Send the received buffer
    }
    else if (buflen == 0)
    {
        printf("closed\n");
        break;
    }
    buflen = recvfrom(udpS, buffer, 1024, NULL, (struct sockaddr*)&_s, &_size);
But recvfrom() blocks. I think I haven't done the job properly, but I couldn't figure out how to do it.
Here, Server in C accepting UDP and TCP connections, I found a similar question, but the answers were just explanations and there was no sample code to demonstrate the point clearly.
Now I need you to help me understand clearly how to receive data from both TCP and UDP connections.
Any help is appreciated.
When dealing with multiple sockets at a time, use select() to know which socket has data pending before you read it, eg:
while (true)
{
    fd_set rfd;
    FD_ZERO(&rfd);
    FD_SET(clientS, &rfd);
    FD_SET(udpS, &rfd);

    struct timeval timeout;
    timeout.tv_sec = ...;
    timeout.tv_usec = ...;

    int ret = select(0, &rfd, NULL, NULL, &timeout);
    if (ret == SOCKET_ERROR)
    {
        // handle error
        break;
    }
    if (ret == 0)
    {
        // handle timeout
        continue;
    }

    // at least one socket is readable, figure out which one(s)...

    if (FD_ISSET(clientS, &rfd))
    {
        buflen = recv(clientS, buffer, 1024, NULL);
        if (buflen == SOCKET_ERROR)
        {
            // handle error...
            printf("error\n");
        }
        else if (buflen == 0)
        {
            // handle disconnect...
            printf("closed\n");
        }
        else
        {
            // handle received data...
        }
    }

    if (FD_ISSET(udpS, &rfd))
    {
        buflen = recvfrom(udpS, buffer, 1024, NULL, (struct sockaddr*)&_s, &_size);
        //...
    }
}

Server socket finishes when client closes connection

I'm trying to create a server socket with C++ in order to accept one client connection at a time. The program successfully creates the server socket and waits for incoming connections, but when a connection is closed by the client the program loops endlessly. Otherwise, if the connection is interrupted, it keeps waiting for new connections as expected. Any idea why this is happening? Thanks
This is my C++ server code:
int listenfd, connfd, n;
struct sockaddr_in servaddr, cliaddr;
socklen_t clilen;
pid_t childpid;
char mesg[1000];

listenfd = socket(AF_INET, SOCK_STREAM, 0);
bzero(&servaddr, sizeof(servaddr));
servaddr.sin_family = AF_INET;
servaddr.sin_addr.s_addr = htonl(INADDR_ANY);
servaddr.sin_port = htons(32000);
bind(listenfd, (struct sockaddr *)&servaddr, sizeof(servaddr));
listen(listenfd, 1024);

while (true) {
    clilen = sizeof(cliaddr);
    connfd = accept(listenfd, (struct sockaddr *)&cliaddr, &clilen);
    if ((childpid = fork()) == 0) {
        close (listenfd);
        while (true) {
            n = recvfrom(connfd, mesg, 1000, 0, (struct sockaddr *)&cliaddr, &clilen);
            sendto(connfd, mesg, n, 0, (struct sockaddr *)&cliaddr, sizeof(cliaddr));
            mesg[n] = 0;
            printf("%d: %s \n", n, mesg);
            if (n <= 0) break;
        }
        close(connfd);
    }
}
For some reason, when the client closes the connection, the program keeps printing -1: even with the if-break clause..
You never close connfd in the parent process (when childpid != 0), and you do not properly terminate the child process, which will keep looping. Your if block should look like:
if ((childpid = fork()) == 0) {
    ...
    close(connfd);
    exit(0);
}
else {
    close(connfd);
}
But as you say you want to accept one connection at a time, you can simply not fork.
And as seen in other answers:
do not use mesg[n] without testing n >= 0
recvfrom and sendto are overkill for TCP; simply use recv and send (or even read and write) (see the sketch below)
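Putting those points together, a minimal sketch of the accept/receive loop without fork(), using recv/send and only touching mesg[n] when n > 0 (variable names taken from the question):

/* Sketch: one client at a time, no fork; n is validated before mesg[n] is used. */
while (true) {
    clilen = sizeof(cliaddr);
    connfd = accept(listenfd, (struct sockaddr *)&cliaddr, &clilen);
    if (connfd < 0)
        continue;                      /* accept failed, wait for the next client */
    while (true) {
        n = recv(connfd, mesg, sizeof(mesg) - 1, 0);
        if (n <= 0)                    /* 0 = client closed, < 0 = error */
            break;
        mesg[n] = 0;                   /* safe: n > 0 here */
        printf("%d: %s \n", n, mesg);
        send(connfd, mesg, n, 0);      /* echo back to the client */
    }
    close(connfd);
}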
mesg[n] = 0;
This breaks when n < 0, i.e. when recvfrom reports an error after the socket is closed.
The problem is your "n" and recvfrom. You have a TCP client, so recvfrom won't return the value you expect.
Try to have a look at:
How to send and receive data socket TCP (C/C++)
Edit 1:
Take note that you do the binding, not connect(): http://www.beej.us/guide/bgnet/output/html/multipage/recvman.html
A return value of -1 means there is an error in receiving data and errno will be set accordingly, so please check the error flag.
You've written a TCP server, but you use recvfrom and sendto, which are specific to connectionless protocols (UDP).
Try recv and send instead; that might help.

*nix & C++ writing a non-blocking socket server

I'm experiencing some issues with rewriting my blocking socket server to a non-blocking version.
Actually, I can't even seem to get a socket connected anymore. I've been googling for most of today and trying different solutions I find here and there, but none of them seem to work properly...
Currently my server loop just keeps timing out on the select() call, with no new sockets accepted.
The client socket seems to connect on some level: if I start it, it blocks trying to write, and if I close the server, it reports that the connection was reset by peer.
Is the following a correct assumption?
With a non-blocking server I should normally open the socket, then set its flags to non-blocking, bind it, and then start calling select() on the read file descriptor set and wait for it to populate?
I need to remove the old blocking accept() call, which was waiting endlessly..
If I try calling accept now, it just returns -1 on me...
Here is the relevant code I'm trying now
fd_set incoming_sockets;
....
int listener_socket, newsockfd, portno;
socklen_t clilen;
struct sockaddr_in serv_addr, cli_addr;
....
listener_socket = socket(AF_INET, SOCK_STREAM, 0); //get socket handle

int flags = fcntl(listener_socket, F_GETFL, 0);
if( fcntl(listener_socket, F_SETFL, flags | O_NONBLOCK) < 0 )
    log_writer->write_to_error_log("Error setting listening socket to non blocking", false);

memset(&serv_addr, 0, sizeof(struct sockaddr_in));
serv_addr.sin_family = AF_INET;
serv_addr.sin_addr.s_addr = INADDR_ANY;
serv_addr.sin_port = htons(portno);
....
if (bind(listener_socket, (struct sockaddr *) &serv_addr,
         sizeof(struct sockaddr_in)) < 0)
{
    log_writer->write_to_error_log("Unable to bind socket, aborting!", true);
}
....
struct timeval timeout;
timeout.tv_sec = 1;
timeout.tv_usec = 0;
int ready_sockets = 0;

listen(listener_socket,1);

FD_ZERO(&incoming_sockets);
FD_SET(listener_socket, &incoming_sockets);

while(true)
{
    ready_sockets = select(listener_socket + 1 , &incoming_sockets, (fd_set * ) 0, (fd_set * ) 0, &timeout );
    if(ready_sockets == 0)
    {
        //I loop here now for ever
        std::cout << "no new sockets available, snooze 2\n";
        sleep(2);
    } else
    {
        std::cout << "connection received!\n";
Since you don't show the whole loop, I don't know if you do it later, but you should initialize the descriptor sets and timeout structure before every call to select.
You should move the FD_ZERO()/FD_SET() macros inside the loop; select will actually change the bitmasks in the fd_sets (and the timeout value). Reinitialise them on every iteration. Also check for select returning -1 and the associated errno (EPIPE ...)
while(true)
{
    FD_ZERO(&incoming_sockets);
    FD_SET(listener_socket, &incoming_sockets);
    timeout.tv_sec = 1;     /* select() may also modify the timeout, so reset it as well */
    timeout.tv_usec = 0;

    ready_sockets = select(listener_socket + 1 , &incoming_sockets, (fd_set * ) 0, (fd_set * ) 0, &timeout );
    if(ready_sockets == 0)
    {
    ... }