UDP receive waits indefinitely in x64 mode? - C++

My code consists of a UDP server which receives data from a UDP client. The project can be built in two configurations in Visual Studio, Win32 and x64. If I run the UDP server as a Win32 build, everything works fine and it receives data, but in the x64 build the receive waits indefinitely. There is no change in the code, just a single receive call, yet the two builds behave differently. My UDP receive looks like this:
WSADATA wsadata;
int error = WSAStartup(0X0202, &wsadata);
if (error) {
    cerr << "UdpIPV4Server.cpp:- WSAStartup failed" << endl;
    return -1;
}
if ((socket_var = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) == -1) {
    cerr << "UdpIPV4Server.cpp:- socket function failed" << endl;
    return -1;
}
memset((char *) &si_server, 0, sizeof(si_server));
si_server.sin_family = AF_INET;
si_server.sin_port = htons(7888);
char host[NI_MAXHOST] = "10.8.0.2";
if (inet_pton(AF_INET, host, &si_server.sin_addr) != 1) {
    cerr << "UdpIPV4Server.cpp: inet_pton() failed\n";
    return -1;
}
if (bind(socket_var, (struct sockaddr *)&si_server, sizeof(si_server)) == -1) {
    cerr << "UdpIPV4Server.cpp:- bind failed: " << endl;
    return -1;
}
char recv_buffer[65534];
int buf_size = 65534;
memset((char*)&si_client, 0, sizeof(struct sockaddr_in));
int si_client_len = sizeof(si_client);
if ((recv_len = recvfrom(socket_var, recv_buffer, buf_size, 0, (struct sockaddr *)&si_client, &si_client_len)) == -1) {
    cerr << "udpipv4server.cpp:- recvfrom failed" << endl;
    return recv_len;
}
So what could be the issue? Why this change in behaviour?

Related

C/C++: socket() creation fails in the loop, too many open files

I am implementing a client-server TCP socket application. The client runs on an OpenWRT Linux router (C based) and writes some data to the socket repeatedly, in a loop, at some fixed rate. The server runs on a Linux Ubuntu machine (C/C++ based) and reads the data in a loop as it arrives.
Problem: running the server and then the client, the server keeps reading new data. Both sides work well until the number of data deliveries (i.e. connections) reaches 1013. After that, the client gets stuck at socket(AF_INET, SOCK_STREAM, 0) with socket creation failed...: Too many open files. Apparently, the number of open fds approaches ulimit -n = 1024 on the client.
Here are snippets showing the loop structures of the server and the client:
Server.c:
// TCP Socket creation stuff over here (work as they should):
// int sock_ = socket() / bind() / listen()
while (1)
{
    socklen_t sizeOfserv_addr = sizeof(serv_addr_);
    fd_set set;
    struct timeval timeout;
    int connfd_;
    FD_ZERO(&set);
    FD_SET(sock_, &set);
    timeout.tv_sec = 10;
    timeout.tv_usec = 0;
    int rv_ = select(sock_ + 1, &set, NULL, NULL, &timeout);
    if (rv_ == -1) {
        perror("select");
        return 1;
    }
    else if (rv_ == 0) {
        printf("Client disconnected.."); /* a timeout occurred */
        close(connfd_);
        close(sock_);
    }
    else {
        connfd_ = accept(sock_, (struct sockaddr*)&serv_addr_, (socklen_t*)&sizeOfserv_addr);
        if (connfd_ >= 0) {
            int ret = read(connfd_, &payload, sizeof(payload)); /* some payload */
            if (ret > 0)
                printf("Received %d bytes !\n", ret);
            close(connfd_); /* Keep parent socket open (sock_) */
        } else {
            printf("Server accept failed..\n");
            close(connfd_);
            close(stcp.sock_);
            return 0;
        }
    }
}
Client.cpp:
while (payload_exist) /* assuming payload_exist is true */
{
    struct sockaddr_in servaddr;
    int sock;
    if (sock = socket(AF_INET, SOCK_STREAM, 0) == -1)
        perror("socket creation failed...\n");
    int one = 1;
    int idletime = 2;
    setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one));
    setsockopt(sock, IPPROTO_TCP, TCP_KEEPIDLE, &idletime, sizeof(idletime));
    setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
    bzero(&servaddr, sizeof(servaddr));
    servaddr.sin_family = AF_INET;
    servaddr.sin_addr.s_addr = inet_addr("192.168.100.12");
    servaddr.sin_port = htons(PORT); /* some PORT */
    if (connect(sock, (struct sockaddr*)&servaddr, sizeof(servaddr)) != 0) {
        perror("connect failed...");
        return 1;
    }
    write(sock, (struct sockaddr*)&payload, sizeof(payload)); /* some new payload */
    shutdown(sock, SHUT_WR);
    bool serverOff = false;
    while (!serverOff) {
        if (read(sock, &res, sizeof(res)) < 0) {
            serverOff = true;
            close(sock);
        }
    }
}
NOTE: the payload is 800 bytes and always gets fully transmitted per write call. With both snippets placed inside int main(), the client keeps creating sockets and sending data; on the other side, the server receives everything and will automatically close() and exit if the client terminates, thanks to the select() timeout. If I don't terminate the client, however, the print logs show that the server successfully receives 1013 payloads before the client fails with socket creation failed...: Too many open files.
Update:
Following the point mentioned by Steffen Ullrich, it turned out that the client socket fd itself was not leaking; rather, a second fd that the original loop left open was pushing the process over the ulimit.
if (read(sock, &res, sizeof(res)) < 0) {
    serverOff = true;
    close(sock); /********* Not actually closing sock *********/
}
Your check for end of connection is wrong: read returns 0 if the other side has shut down the connection, and < 0 only on error. With the check above, a clean shutdown (return value 0) never sets serverOff, so the socket is never closed.
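A minimal sketch of a corrected drain-and-close loop, assuming sock, res, and serverOff as in the question (it treats both end-of-stream and an error as the end of the exchange):
bool serverOff = false;
while (!serverOff) {
    ssize_t n = read(sock, &res, sizeof(res));
    if (n > 0)
        continue;            /* got data, keep reading the reply */
    if (n < 0)
        perror("read");      /* a real error */
    /* n == 0 means the peer shut the connection down cleanly */
    serverOff = true;
    close(sock);             /* close exactly once in either case */
}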
if (sock = socket(AF_INET, SOCK_STREAM, 0) == -1)
    perror("socket creation failed...\n");
Given the precedence of operators in C this basically says:
sock = (socket(AF_INET, SOCK_STREAM, 0) == -1);
if (sock) ...
Assuming that socket(...) does not return an error but a file descriptor (i.e. >= 0), the comparison is false, so this essentially sets sock = 0, while leaking the file descriptor returned by socket whenever it is > 0.
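A sketch of the corrected creation check, with the assignment done before the comparison (keeping the question's names; the error handling is just an assumption for illustration):
int sock = socket(AF_INET, SOCK_STREAM, 0);  /* assign first... */
if (sock == -1) {                            /* ...then compare the real fd */
    perror("socket creation failed...");
    return 1;
}
/* equivalent one-liner: if ((sock = socket(AF_INET, SOCK_STREAM, 0)) == -1) */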

Server socket not working properly while running in background

I have two processes, a server and a client, each with its own socket. Initially I start the server with ./server and then run ./client.
But I want the server process to listen in the background, always waiting for requests from the client.
So instead of executing ./server, I used ./server &. This works fine for the first client call, but when I then try to connect to the server again, it gives "connection failed".
server.cpp
int main(int argc, char const *argv[])
{
    int server_fd, new_socket, valread;
    struct sockaddr_in address;
    int opt = 1;
    int addrlen = sizeof(address);
    char buffer[1024] = {0};
    const char *hello = "Hello from server";
    if ((server_fd = socket(AF_INET, SOCK_STREAM, 0)) == 0)
    {
        perror("socket failed");
        exit(EXIT_FAILURE);
    }
    // Forcefully attaching socket to the port 8080
    if (setsockopt(server_fd, SOL_SOCKET, SO_REUSEADDR | SO_REUSEPORT,
                   &opt, sizeof(opt)))
    {
        perror("setsockopt");
        exit(EXIT_FAILURE);
    }
    address.sin_family = AF_INET;
    address.sin_addr.s_addr = INADDR_ANY;
    address.sin_addr.s_addr = inet_addr("192.168.2.184");
    address.sin_port = htons(PORT);
    // Forcefully attaching socket to the port 8080
    if (bind(server_fd, (struct sockaddr *)&address,
             sizeof(address)) < 0)
    {
        perror("bind failed");
        exit(EXIT_FAILURE);
    }
    if (listen(server_fd, 3) < 0)
    {
        perror("listen");
        exit(EXIT_FAILURE);
    }
    if ((new_socket = accept(server_fd, (struct sockaddr *)&address,
                             (socklen_t*)&addrlen)) < 0)
    {
        perror("accept");
        exit(EXIT_FAILURE);
    }
    valread = read(new_socket, buffer, 1024);
    printf("%s\n", buffer);
    send(new_socket, hello, strlen(hello), 0);
    printf("Hello message sent\n");
    return 0;
}
client.cpp
int main(int argc, char const *argv[])
{
    int sock = 0, valread;
    struct sockaddr_in serv_addr;
    char *hello = "Hello from client";
    char buffer[1024] = {0};
    if ((sock = socket(AF_INET, SOCK_STREAM, 0)) < 0)
    {
        printf("\n Socket creation error \n");
        return -1;
    }
    serv_addr.sin_family = AF_INET;
    serv_addr.sin_port = htons(PORT);
    // Convert IPv4 and IPv6 addresses from text to binary form
    if (inet_pton(AF_INET, "192.168.2.184", &serv_addr.sin_addr) <= 0)
    {
        printf("\nInvalid address/ Address not supported \n");
        return -1;
    }
    if (connect(sock, (struct sockaddr *)&serv_addr, sizeof(serv_addr)) < 0)
    {
        printf("\nConnection Failed \n");
        return -1;
    }
    send(sock, hello, strlen(hello), 0);
    printf("Hello message sent\n");
    valread = read(sock, buffer, 1024);
    printf("%s\n", buffer);
}
You have a misunderstanding about the bash command line's ampersand (&) operator.
The trailing ampersand directs the shell to run the command in the background: it is forked and run in a separate sub-shell, as a job, asynchronously. The shell immediately returns a status of 0 and continues as normal, either processing further commands in a script or returning the cursor to the user in a Linux terminal.
So you invoke the program in the background and can immediately continue to work in the shell.
But this does not mean that your program keeps running. When your program reaches its end, its process terminates. The program stops.
So what you could do (but definitely should not do) is call your program from the shell in a loop.
The correct way is to build a loop into your server program and keep accepting connections. You then need either to fork new processes or to use some kind of factory to create new TCP handler classes, or whatever is necessary to handle the requests from the clients; a minimal accept loop is sketched below.
All this is not that simple, because the control flow of the program needs to be well designed. Linux has functions like (p)select, (p)poll or epoll to support such activities.
There are also design patterns like Reactor/Proactor/ACT available. You could implement those, but it is better to use an existing library.
For testing purposes, though, your approach is OK.
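A minimal sketch of such an accept loop, assuming the same server_fd, address, and hello variables as in the question's server.cpp (iterative handling, no forking, just enough to keep the server alive between clients):
// after socket()/bind()/listen() as in server.cpp above
while (1)
{
    int addrlen = sizeof(address);
    int new_socket = accept(server_fd, (struct sockaddr *)&address,
                            (socklen_t *)&addrlen);
    if (new_socket < 0)
    {
        perror("accept");
        continue;               // keep the server alive on transient errors
    }
    char buffer[1024] = {0};
    int valread = read(new_socket, buffer, sizeof(buffer) - 1);
    if (valread > 0)
        printf("%s\n", buffer);
    send(new_socket, hello, strlen(hello), 0);
    close(new_socket);          // close the per-client socket, keep server_fd open
}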

consideration to speedup tcp/udp connection

I'm using the winsock2 library to write an SDK (Software Development Kit) for a digital board with 1 Gbps Ethernet on it. The problem I'm facing is that I cannot get a high enough transfer rate (TX/RX) over Ethernet using TCP/UDP. I'd rather use UDP RX as a case study to understand each parameter that can speed up my connection. With the code below I can send data from my PC to my device at only ~350 Mbps.
Start Listening:
//Initialize winsock library
if (WSAStartup(MAKEWORD(2, 2), &m_wsa) != 0)
{
    error = WSAGetLastError();
}
//Create a socket
if ((m_serversocket = socket(AF_INET, SOCK_DGRAM, 0)) == INVALID_SOCKET)
{
    error = WSAGetLastError();
    WSACleanup();
    return;
}
int buffersize = 0x200000;
int buffersizelen = sizeof(buffersize);
setsockopt(m_serversocket, SOL_SOCKET, SO_RCVBUF, (char*)&buffersize, buffersizelen);
//Prepare the sockaddr_in structure
memset(&m_serveraddress, 0, sizeof(m_serveraddress));
m_serveraddress.sin_family = AF_INET;
inet_pton(AF_INET, SERVER_Address, &m_serveraddress.sin_addr.s_addr);
m_serveraddress.sin_port = htons(SEND_PORT_number);
// Bind the socket to any address and the specified port.
if (bind(m_serversocket, (struct sockaddr *)&m_serveraddress, sizeof(m_serveraddress)) == SOCKET_ERROR)
{
    error = WSAGetLastError();
    return;
}
Sending file:
sendchunk = 1000;
// Read File and fill the buffer
if ((m_err = fopen_s(&m_send_file, m_send_filename.toLocal8Bit().data(), "rb")) == 0)
{
    // start to send buffer
    while (!(feof(m_send_file) || m_stopsend))
    {
        remainsize = fread(read_data, 1, sendfilesize / count, m_send_file);
        sndsize_ProgBar += remainsize;
        sentsize = 0;
        timer.start();
        while ((remainsize >= sendchunk) && !(m_stopsend))
        {
            sendto(m_serversocket, read_data + sentsize, sendchunk, 0, (struct sockaddr *)&m_serveraddress, addrlengh); // 1024
            sentsize += sendchunk;
            remainsize -= sendchunk;
        }
        elapsedtimemicrosec = timer.getElapsedTimeInMicroSec();
        timer.stop();
        rcvRate = (float)((remainsize + sentsize) / elapsedtimemicrosec);
        emit UpdateSendRate(rcvRate);
    }
}
Any suggestion to speed up the connection?
P.S.: I already tried different sendchunk sizes and chose the best one.

How to detect "Over Current" event of an USB device?

I have to detect the "over current" event of a USB device.
I'm developing on a Linux system in C/C++.
How do I do that?
You can use uevents. Here is some tutorial.
For watching uevents you have to bind a netlink socket using the NETLINK_KOBJECT_UEVENT protocol:
int create_socket()
{
    int sock = -1;
    int result = 0;
    struct sockaddr_nl snl;
    memset(&snl, 0x00, sizeof(struct sockaddr_nl));
    snl.nl_family = AF_NETLINK;
    snl.nl_pid = getpid();
    snl.nl_groups = -1;
    sock = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);
    if (sock != -1)
    {
        result = bind(sock, (struct sockaddr *) &snl, sizeof(struct sockaddr_nl));
        if (result < 0)
        {
            dbg("bind failed, exit\n");
            close(sock);
            sock = -1;
        }
    }
    else
        dbg("error getting socket, exit\n");
    return sock;
}
int main() {
    ...
    sock = create_socket();
    while (sock != -1)
    {
        buflen = recv(sock, &buffer, sizeof(buffer), 0);
        // parse buffer for event description
        ...
    }
}
One of the events for sure will indicate the over-current state.
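As a rough illustration of the "parse buffer" step: a uevent payload is an "action@devpath" header followed by NUL-separated KEY=VALUE strings, so it can be walked entry by entry. The snippet below is only a sketch; the buffer name and size are assumptions, and the exact key/value that reports over-current depends on the USB host controller driver, so the match string is a placeholder.
char buffer[4096];
ssize_t buflen = recv(sock, buffer, sizeof(buffer) - 1, 0);
if (buflen > 0)
{
    buffer[buflen] = '\0';
    /* walk the NUL-separated entries: "add@/devices/...", "ACTION=add", ... */
    for (ssize_t i = 0; i < buflen; i += strlen(buffer + i) + 1)
    {
        const char *entry = buffer + i;
        printf("uevent: %s\n", entry);
        if (strstr(entry, "OVER_CURRENT") != NULL)   /* placeholder match string */
            printf("possible over-current event\n");
    }
}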
It might help you to look at the source code for your particular root port / host controller.
For example: http://www.spinics.net/lists/linux-usb/msg49451.html explains the overcurrent detection using a particular host controller driver.

C++ UDP. Why is recvfrom() is not blocking?

I am writing some simple client/server code using UDP. The program works fine, but if I start only the client, the recvfrom call does not block. However, when I remove the sendto call, recvfrom does block. Any idea what is going on?
Here is the client side code:
int server_length;              /* Length of server struct */
char send_buffer[256] = "hi";   /* Data to send */
time_t current_time;            /* Time received */
while (true)
{
    /* Transmit data to get time */
    server_length = sizeof(struct sockaddr_in);
    if (sendto(m_oSocket, send_buffer, (int)strlen(send_buffer) + 1, 0, (struct sockaddr *)&m_oServer, server_length) == -1)
    {
        fprintf(stderr, "Error transmitting data.\n");
        continue;
    }
    /* Receive time */
    if (recvfrom(m_oSocket, (char *)&current_time, (int)sizeof(current_time), 0, (struct sockaddr *)&m_oServer, &server_length) < 0)
    {
        fprintf(stderr, "Error receiving data.\n");
        continue;
    }
    /* Display time */
    printf("Current time: %s\n", ctime(&current_time));
    Sleep(1000);
}
And here is the initialization:
unsigned short m_iPortnumber;
struct sockaddr_in m_oServer;
struct sockaddr_in m_oClient;
SOCKET m_oSocket;
WSADATA w;              /* Used to open Windows connection */
int a1, a2, a3, a4;     /* Server address components in xxx.xxx.xxx.xxx form */
a1 = 192;
a2 = 168;
a3 = 2;
a4 = 14;
m_iPortnumber = 52685;
/* Open windows connection */
if (WSAStartup(0x0101, &w) != 0)
{
    fprintf(stderr, "Could not open Windows connection.\n");
    exit(0);
}
/* Open a datagram socket */
m_oSocket = socket(AF_INET, SOCK_DGRAM, 0);
if (m_oSocket == INVALID_SOCKET)
{
    fprintf(stderr, "Could not create socket.\n");
    WSACleanup();
    exit(0);
}
/* Clear out server struct */
memset((void *)&m_oServer, '\0', sizeof(struct sockaddr_in));
/* Set family and port */
m_oServer.sin_family = AF_INET;
m_oServer.sin_port = htons(m_iPortnumber);
/* Set server address */
m_oServer.sin_addr.S_un.S_un_b.s_b1 = (unsigned char)a1;
m_oServer.sin_addr.S_un.S_un_b.s_b2 = (unsigned char)a2;
m_oServer.sin_addr.S_un.S_un_b.s_b3 = (unsigned char)a3;
m_oServer.sin_addr.S_un.S_un_b.s_b4 = (unsigned char)a4;
/* Clear out client struct */
memset((void *)&m_oClient, '\0', sizeof(struct sockaddr_in));
/* Set family and port */
m_oClient.sin_family = AF_INET;
m_oClient.sin_addr.s_addr = INADDR_ANY;
m_oClient.sin_port = htons(0);
/* Bind local address to socket */
if (bind(m_oSocket, (struct sockaddr *)&m_oClient, sizeof(struct sockaddr_in)) == -1)
{
    fprintf(stderr, "Cannot bind address to socket.\n");
    closesocket(m_oSocket);
    WSACleanup();
    exit(0);
}
There are a variety of ways that sendto can fail. Some, such as an ARP failure, will cause an error during sendto itself. Others, such as an ICMP port unreachable, may only be reported when you next use the socket.
Your recvfrom call could actually be picking up the error from the ICMP response to your outgoing packet rather than real data.
Does a second recvfrom block as expected?
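As a rough sketch of how to check for this (reusing the question's m_oSocket/m_oServer names): on Windows, an ICMP port unreachable triggered by an earlier sendto on a UDP socket is reported as WSAECONNRESET on a later recvfrom, so inspecting the error code separates that case from a real reply.
int server_length = sizeof(struct sockaddr_in);
time_t current_time;
int n = recvfrom(m_oSocket, (char *)&current_time, (int)sizeof(current_time), 0,
                 (struct sockaddr *)&m_oServer, &server_length);
if (n == SOCKET_ERROR)
{
    if (WSAGetLastError() == WSAECONNRESET)
        fprintf(stderr, "Previous sendto triggered an ICMP port unreachable.\n");
    else
        fprintf(stderr, "recvfrom failed: %d\n", WSAGetLastError());
}
else
{
    printf("Current time: %s\n", ctime(&current_time));   /* real reply */
}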
The socket may need to be set explicitly to blocking or non-blocking mode with ioctlsocket (note that FIONBIO takes a u_long, not an int).
Set blocking:
u_long nMode = 0; // 0: blocking
if (ioctlsocket(objSocket, FIONBIO, &nMode) == SOCKET_ERROR)
{
    closesocket(objSocket);
    WSACleanup();
    return iRet;
}
Set non-blocking:
u_long nMode = 1; // 1: non-blocking
if (ioctlsocket(objSocket, FIONBIO, &nMode) == SOCKET_ERROR)
{
    closesocket(objSocket);
    WSACleanup();
    return iRet;
}
It looks like you're setting up the server socket and the client socket the same way. The initialization looks right for a server, but for the client you'll want to bind to port 0 so the system picks an ephemeral port.
In fact, for both of them you can use INADDR_ANY (IP 0.0.0.0), which doesn't bind to a specific interface but instead accepts datagrams arriving on the bound port via any interface.