Considerations to speed up a TCP/UDP connection - C++

I'm using the winsock2 library to write an SDK (Software Development Kit) for a digital board with 1 Gbps Ethernet on it. The problem I'm facing is that I cannot get a high enough transfer rate (TX/RX) over Ethernet using TCP/UDP. I'd rather use UDP RX as a case study to understand each parameter that can speed up my connection. With the code below I can send data from my PC to my device at only ~350 Mbps.
Start Listening:
//Initialize winsock library
if (WSAStartup(MAKEWORD(2, 2), &m_wsa) != 0)
{
    error = WSAGetLastError();
}

//Create a UDP socket
if ((m_serversocket = socket(AF_INET, SOCK_DGRAM, 0)) == INVALID_SOCKET)
{
    error = WSAGetLastError();
    WSACleanup();
    return;
}

// Enlarge the socket receive buffer (0x200000 = 2 MB)
int buffersize = 0x200000;
int buffersizelen = sizeof(buffersize);
setsockopt(m_serversocket, SOL_SOCKET, SO_RCVBUF, (char*)&buffersize, buffersizelen);

//Prepare the sockaddr_in structure
memset(&m_serveraddress, 0, sizeof(m_serveraddress));
m_serveraddress.sin_family = AF_INET;
inet_pton(AF_INET, SERVER_Address, &m_serveraddress.sin_addr.s_addr);
m_serveraddress.sin_port = htons(SEND_PORT_number);

// Bind the socket to the server address and the specified port.
if (bind(m_serversocket, (struct sockaddr *)&m_serveraddress, sizeof(m_serveraddress)) == SOCKET_ERROR)
{
    error = WSAGetLastError();
    return;
}
Sending file:
sendchunk = 1000;
// Read File and fill the buffer
if ((m_err = fopen_s(&m_send_file, m_send_filename.toLocal8Bit().data(), "rb")) == 0)
{
    // start to send buffer
    while (!(feof(m_send_file) || m_stopsend))
    {
        remainsize = fread(read_data, 1, sendfilesize / count, m_send_file);
        sndsize_ProgBar += remainsize;
        sentsize = 0;
        timer.start();
        while ((remainsize >= sendchunk) && !(m_stopsend))
        {
            sendto(m_serversocket, read_data + sentsize, sendchunk, 0, (struct sockaddr *)&m_serveraddress, addrlengh); // 1024
            sentsize += sendchunk;
            remainsize -= sendchunk;
        }
        elapsedtimemicrosec = timer.getElapsedTimeInMicroSec();
        timer.stop();
        rcvRate = (float)((remainsize + sentsize) / elapsedtimemicrosec);
        emit UpdateSendRate(rcvRate);
    }
}
Any suggestions to speed up the connection?
P.S.: I already tried different sendchunk sizes and chose the best one.
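One knob not touched in the code above is the send-side socket buffer (SO_SNDBUF); since only SO_RCVBUF is set, here is a minimal sketch of raising it and reading back what the stack actually granted (the 2 MB value is only an example, and the socket name is the one from the question):

// Sketch: enlarge the send buffer on the sending socket and read back the
// effective value; if it comes back much smaller, the OS clamped it.
int sndbuf = 0x200000; // request 2 MB
setsockopt(m_serversocket, SOL_SOCKET, SO_SNDBUF,
           (char*)&sndbuf, sizeof(sndbuf));

int effective = 0;
int optlen = sizeof(effective);
if (getsockopt(m_serversocket, SOL_SOCKET, SO_SNDBUF,
               (char*)&effective, &optlen) == 0)
{
    printf("SO_SNDBUF is now %d bytes\n", effective);
}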

Related

C/C++: socket() creation fails in the loop, too many open files

I am implementing a client-server TCP socket application. The client is on an OpenWRT Linux router (C based) and writes some data to the socket repeatedly, in a loop, at some frequency. The server is on a Linux Ubuntu machine (C/C++ based) and reads the data in a loop according to the data arrival rate.
Problem: Running the server and then the client, the server keeps reading new data. Both sides work well until the number of data deliveries (# of connections) reaches 1013. After that, the client gets stuck at socket(AF_INET, SOCK_STREAM, 0) with socket creation failed...: Too many open files. Apparently, the number of open fds approaches ulimit -n = 1024 on the client.
I have included code snippets that show the loop structures for the server and the client:
Server.c:
// TCP Socket creation stuff over here (works as it should):
// int sock_ = socket() / bind() / listen()
while (1)
{
    socklen_t sizeOfserv_addr = sizeof(serv_addr_);
    fd_set set;
    struct timeval timeout;
    int connfd_;

    FD_ZERO(&set);
    FD_SET(sock_, &set);
    timeout.tv_sec = 10;
    timeout.tv_usec = 0;

    int rv_ = select(sock_ + 1, &set, NULL, NULL, &timeout);
    if (rv_ == -1) {
        perror("select");
        return 1;
    }
    else if (rv_ == 0) {
        printf("Client disconnected.."); /* a timeout occurred */
        close(connfd_);
        close(sock_);
    }
    else {
        connfd_ = accept(sock_, (struct sockaddr*)&serv_addr_, (socklen_t*)&sizeOfserv_addr);
        if (connfd_ >= 0) {
            int ret = read(connfd_, &payload, sizeof(payload)); /* some payload */
            if (ret > 0)
                printf("Received %d bytes !\n", ret);
            close(connfd_); /* Keep parent socket open (sock_) */
        } else {
            printf("Server accept failed..\n");
            close(connfd_);
            close(stcp.sock_);
            return 0;
        }
    }
}
Client.cpp:
while (payload_exist) /* assuming payload_exist is true */
{
    struct sockaddr_in servaddr;
    int sock;
    if (sock = socket(AF_INET, SOCK_STREAM, 0) == -1)
        perror("socket creation failed...\n");

    int one = 1;
    int idletime = 2;
    setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one));
    setsockopt(sock, IPPROTO_TCP, TCP_KEEPIDLE, &idletime, sizeof(idletime));
    setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));

    bzero(&servaddr, sizeof(servaddr));
    servaddr.sin_family = AF_INET;
    servaddr.sin_addr.s_addr = inet_addr("192.168.100.12");
    servaddr.sin_port = htons(PORT); /* some PORT */

    if (connect(sock, (struct sockaddr*)&servaddr, sizeof(servaddr)) != 0) {
        perror("connect failed...");
        return 1;
    }

    write(sock, (struct sockaddr*)&payload, sizeof(payload)); /* some new payload */
    shutdown(sock, SHUT_WR);

    bool serverOff = false;
    while (!serverOff) {
        if (read(sock, &res, sizeof(res)) < 0) {
            serverOff = true;
            close(sock);
        }
    }
}
NOTE: payload is 800 bytes and always gets fully transmitted per write. With both loops defined under int main(), the client keeps creating sockets and sending data; on the other side, the server receives everything and will automatically close() and exit if the client terminates, thanks to the select() timeout. If I don't terminate the client, however, the print logs show that the server successfully receives 1013 payloads before the client crashes with socket creation failed...: Too many open files.
Update:
Following the point mentioned by Steffen Ullrich, it turned out that the client socket fd itself had no leak; a second fd in the original loop, which was left open, was what pushed the number of open fds over the ulimit.
if (read(sock, &res, sizeof(res)) < 0) {
    serverOff = true;
    close(sock); /********* Not actually closing sock *********/
}
Your check for end of connection is wrong.
read returns 0 if the other side has shut down the connection and <0 only on error.
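In other words, the loop should treat 0 as the end of the stream; a minimal sketch using the same variables as the question:

/* Corrected end-of-connection check: 0 means the peer shut the
   connection down, < 0 means a real error. */
bool serverOff = false;
while (!serverOff) {
    ssize_t n = read(sock, &res, sizeof(res));
    if (n == 0) {               /* orderly shutdown by the server */
        serverOff = true;
        close(sock);
    } else if (n < 0) {         /* real error */
        perror("read failed");
        serverOff = true;
        close(sock);
    }
    /* n > 0: response data is in res, keep reading */
}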
The second problem is here:
if (sock = socket(AF_INET, SOCK_STREAM, 0) == -1)
    perror("socket creation failed...\n");
Given the precedence of operators in C, this basically says:
sock = (socket(AF_INET, SOCK_STREAM, 0) == -1);
if (sock) ...
Assuming that socket(...) does not return an error but a file descriptor (i.e. >= 0), the comparison is false, so this essentially says sock = 0 while leaking a file descriptor whenever the fd returned by socket was > 0.
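A corrected version of the socket creation (a minimal sketch keeping the question's style), so that sock receives the real descriptor and can actually be closed later:

/* Parenthesize the assignment so sock gets the descriptor,
   not the result of the comparison. */
if ((sock = socket(AF_INET, SOCK_STREAM, 0)) == -1) {
    perror("socket creation failed...");
    return 1;
}
/* ... use sock, then close(sock) exactly once ... */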

Non-IOCP client send/recv error with IOCP server

Please understand that I am new to IOCP and my code may not be perfect.
I tried many examples from around here; none of them helped me.
My actual problem is on the client side: I have no idea if I am connecting properly to an IOCP server, or whether I send the data properly, and recv gives me WSA error 10038...
WSADATA wsd;
struct addrinfo *result = NULL, *ptr = NULL, hints;
WSAOVERLAPPED RecvOverlapped;
SOCKET ConnSocket = INVALID_SOCKET;
WSABUF DataBuf;
DWORD RecvBytes, Flags;
CRITICAL_SECTION criti;
char buffer[DATA_BUFSIZE];
int err = 0;
int rc;
// Load Winsock
rc = WSAStartup(MAKEWORD(2, 2), &wsd);
if (rc != 0) {
    return 1;
}
// Make sure the hints struct is zeroed out
SecureZeroMemory((PVOID)& hints, sizeof(struct addrinfo));
// Initialize the hints to retrieve the server address for IPv4
hints.ai_family = AF_INET;
hints.ai_socktype = SOCK_STREAM;
hints.ai_protocol = IPPROTO_TCP;
rc = getaddrinfo(IP, Port, &hints, &result);
if (rc != 0) {
    return 1;
}
for (ptr = result; ptr != NULL; ptr = ptr->ai_next) {
    if ((ConnSocket = socket(ptr->ai_family, ptr->ai_socktype, ptr->ai_protocol)) == INVALID_SOCKET) {
        freeaddrinfo(result);
        return 1;
    }
    rc = connect(ConnSocket, ptr->ai_addr, (int)ptr->ai_addrlen);
    if (rc == SOCKET_ERROR) {
        if (WSAECONNREFUSED == (err = WSAGetLastError())) {
            closesocket(ConnSocket);
            ConnSocket = INVALID_SOCKET;
            continue;
        }
        freeaddrinfo(result);
        closesocket(ConnSocket);
        WSACleanup();
        return 1;
    }
    break;
}
if (ConnSocket == INVALID_SOCKET) {
    freeaddrinfo(result);
    return 1;
}
int nZero = 0;
// Make sure the RecvOverlapped struct is zeroed out
SecureZeroMemory((PVOID)& RecvOverlapped, sizeof(WSAOVERLAPPED));
// Create an event handle and setup an overlapped structure.
RecvOverlapped.hEvent = WSACreateEvent();
if (RecvOverlapped.hEvent == NULL) {
    freeaddrinfo(result);
    closesocket(ConnSocket);
    return 1;
}
DataBuf.len = DATA_BUFSIZE;
DataBuf.buf = buffer;
// send data to server here?
// removed the packets, it`s not supposed to be public
// Call WSARecv until the peer closes the connection
// or until an error occurs
while (1) {
    Flags = 0;
    RecvBytes = 0;
    rc = WSARecv(ConnSocket, &DataBuf, 1, &RecvBytes, &Flags, &RecvOverlapped, NULL);
    if ((rc == SOCKET_ERROR) && (WSA_IO_PENDING != (err = WSAGetLastError()))) {
        closesocket(ConnSocket);
        break;
    }
    rc = WSAWaitForMultipleEvents(1, &RecvOverlapped.hEvent, TRUE, INFINITE, TRUE);
    if (rc == WSA_WAIT_FAILED) {
        break;
    }
    rc = WSAGetOverlappedResult(ConnSocket, &RecvOverlapped, &RecvBytes, FALSE, &Flags);
    if (rc == FALSE) {
        break;
    }
    // here I have a protocol where I read the received data
    WSAResetEvent(RecvOverlapped.hEvent);
    // If 0 bytes are received, the connection was closed
    if (RecvBytes == 0)
        break;
}
WSACloseEvent(RecvOverlapped.hEvent);
closesocket(ConnSocket);
freeaddrinfo(result);
WSACleanup();
I expect to be able to send data and receive the response from the IOCP server, but if I send 3 packets, I only receive 2 back, or sometimes even 1, when 3 packets are being sent back.
Can someone show me a working example of connecting and sending/receiving data to an IOCP server?
Many thanks!
You're using TCP. TCP is a stream protocol, not a datagram protocol. You cannot tell it what packets to send, and it cannot tell you what packets it received (it doesn't even know because that's handled at the IP layer). It just doesn't work that way.
This sentence is packed with wisdom: "TCP is a bidirectional, connection oriented, byte stream protocol that provides reliable, ordered delivery but does not preserve application message boundaries." Punch "TCP" into your favorite search engine and study until you understand precisely what every word in that sentence means. You will never write reliable, or even correct, TCP code until you do.
Whether the server is using IOCP or some other internal architecture has no effect on clients. That's totally invisible.
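If you need message semantics on top of TCP, you have to add your own framing, for example a length prefix. A minimal blocking-sockets sketch (not IOCP-specific; the function names are only illustrative):

// Sketch: length-prefixed framing over a TCP stream. recv() may return
// fewer bytes than requested, so loop until the full count has arrived.
static int recv_all(SOCKET s, char *buf, int len)
{
    int got = 0;
    while (got < len) {
        int n = recv(s, buf + got, len - got, 0);
        if (n <= 0)               // 0 = peer closed, SOCKET_ERROR = error
            return n;
        got += n;
    }
    return got;
}

// Receive one application "message": a 4-byte length in network byte
// order, followed by that many payload bytes.
static int recv_message(SOCKET s, char *payload, int maxlen)
{
    u_long netlen;
    if (recv_all(s, (char *)&netlen, sizeof(netlen)) <= 0)
        return -1;
    int len = (int)ntohl(netlen);
    if (len > maxlen)
        return -1;                // message larger than the buffer
    return recv_all(s, payload, len);
}

The sender does the mirror image: htonl() the length, send it, then send the payload. The receiver then gets exactly one message per recv_message() call, regardless of how TCP segments the bytes on the wire.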

UDP receive waits indefinitely in x64 bit mode?

My code consists of a UDP server which receives data from a UDP client. The code can be built in two configurations in Visual Studio, Win32 and x64. If I run the UDP server as a Win32 (32-bit) build, everything works fine and it receives data, but in the x64 build the receive waits indefinitely. There is no change in the code, just a single receive call, yet the two builds behave differently. My UDP receive looks like this.
WSADATA wsadata;
int error = WSAStartup(0X0202, &wsadata);
if (error) {
    cerr<<"UdpIPV4Server.cpp:- WSAStartup failed"<<endl;
    return -1;
}
if ((socket_var = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) == -1) {
    cerr<<"UdpIPV4Server.cpp:- socket function failed"<<endl;
    return -1;
}
memset((char *)&si_server, 0, sizeof(si_server));
si_server.sin_family = AF_INET;
si_server.sin_port = htons(7888);
char host[NI_MAXHOST] = "10.8.0.2";
if (inet_pton(AF_INET, host, &si_server.sin_addr) != 1) {
    cerr<<"UdpIPV4Server.cpp: inet_pton() failed\n";
    return -1;
}
if (bind(socket_var, (struct sockaddr *)&si_server, sizeof(si_server)) == -1) {
    cerr<<"UdpIPV4Server.cpp:- bind failed: "<<endl;
    return -1;
}
char recv_buffer[65534];
int buf_size = 65534;
memset((char*)&si_client, 0, sizeof(struct sockaddr_in));
int si_client_len = sizeof(si_client);
if ((recv_len = recvfrom(socket_var, recv_buffer, buf_size, 0, (struct sockaddr *)&si_client, &si_client_len)) == -1) {
    cerr<<"udpipv4server.cpp:- recvfrom failed"<<endl;
    return recv_len;
}
So what could be the issue? Why this change in behaviour?
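One thing that may help while narrowing this down is to put a receive timeout on the socket, so the x64 build returns with WSAETIMEDOUT instead of blocking forever; a minimal sketch (the 5-second value is chosen arbitrarily):

// Sketch: give recvfrom() a 5-second timeout so a missing datagram shows
// up as an error instead of an indefinite wait.
DWORD timeout_ms = 5000;
if (setsockopt(socket_var, SOL_SOCKET, SO_RCVTIMEO,
               (const char*)&timeout_ms, sizeof(timeout_ms)) == SOCKET_ERROR) {
    cerr<<"setsockopt(SO_RCVTIMEO) failed: "<<WSAGetLastError()<<endl;
}
// A subsequent recvfrom() now returns SOCKET_ERROR with
// WSAGetLastError() == WSAETIMEDOUT if nothing arrives in time.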

SQLBrowseConnect doesn't seem to enumerate servers on local domain

I am trying to enumerate local SQL instances using SQLBrowseConnect. Generally speaking, this is working fine, but we have one setup in which an SQLExpress instance is not discovered. Here is the code in question:
SQLSetConnectAttr(hSQLHdbc,
                  SQL_COPT_SS_BROWSE_SERVER,
                  _T("(local)"),
                  SQL_NTS);

CString inputParam = _T("Driver={SQL Server}");

SQLBrowseConnect(hSQLHdbc,
                 inputParam,
                 SQL_NTS,
                 szConnStrOut,
                 MAX_RET_LENGTH,
                 &sConnStrOut);
In the failed instance, the code is running on a domain controller. The missing local instance of SQL is an SQLExpress instance (version 9). However, the puzzling thing is that running sqlcmd -L shows the missing instance without any problems.
Am I missing something really silly? Please remember that on other systems and setups there is no issue.
After much investigation, I couldn't really find out what the problem was specifically. This one machine just would not discover its own instances of SQL using SQLBrowseConnect. I therefore decided to write my own version. Discovering SQL instances turns out to be pretty easy. You just send a broadcast UDP packet to port 1434 containing the payload 0x02 (1 byte) and wait for SQL servers to respond. They respond with one packet per server which details all the instances on that machine. The code required to do this is shown below:
// to enumerate sql instances we simply send 0x02 as a broadcast to port 1434.
// Any SQL servers will then respond with a packet containing all the information
// about installed instances. In this case we only send to the loopback address
// initialise
WSADATA WsaData;
WSAStartup( MAKEWORD(2,2), &WsaData );
SOCKET udpSocket;
struct sockaddr_in serverAddress;
if ((udpSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) < 0)
{
    return;
}
// set up the address
serverAddress.sin_family = AF_INET;
serverAddress.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
serverAddress.sin_port = htons(1434);
// the payload
char payload = 0x02;
// config the port for broadcast (not totally necessary right now but maybe in the future)
BOOL broadcast = TRUE;
setsockopt(udpSocket, SOL_SOCKET, SO_BROADCAST, reinterpret_cast<const char*>(&broadcast), sizeof(BOOL));
// receive address info
sockaddr_in RecvAddr;
RecvAddr.sin_family = AF_INET;
RecvAddr.sin_addr.s_addr = htonl(INADDR_ANY);
sockaddr_in SenderAddr;
int SenderAddrSize = sizeof (SenderAddr);
// bind the socket to the receive address info
int iResult = bind(udpSocket, (SOCKADDR *)&RecvAddr, sizeof(RecvAddr));
if (iResult != 0)
{
    int a = WSAGetLastError();
    return;
}
if (sendto(udpSocket, &payload, 1, 0, (struct sockaddr *)&serverAddress, sizeof(serverAddress)) < 0)
{
    int a = WSAGetLastError();
    return;
}
// set up a select so that if we don't get a timely response we just bomb out.
fd_set fds;
int n;
struct timeval tv;

// Set up the file descriptor set.
FD_ZERO(&fds);
FD_SET(udpSocket, &fds);

// Set up the struct timeval for the timeout.
tv.tv_sec = 5;
tv.tv_usec = 0;

// Wait until timeout or data received.
n = select((int)udpSocket, &fds, NULL, NULL, &tv);
if (n == 0)
{
    // timeout
    return;
}
else if (n == -1)
{
    // error
    return;
}
// receive buffer
char RecvBuf[1024];
int BufLen = 1024;
memset(RecvBuf, 0, BufLen);
iResult = recvfrom(udpSocket,
                   RecvBuf,
                   BufLen,
                   0,
                   (SOCKADDR *)&SenderAddr,
                   &SenderAddrSize);
if (iResult == SOCKET_ERROR)
{
    int a = WSAGetLastError();
    return;
}
// we have received some data. However we need to parse it to get the info we require
if (iResult > 0)
{
    // parse the string as required here. However, note that in my tests, I noticed
    // that the first 3 bytes always seem to be junk values and will mess with string
    // manipulation functions if not removed. Perhaps this is why SQLBrowseConnect
    // was having problems for me???
}
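For the parsing step: in the responses I have observed, byte 0 is 0x05 and bytes 1-2 are a little-endian payload length (which would explain the three "junk" bytes), followed by an ASCII string of semicolon-separated key/value pairs such as ServerName;HOST;InstanceName;SQLEXPRESS;IsClustered;No;Version;9.00.xxxx;tcp;1433;;. A rough sketch based on that observation rather than any official documentation (it reuses RecvBuf, BufLen and iResult from above and assumes <stdio.h> and <string.h>):

// Rough sketch: skip the 3-byte header, terminate the string, and walk the
// semicolon-separated tokens two at a time (key, value).
if (iResult > 3 && (unsigned char)RecvBuf[0] == 0x05)
{
    int dataLen = (unsigned char)RecvBuf[1] | ((unsigned char)RecvBuf[2] << 8);
    if (dataLen > iResult - 3)
        dataLen = iResult - 3;
    if (dataLen > BufLen - 4)
        dataLen = BufLen - 4;
    RecvBuf[3 + dataLen] = '\0';          // terminate the string portion

    char* context = NULL;
    char* key = strtok_s(RecvBuf + 3, ";", &context);
    while (key != NULL)
    {
        char* value = strtok_s(NULL, ";", &context);
        if (value == NULL)
            break;
        if (strcmp(key, "InstanceName") == 0)
            printf("Found instance: %s\n", value);
        key = strtok_s(NULL, ";", &context);
    }
}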

How to detect an "Over Current" event of a USB device?

I have to detect the "over current" event of a USB device.
I'm developing on a Linux system in C/C++.
How do I do that?
You can use uevents. Here is some tutorial.
For watching uevents you have to bind a netlink socket to the NETLINK_KOBJECT_UEVENT family:
int create_socket()
{
    int sock = -1;
    int result = 0;
    struct sockaddr_nl snl;

    memset(&snl, 0x00, sizeof(struct sockaddr_nl));
    snl.nl_family = AF_NETLINK;
    snl.nl_pid = getpid();
    snl.nl_groups = -1;

    sock = socket(PF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);
    if (sock != -1)
    {
        result = bind(sock, (struct sockaddr *) &snl, sizeof(struct sockaddr_nl));
        if (result < 0)
        {
            dbg("bind failed, exit\n");
            close(sock);
            sock = -1;
        }
    }
    else
        dbg("error getting socket, exit\n");

    return sock;
}

int main() {
    ...
    sock = create_socket();
    while (sock != -1)
    {
        buflen = recv(sock, &buffer, sizeof(buffer), 0);
        // parse buffer for event description
        ...
    }
}
One of the events for sure will indicate the over-current state.
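For the parsing step, a uevent received on that socket is an "action@devpath" header followed by NUL-separated KEY=VALUE strings; a rough sketch of walking the buffer is below. Note that the exact key a hub driver reports for over-current (the OVER_CURRENT prefix checked here) is an assumption on my part, so dump all the fields first and check what your kernel actually sends.

#include <stdio.h>
#include <string.h>

// Rough sketch: walk the NUL-separated strings in a received uevent and
// look for a key that mentions over-current (assumed name, driver-specific).
void parse_uevent(const char *buffer, int buflen)
{
    int i = 0;
    while (i < buflen)
    {
        const char *line = buffer + i;
        printf("uevent: %s\n", line);                 // dump every field

        if (strncmp(line, "OVER_CURRENT", 12) == 0)   // assumed key prefix
            printf("over-current reported: %s\n", line);

        i += strlen(line) + 1;                        // skip past the NUL
    }
}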
It might help you to look at the source code for your particular root port / host controller.
For example: http://www.spinics.net/lists/linux-usb/msg49451.html explains the overcurrent detection using a particular host controller driver.