C non-blocking send not working - c++

I'm trying to send a large amount of data to a server, which should accept the data and parse it. As far as I know, when you send() the data in blocking mode in one call, the stack splits the data into chunks and sends the chunks to the target. But I need to mark each chunk with a small identifier at the beginning of the data (let's say I'm placing a header on each chunk), so I decided to use a non-blocking send. I thought that when I do a non-blocking send, it sends as much as the buffer allows and then returns, leaving the chunking work to me, but that doesn't seem to be happening.
My code is:
struct sockaddr_in target;
SOCKET connection = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
target.sin_family = AF_INET;
target.sin_addr.s_addr = inet_addr(host);
target.sin_port = htons(port);
ULONG NonBlock;
NonBlock = 1;
if (ioctlsocket(connection, FIONBIO, &NonBlock) == SOCKET_ERROR)
{
return WSAGetLastError();
}
fd_set write_fds;
FD_ZERO(&write_fds);
FD_SET(connection, &write_fds);
struct timeval tv;
tv.tv_sec=1;
tv.tv_usec=0;
int result = connect(connection,(SOCKADDR*)&target,sizeof(target));
if(result==SOCKET_ERROR)
{
while(true)
{
result= select(connection+1,NULL,&write_fds,NULL,&tv);
printf("connect: result=%d\r\n",result);
if(result== -1)
{
return WSAGetLastError();
}
else break;
}
}
//later on
fd_set write_set;
int bytes_sent= 0;
int total_sent = 0;
int length = 0;
char *tmp = malloc(sizeof(header)+data_size); //data_size is the size of the large buffer
memcpy(tmp,packet,sizeof(header));
memcpy(tmp+sizeof(header),data,data_size);
int result;
FD_ZERO(&write_set);
FD_SET(connection,&write_set);
struct timeval time_out;
time_out.tv_sec=0;
time_out.tv_usec=1500;
while(total_sent < data_size)
{
length= (data_size+sizeof(my_header))-total_sent;
result = select(connection+1,NULL,&write_set,NULL,&time_out);
if(result== SOCKET_ERROR) return -1;
if(result!=0 && FD_ISSET(connection, &write_set))
{
bytes_sent = send(connection,tmp,length,0);
}
if(bytes_sent == SOCKET_ERROR)
{
return SOCKET_ERROR;
}
if(bytes_sent > 0)
{
//here i need to append a header to the new chunk
}
else break;
}
So basically my question is: why does send() on a non-blocking socket still block and not return after sending the first chunk, acting just like a regular blocking send? What I want is for send() to send one chunk of data of whatever length the system allows, so I pass the length of the whole data, assuming that the non-blocking send will return after sending the first chunk because the buffer is too big to be sent as one block.
UPDATE: some runnable code:
#include <windows.h>
#include <winsock.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <types.h>
typedef struct hdr{
uint8_t super_id;
}my_header,*pmy_header;
SOCKET connection;
int start_winsock()
{
WSADATA check;
int result = WSAStartup(MAKEWORD(2,2),&check);
return result;
}
int create_connection(char* host,int port)
{
struct sockaddr_in target;
connection = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
target.sin_family = AF_INET;
target.sin_addr.s_addr = inet_addr(host);
target.sin_port = htons(port);
int result = UnblockSocket();
if(result!=0) return WSAGetLastError();
fd_set write_fds;
FD_ZERO(&write_fds);
FD_SET(connection, &write_fds);
struct timeval tv;
tv.tv_sec=1;
tv.tv_usec=0;
result = connect(connection,(SOCKADDR*)&target,sizeof(target));
if(result==SOCKET_ERROR)
{
while(true)
{
result= select(connection+1,NULL,&write_fds,NULL,&tv);
if(result== -1)
{
return WSAGetLastError();
}
else break;
}
}
return 0;
}
int UnblockSocket()
{
ULONG NonBlock;
NonBlock = 1;
if (ioctlsocket(connection, FIONBIO, &NonBlock) == SOCKET_ERROR)
{
return WSAGetLastError();
}
return 0;
}
int SendMyData(pmy_header header,char * data,int data_size)
{
fd_set write_set;
int bytes_sent= 0;
int total_sent = 0;
int length = 0;
char *tmp = malloc(sizeof(my_header)+data_size);
memcpy(tmp,packet,sizeof(my_header));
memcpy(tmp+sizeof(my_header),data,data_size);
int result;
FD_ZERO(&write_set);
FD_SET(connection,&write_set);
struct timeval time_out;
time_out.tv_sec=0;
time_out.tv_usec=1500;
header->super_id=0xdead;
while(total_sent < data_size)
{
length= (data_size+sizeof(my_header))-total_sent;
if(result== SOCKET_ERROR) return -1;
if(result!=0 && FD_ISSET(connection, &write_set))
{
bytes_sent = send(connection,tmp,length,0);
}
printf("bytes sent per iteration=%d\n",bytes_sent);
if(bytes_sent == SOCKET_ERROR)
{
return SOCKET_ERROR;
}
if(bytes_sent > 0)
{
total_sent+= bytes_sent-sizeof(my_header);
tmp = realloc(tmp,sizeof(my_header)+(data_size-total_sent));
memcpy(tmp,header,sizeof(my_header));
memcpy(tmp+sizeof(my_header),data,data_size-total_sent);
}
else break;
}
free(tmp);
return total_sent;
}
int main(int argc, char *argv[])
{
start_winsock();
int result = create_connection("2.2.2.2",88);
if(result!=0) { printf("Cannot connect\n"); return 0; }
pmy_header *header = malloc(sizeof(my_header));
int buf_size = 500000;
char buffer_test[buf_size];
ZeroMemory(buffer_test,buf_size);
int count=0;
for(count;count<buf_size;count++)
{
strcat(buffer_test,"4");
}
result = SendMyData(header,buffer_test,buf_size);
}

send() is not guaranteed to send everything you ask it to send; it may send less. You MUST take the return value into account. If it is less than the amount you requested, you have to call send() again to re-send the remaining bytes before sending new bytes. And in the non-blocking case, you have to handle WSAEWOULDBLOCK as well.
And you don't put a header on each chunk that send() transmits; you put a header on each chunk you tell send() to send. You do your own chunking and don't worry about the chunking that TCP does internally. That is a transport implementation detail and does not affect your protocol. The receiver should pay attention to your chunk headers, calling recv() as many times as needed to receive each full header and payload, to account for TCP's chunking (see the receiving-side sketch after the code below).
Try something more like this instead:
SOCKET connection = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
if (connection == INVALID_SOCKET)
{
return WSAGetLastError();
}
ULONG NonBlock = 1;
int result = ioctlsocket(connection, FIONBIO, &NonBlock);
if (result == SOCKET_ERROR)
{
result = WSAGetLastError();
closesocket(connection);
return result;
}
struct sockaddr_in target;
memset(&target, 0, sizeof(target));
target.sin_family = AF_INET;
target.sin_addr.s_addr = inet_addr(host);
target.sin_port = htons(port);
result = connect(connection, (SOCKADDR*)&target, sizeof(target));
if (result == SOCKET_ERROR)
{
result = WSAGetLastError();
if (result != WSAEWOULDBLOCK)
{
closesocket(connection);
return result;
}
fd_set write_fds;
FD_ZERO(&write_fds);
FD_SET(connection, &write_fds);
struct timeval tv;
tv.tv_sec = 5;
tv.tv_usec = 0;
result = select(0, NULL, &write_fds, NULL, &tv);
if (result == SOCKET_ERROR)
{
result = WSAGetLastError();
closesocket(connection);
return result;
}
if (result == 0)
{
closesocket(connection);
return WSAETIMEDOUT;
}
}
char *tmp_data = data;
int data_remaining = data_size;
while (data_remaining > 0)
{
int pkt_data_size = min(data_remaining, 1024); // replace 1024 with whatever maximum chunk size you want...
int pkt_size = sizeof(header) + pkt_data_size;
char *pkt = malloc(pkt_size);
if (!pkt) return -1;
// fill header as needed...
memcpy(pkt+sizeof(header), tmp_data, pkt_data_size);
tmp_data += pkt_data_size;
data_remaining -= pkt_data_size;
char *tmp_pkt = pkt;
while (pkt_size > 0)
{
result = send(connection, tmp_pkt, pkt_size, 0);
if (result == SOCKET_ERROR)
{
result = WSAGetLastError();
if (result != WSAEWOULDBLOCK)
{
free(pkt);
return -1;
}
fd_set write_set;
FD_ZERO(&write_set);
FD_SET(connection, &write_set);
struct timeval time_out;
time_out.tv_sec = 5;
time_out.tv_usec = 0;
result = select(0, NULL, &write_set, NULL, &time_out);
if (result != 1)
{
free(pkt);
return -1;
}
continue;
}
tmp_pkt += result;
pkt_size -= result;
}
free(pkt);
}
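For completeness, here is a receiving-side sketch. It assumes one thing the code above does not spell out: that the header carries the payload length of each chunk (the question's one-byte my_header has no length field, so the receiver would otherwise have no way to know where one chunk ends and the next begins). chunk_hdr and recv_all are hypothetical names for this sketch, not Winsock APIs:
#include <stdint.h>   // uint8_t / uint32_t; winsock2.h and stdlib.h are assumed to be included already

// Hypothetical header extended with a length field (the question's my_header has none);
// byte order and struct padding are ignored here for brevity.
typedef struct chunk_hdr {
    uint8_t  super_id;
    uint32_t payload_len;
} chunk_hdr;

// recv() may return fewer bytes than requested, so loop until exactly 'size'
// bytes have arrived, the peer closes the connection, or an error occurs.
static int recv_all(SOCKET s, char *buf, int size)
{
    int total = 0;
    while (total < size)
    {
        int n = recv(s, buf + total, size - total, 0);
        if (n <= 0) return n;   // 0 = peer closed, SOCKET_ERROR = failure
        total += n;
    }
    return total;
}

// Read one complete chunk: the fixed-size header first, then the payload it announces.
static int receive_chunk(SOCKET connection)
{
    chunk_hdr hdr;
    if (recv_all(connection, (char*)&hdr, (int)sizeof(hdr)) <= 0)
        return -1;                       // closed or failed mid-header
    char *payload = (char*)malloc(hdr.payload_len);
    if (payload == NULL)
        return -1;
    if (recv_all(connection, payload, (int)hdr.payload_len) <= 0)
    {
        free(payload);
        return -1;                       // closed or failed mid-payload
    }
    // ... process hdr.super_id and the payload here ...
    free(payload);
    return (int)hdr.payload_len;
}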

Related

C++ is there anyway I can set a life cycle for recv function in WinSock?

Here is the code I have for accept and recv
int sock = createSocket();
int rc = ::bind(sock, glDispenserServerConfig->m_szServerPort);
sockaddr_in clientAddr;
int clientAddrSize = sizeof(clientAddr);
int clientSock;
bool saveImage;
while (-1 != (clientSock = accept(sock,(sockaddr*)&clientAddr, (socklen_t*)&clientAddrSize))) {
string requestStr;
int bufSize = 5;
requestStr.resize(bufSize);
string data = "";
string::size_type position;
bool getFlag = false;
while(1) {
recv(clientSock, &requestStr[0], bufSize, 0);
data += requestStr;
// position = data.find("}");
if(requestStr[0] == '}')
break;
else if(requestStr[1] == '}')
break;
else if(requestStr[2] == '}')
break;
else if(requestStr[3] == '}')
break;
else if(requestStr[4] == '}')
break;
if(requestStr[0] == 'G' && requestStr[1] == 'E' && requestStr[2] == 'T' && requestStr[3] == ' ') {
getFlag = true;
for(int i = 0; i < 20; i++) {
recv(clientSock, &requestStr[0], bufSize, 0);
data += requestStr;
}
break;
}
}
And two function being used:
int createSocket() {
int sock = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
if (sock == -1) {
printf("Create socket failed!\n");
}
return sock;
}
bool bind(int &sock, unsigned short port) {
if (sock <= 0) {
createSocket();
}
sockaddr_in addr;
addr.sin_family = AF_INET;
addr.sin_port = htons(port);
addr.sin_addr.s_addr = htons(0);
if (::bind(sock, (sockaddr*)&addr, sizeof(addr)) != 0) {
printf("bind port %d failed!\n", port);
return false;
}
printf("bind port %d succeeded.\n", port);
listen(sock, 10);
return true;
}
I was trying to get the data stream from a POST request. Since the size of the body is unknown, I made a loop to read the whole body. This port is designed for getting POST requests, so I break the loop and set a flag for later use.
My issue is that I check for the ending symbol, which is "}" here. I'm worried that if the ending symbol is ever missing, the server will block in recv().
So I wonder if there is some way I can set a maximum waiting time for recv(), like 2 seconds: if it did not read anything from the buffer in that time, break the loop and continue?
Based on the comments from Remy Lebeau, here is my new code:
sockaddr_in clientAddr;
int32 clientAddrSize = sizeof(clientAddr);
int32 clientSock;
bool saveImage;
struct timeval timeout;
socklen_t len = sizeof(timeout);
timeout.tv_sec = 10;
timeout.tv_usec = 0;
while (-1 != (clientSock = accept(sock, (sockaddr*)&clientAddr, (socklen_t*)&clientAddrSize))) {
int err = setsockopt(clientSock, SOL_SOCKET, SO_RCVTIMEO, (char *)&timeout.tv_sec, sizeof(struct timeval));
This will stop recv() from blocking after 10 seconds.
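For reference, the shape of the SO_RCVTIMEO option value differs by platform, so here is a sketch of both forms (assuming clientSock is the accepted socket from above). After the timeout expires, recv() returns an error (WSAETIMEDOUT on Windows, EAGAIN/EWOULDBLOCK on POSIX), so the return value still has to be checked:
// Windows (Winsock): SO_RCVTIMEO takes a DWORD timeout in milliseconds
DWORD timeout_ms = 10000;   // 10 seconds
setsockopt(clientSock, SOL_SOCKET, SO_RCVTIMEO,
           (const char*)&timeout_ms, sizeof(timeout_ms));

// POSIX (Linux/BSD): SO_RCVTIMEO takes a struct timeval
struct timeval tv;
tv.tv_sec  = 10;
tv.tv_usec = 0;
setsockopt(clientSock, SOL_SOCKET, SO_RCVTIMEO,
           (const char*)&tv, sizeof(tv));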

C++ Socket API "Heartbeat"

I'm trying to make a simple heartbeat check from client to server and vice versa: if the connection on either side is broken unexpectedly, it should print a message and call closesocket.
I spent 8 hours on this and it still isn't acceptable to my mentor. Right now I have something that works, but if a breakpoint is placed before the while loop and the connected client is forcefully closed, stepping past the breakpoint causes a crash when it should break the loop and print the error.
Server side code:
int main(int argc, char *argv[])
{
SOCKET s, sa;
WSAData oWSAData;
WORD wVersion = 0x0001;
WSAStartup(wVersion, &oWSAData);
s = socket(AF_INET, SOCK_STREAM, 0);
sockaddr_in srv_address;
memset(&srv_address, 0, sizeof(srv_address));
srv_address.sin_family = AF_INET;
srv_address.sin_addr.S_un.S_addr = htonl(INADDR_ANY);
srv_address.sin_port = htons(1099);
bind(s, (sockaddr*) &srv_address, sizeof(srv_address));
int l = listen(s, 10);
if (l < 0)
printf("Listen error\n");
else
{
printf("Listen OK. Listening on port %u\n",
htons(srv_address.sin_port));
sa = accept(s, NULL, NULL);
while (true)
{
char buffer[1000];
int nRecvLen = recv(sa, buffer, 999, 0);
buffer[nRecvLen] = '\0';
int r = recv(sa, NULL, 0, 0);
if (r == SOCKET_ERROR && WSAGetLastError() == WSAECONNRESET)
{
printf("Konekcija je naglo prekinuta!\n");
break;
}
else
{
if (nRecvLen > 0)
{
for (int i = 0; i < nRecvLen; i++)
{
cout << buffer[i];
}
}
}
}
closesocket(sa);
closesocket(s);
}
WSACleanup();
return 0;
}
and client side:
int main()
{
SOCKET s;
WSAData oWSAData;
WORD wVersion = 0x0001;
WSAStartup(wVersion, &oWSAData);
s = socket(AF_INET, SOCK_STREAM, 0);
sockaddr_in srv_address;
memset(&srv_address, 0, sizeof(srv_address));
srv_address.sin_family = AF_INET;
srv_address.sin_addr.S_un.S_un_b.s_b1 = xxx;
srv_address.sin_addr.S_un.S_un_b.s_b2 = xxx;
srv_address.sin_addr.S_un.S_un_b.s_b3 = x;
srv_address.sin_addr.S_un.S_un_b.s_b4 = xxx;
srv_address.sin_port = htons(1099);
int c = connect(s, (sockaddr*) &srv_address, sizeof(srv_address));
if (c < 0)
{
printf("Connection error\n");
cout << (WSAGetLastError());
}
else
{
string l = "Heartbeat\n";
int p = l.size();
char buff[1000];
strcpy_s(buff, l.c_str());
printf("Connected\n");
while (true)
{
if (send(s, buff, p, 0) > 0)
{
Sleep(1000);
}
else
{
printf("Konekcija je naglo prekinuta\n");
shutdown(s, SD_BOTH);
closesocket(s);
break;
}
}
WSACleanup();
return 0;
}
}

What's wrong with this socket select code?

#include <stdio.h>
#include <time.h>
#include <WinSock2.h>
#include <WS2tcpip.h>
#pragma comment(lib, "WS2_32.lib")
#define IP_ADDRESS "127.0.0.1"
#define PORT 20000
#define BUF_SIZE 64
#undef FD_SETSIZE
#define FD_SETSIZE 10000
void shuffle_buffer(char* buf, size_t size);
SOCKET create_socket();
void send_data(SOCKET sock);
int main()
{
WSADATA ws;
if (WSAStartup(MAKEWORD(2, 2), &ws) != 0)
{
printf("Init Windows Socket Failed::%d\n", GetLastError());
return -1;
}
const int CLIENT_SIZE = 1;
SOCKET socks[CLIENT_SIZE];
struct timeval tv = { 0, 10 };
fd_set fd_read, fd_write;
FD_ZERO(&fd_read);
FD_ZERO(&fd_write);
for (int i = 0; i < CLIENT_SIZE; i++) {
SOCKET sock = create_socket();
socks[i] = sock;
FD_SET(sock, &fd_write);
FD_SET(sock, &fd_read);
}
Sleep(1000);
int number_to_recv = CLIENT_SIZE;
while (number_to_recv > 0) {
int ret = select(CLIENT_SIZE, &fd_read, &fd_write, NULL, &tv);
for (int i = 0; i < CLIENT_SIZE; i++) {
if (FD_ISSET(socks[i], &fd_read)) {
char buf[BUF_SIZE];
int n = recv(socks[i], buf, BUF_SIZE, 0);
buf[n] = 0;
printf("%s\n", buf);
number_to_recv--;
}
if (FD_ISSET(socks[i], &fd_write)) {
send_data(socks[i]);
FD_CLR(socks[i], &fd_write);
//Sleep(1);
}
}
//printf("ret and number : %d, %d\n", ret, number_to_recv);
}
for (int i = 0; i < CLIENT_SIZE; i++) {
closesocket(socks[i]);
}
WSACleanup();
}
SOCKET create_socket()
{
SOCKET cli_sock;
struct sockaddr_in addr;
if ((cli_sock = socket(AF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET)
{
printf("Create Socket Failed::%d\n", GetLastError());
return -1;
}
//inet_pton
memset(addr.sin_zero, 0x00, 8);
addr.sin_family = AF_INET;
inet_pton(AF_INET, IP_ADDRESS, (void*)(&addr.sin_addr.s_addr));
addr.sin_port = htons(PORT);
if (connect(cli_sock, (struct sockaddr*)&addr, sizeof(addr)) == SOCKET_ERROR)
{
printf("Connect Error::%d\n", GetLastError());
return -1;
}
return cli_sock;
}
void send_data(SOCKET sock)
{
const int SEND_SIZE = BUF_SIZE / 2;
char buf[SEND_SIZE] = { 0 };
memset(buf, 'a', SEND_SIZE);
shuffle_buffer(buf, SEND_SIZE);
if (send(sock, buf, SEND_SIZE, 0) == SOCKET_ERROR)
{
printf("Send Info Error::%d\n", GetLastError());
}
}
void shuffle_buffer(char* buf, size_t size)
{
for (int i = 0; i < size; i++) {
buf[i] += int(rand() % 26);
}
}
The code above is a socket client using the select model, running on Windows 10. The problem is that after I send data, I cannot receive any data back (I am sure the server has sent data back); the code below never runs. What is the problem? Thanks.
The first parameter of select is maxfdp, and I know the difference between Windows and Unix, so on Windows this parameter seems not to be necessary. I can write data, but I cannot receive it.
if (FD_ISSET(socks[i], &fd_read)) {
char buf[BUF_SIZE];
int n = recv(socks[i], buf, BUF_SIZE, 0);
buf[n] = 0;
printf("%s\n", buf);
number_to_recv--;
}
select removes sockets from an fd_set if they are not readable/writable. You need to add them back in before the next time you call select.
The reason your code can write data is that sockets start out writable, so they are still set in fd_write and your code writes data. They do not start out readable if no data has been received yet, so they are removed from the fd_read set and your code then stops checking whether they are readable.
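Concretely, that means repopulating both sets at the top of every iteration, something like this sketch (reusing the names from the question; whether a socket still belongs in fd_write depends on whether you still have data to send):
while (number_to_recv > 0) {
    // select() overwrites the sets on return, so rebuild them before every call
    FD_ZERO(&fd_read);
    FD_ZERO(&fd_write);
    for (int i = 0; i < CLIENT_SIZE; i++) {
        FD_SET(socks[i], &fd_read);
        FD_SET(socks[i], &fd_write);   // only add sockets that still have data to send
    }
    // the first argument is ignored by Winsock
    int ret = select(0, &fd_read, &fd_write, NULL, &tv);
    if (ret == SOCKET_ERROR) break;
    // ... FD_ISSET checks and recv()/send() handling as before ...
}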

Memory leak on socket

I'm writing a TCP proxy, and while it seems to work it leaks memory. To see where the leak is, I modified the code to forward the incoming packets to itself, creating 10000 sockets and then closing them. However, I can't figure it out. I've used Deleaker and it doesn't show any leak (besides a small one that I don't care about).
But when I untick the two boxes, this comes out (see the screenshot).
Any help would be appreciated!
Code:
#include <winsock2.h>
#include <stdio.h>
#include <windows.h>
#include <ws2tcpip.h>
#include <tchar.h>
#include <process.h> /* _beginthread() */
// Need to link with Ws2_32.lib
#pragma comment(lib, "Ws2_32.lib")
#define PORT "1234" /* Port to listen on */
#define BUF_SIZE 4096 /* Buffer for transfers */
typedef struct {
char *host;
char *port;
SOCKET sock;
}
HandleStruct;
unsigned int S2C(SOCKET from, SOCKET to)
{
char buf[BUF_SIZE];
unsigned int disconnected = 0;
size_t bytes_read, bytes_written;
bytes_read = recv(from, buf, BUF_SIZE, 0);
if (bytes_read == 0) {
disconnected = 1;
}
else {
bytes_written = send(to, buf, bytes_read, 0);
if (bytes_written == -1) {
disconnected = 1;
}
}
return disconnected;
}
unsigned int C2S(SOCKET from, SOCKET to)
{
char buf[BUF_SIZE];
unsigned int disconnected = 0;
size_t bytes_read, bytes_written;
bytes_read = recv(from, buf, BUF_SIZE, 0);
if (bytes_read == 0) {
disconnected = 1;
}
else {
bytes_written = send(to, buf, bytes_read, 0);
if (bytes_written == -1) {
disconnected = 1;
}
}
return disconnected;
}
void handle(void *param)
{
HandleStruct *args = (HandleStruct*) param;
SOCKET client = args->sock;
const char *host = args->host;
const char *port = args->port;
SOCKET server = -1;
unsigned int disconnected = 0;
fd_set set;
unsigned int max_sock;
struct addrinfo *res = NULL;
struct addrinfo *ptr = NULL;
struct addrinfo hints;
/* Get the address info */
ZeroMemory( &hints, sizeof(hints) );
hints.ai_family = AF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
hints.ai_protocol = IPPROTO_TCP;
if (getaddrinfo(host, port, &hints, &res) != 0) {
perror("getaddrinfo");
closesocket(client);
return;
}
/* Create the socket */
server = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
if (server == INVALID_SOCKET) {
perror("socket");
closesocket(client);
return;
}
/* Connect to the host */
if (connect(server, res->ai_addr, res->ai_addrlen) == -1) {
perror("connect");
closesocket(client);
return;
}
if (client > server) {
max_sock = client;
}
else {
max_sock = server;
}
/* Main transfer loop */
while (!disconnected) {
FD_ZERO(&set);
FD_SET(client, &set);
FD_SET(server, &set);
if (select(max_sock + 1, &set, NULL, NULL, NULL) == SOCKET_ERROR) {
perror("select");
break;
}
if (FD_ISSET(client, &set)) {
disconnected = C2S(client, server);
}
if (FD_ISSET(server, &set)) {
disconnected = S2C(server, client);
}
}
closesocket(server);
closesocket(client);
fprintf(stderr, "Sockets Closed: %d/%d", server, client);
_endthread();
return;
}
int _tmain(int argc)
{
WORD wVersion = MAKEWORD(2, 2);
WSADATA wsaData;
int iResult;
SOCKET sock;
struct addrinfo hints, *res;
int reuseaddr = 1; /* True */
/* Initialise Winsock */
if (iResult = (WSAStartup(wVersion, &wsaData)) != 0) {
fprintf(stderr, "WSAStartup failed: %dn", iResult);
return 1;
}
char * host = "127.0.0.1";
char * port = "1234";
/* Get the address info */
ZeroMemory(&hints, sizeof hints);
hints.ai_family = AF_INET;
hints.ai_socktype = SOCK_STREAM;
if (getaddrinfo(NULL, PORT, &hints, &res) != 0) {
perror("getaddrinfo");
WSACleanup();
return 1;
}
/* Create the socket */
sock = socket(res->ai_family, res->ai_socktype, res->ai_protocol);
if (sock == INVALID_SOCKET) {
perror("socket");
WSACleanup();
return 1;
}
/* Enable the socket to reuse the address */
if (setsockopt(sock, SOL_SOCKET, SO_REUSEADDR, (const char*)&reuseaddr,
sizeof(int)) == SOCKET_ERROR) {
perror("setsockopt");
WSACleanup();
return 1;
}
/* Bind to the address */
if (bind(sock, res->ai_addr, res->ai_addrlen) == SOCKET_ERROR) {
perror("bind");
WSACleanup();
return 1;
}
/* Listen */
if (listen(sock, 6500) == SOCKET_ERROR) {
perror("listen");
WSACleanup();
return 1;
}
freeaddrinfo(res);
int i = 0;
HandleStruct *arg;
arg = (HandleStruct *)malloc(sizeof( HandleStruct));
/* Main loop */
while(1) {
int size = sizeof(struct sockaddr);
struct sockaddr_in their_addr;
SOCKET newsock;
ZeroMemory(&their_addr, sizeof (struct sockaddr));
newsock = accept(sock, (struct sockaddr*)&their_addr, &size);
if (newsock == INVALID_SOCKET) {
perror("acceptn");
}
else {
arg->sock = newsock;
arg->host = host;
arg->port = port;
if (i < 10000) {
_beginthread(handle, 0, (void*) arg);
i++;
}
}
}
closesocket(sock);
WSACleanup();
return 0;
}
I'm not familiar with reading the program in the screenshots you posted; however, you should probably be concerned about this line:
arg = (HandleStruct *)malloc(sizeof( HandleStruct));
Here you are allocating memory for a HandleStruct via malloc(), and it doesn't appear to be cleaned up anywhere with a subsequent call to free(). You pass arg into handle() but still never deallocate the memory.
It doesn't appear to be handle()'s responsibility to clean arg up, so you should probably have a call to free() after the while loop, or you could allocate a HandleStruct at the beginning of each loop iteration and deallocate it at the end.
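In plain C, one common pattern, sketched below, is to allocate a fresh HandleStruct per accepted connection and let the worker thread free it once it has copied what it needs. Note that this differs from the suggestion above in that it does make handle() responsible for the cleanup, but it avoids sharing one struct between the accept loop and the threads:
/* In the accept loop: a fresh allocation per connection */
HandleStruct *arg = (HandleStruct *)malloc(sizeof(HandleStruct));
if (arg != NULL) {
    arg->sock = newsock;
    arg->host = host;
    arg->port = port;
    if (_beginthread(handle, 0, (void*)arg) == -1L)
        free(arg);   /* thread creation failed, nobody else will free it */
}

/* At the top of handle(): take ownership, copy the fields, release the struct */
void handle(void *param)
{
    HandleStruct *args = (HandleStruct *)param;
    SOCKET client = args->sock;
    const char *host = args->host;
    const char *port = args->port;
    free(args);                /* the thread owns the allocation */
    /* ... rest of the proxy logic unchanged ... */
}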
Or you could save yourself the hassle and use std::unique_ptr, and optionally change your threads to std::thread, which self-documents who owns the memory etc:
void handle(std::unique_ptr<HandleStruct> args)
{
// Manipulate args
...
}
int main()
{
std::unique_ptr<HandleStruct> pHandle = std::make_unique<HandleStruct>();
for (;;)
{
...
pHandle->sock = newsock;
pHandle->host = host;
pHandle->port = port;
// Create thread:
std::thread t(&handle, pHandle);
// Wait for thread to finish so pHandle doesn't change while we are using it on another thread
// t.join();
}
}
Every socket uses some memory in the operating system.
Here is the description from the Linux accept man page:
ENOBUFS, ENOMEM
Not enough free memory. This often means that the memory allocation is limited by the socket buffer limits, not by the system memory.
The OS might not clean them up immediately.
You are also trying to create 10000 threads, which take memory as well, assuming their creation doesn't fail long before that with errno set to EAGAIN.

select function windows vs linux

I have the following code, which runs just fine under Windows, but on Linux the same code (besides a few library changes) does not: the select function does not respond to new connections.
The relevant code is as follows:
struct SocketState
{
int id; // Socket handle
int recv; // Receiving?
int send; // Sending?
int sendSubType; // Sending sub-type
char buffer[128];
int len;
int authenticate;
char userName[10];
};
struct SocketState sockets[MAX_SOCKETS]={0};
int socketsCount = 0;
int main()
{
int listenSocket = socket(PF_INET, SOCK_STREAM, 0);
if (-1 == listenSocket)
{
return 0 ;
}
struct sockaddr_in serverService;
serverService.sin_family = AF_INET;
serverService.sin_addr.s_addr = htonl(INADDR_ANY);
serverService.sin_port = htons(TIME_PORT);
if (-1 == bind(listenSocket, (struct sockaddr*)&serverService, sizeof(serverService)))
{
perror("Couldn't bind socket");
return -1;
}
if (-1 == listen(listenSocket, 10))
{
perror("Couldn't listen to port");
}
addSocket(listenSocket, LISTEN);
while (true)
{
fd_set waitRecv;
FD_ZERO(&waitRecv);
for (int i = 0; i < MAX_SOCKETS; i++)
{
if ((sockets[i].recv == LISTEN) || (sockets[i].recv == RECEIVE))
FD_SET(sockets[i].id, &waitRecv);
}
fd_set waitSend;
FD_ZERO(&waitSend);
for (int i = 0; i < MAX_SOCKETS; i++)
{
if (sockets[i].send == SEND)
FD_SET(sockets[i].id, &waitSend);
}
int nfd;
nfd = select(0, &waitRecv, &waitSend, NULL, NULL);
if (nfd == -1)
{
return 0 ;
}
}
}
You are passing 0 as the first argument to select. That is wrong. On Windows this parameter is ignored, but on Linux it has to be set correctly.
It has to be set to the number of the highest fd plus 1.
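A minimal sketch of that fix, reusing the variables from the question, tracks the highest descriptor while filling the sets:
int maxfd = 0;
fd_set waitRecv, waitSend;
FD_ZERO(&waitRecv);
FD_ZERO(&waitSend);
for (int i = 0; i < MAX_SOCKETS; i++)
{
    if (sockets[i].recv == LISTEN || sockets[i].recv == RECEIVE)
    {
        FD_SET(sockets[i].id, &waitRecv);
        if (sockets[i].id > maxfd) maxfd = sockets[i].id;
    }
    if (sockets[i].send == SEND)
    {
        FD_SET(sockets[i].id, &waitSend);
        if (sockets[i].id > maxfd) maxfd = sockets[i].id;
    }
}
// On Linux the first argument must be the highest descriptor plus one;
// Windows ignores it, so this form works on both platforms.
int nfd = select(maxfd + 1, &waitRecv, &waitSend, NULL, NULL);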