I have a simple socket server set up using sys/socket and OpenSSL. For each connection, the client is required to send a message to the server, receive a response and then reply to that response.
I can't find any clear mechanism for making these sockets non-blocking? The system has to be able to handle multiple sockets concurrently...
My server code for listening for connections:
while (1)
{
    struct sockaddr_in addr;
    socklen_t len = sizeof(addr);
    SSL *ssl;

    int client = accept(sock, (struct sockaddr*)&addr, &len);
    if (client > 0)
    {
        std::cout << "Client accepted..." << std::endl;
    }
    else
    {
        perror("Unable to accept");
        exit(EXIT_FAILURE);
    }

    ssl = SSL_new(ctx);
    SSL_set_fd(ssl, client);

    if (SSL_accept(ssl) <= 0)
    {
        std::cout << "ERROR" << std::endl;
    }
    else
    {
        char buff[1024];
        int n = SSL_read(ssl, buff, sizeof(buff) - 1);
        if (n > 0)
        {
            buff[n] = '\0'; // null-terminate before printing
            std::cout << buff << std::endl;
        }

        std::string reply = "Thanks from the server";
        SSL_write(ssl, reply.c_str(), reply.size());

        char another_buff[1024];
        n = SSL_read(ssl, another_buff, sizeof(another_buff) - 1);
        if (n > 0)
        {
            another_buff[n] = '\0';
            std::cout << another_buff << std::endl;
        }
    }
}
I've looked into select(), however this doesn't seem to allow concurrency as such, but rather lets the system know when a socket is free?
Does anyone have any experience in solving this basic problem?
First, with server code, it's important to differentiate between concurrency and parallelism. A reasonable server will typically handle many more connections concurrently than its number of cores. Consequently, it's important to make the code concurrent in the sense that it can (efficiently) handle many concurrent connections, in a way that does not rely on parallelism (in the sense of having each connection handled by a thread).
In this sense, select is actually a reasonable choice for concurrency, and gives you the effect of being non-blocking.
When your system handles multiple sockets concurrently, select indicates on which socket(s) you can perform operations such as send and recv without their blocking when you do so. If you use select well you won't have cases where your thread is idling, waiting indefinitely for some operation to proceed, while other sockets are ready.
The minimal example from gnu.org shows a reasonably efficient server which it seems you can adapt to your needs.
fd_set active_fd_set, read_fd_set;
int i;
FD_ZERO(&active_fd_set);
// Use FD_SET to add sockets according to what you want to do with them

while (1)
{
    read_fd_set = active_fd_set;

    /* This call (checking to see which sockets can be read) is the
     * only thing that blocks. But if it does, no socket is ready for reading. */
    if (select(FD_SETSIZE, &read_fd_set, NULL, NULL, NULL) < 0)
    {
        // Handle error
    }

    for (i = 0; i < FD_SETSIZE; ++i)
        if (FD_ISSET(i, &read_fd_set))
        {
            // Here you can read without its blocking.
        }
}
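Since your sockets are wrapped in SSL, also note that SSL_read can fail simply because the TLS layer needs more I/O before it can make progress. With the underlying fd in non-blocking mode, check SSL_get_error before treating a result as fatal. A minimal sketch, assuming ssl and client are set up as in your code:

#include <fcntl.h>

// Put the underlying fd into non-blocking mode once, after accept()
fcntl(client, F_SETFL, fcntl(client, F_GETFL, 0) | O_NONBLOCK);

char buff[1024];
int n = SSL_read(ssl, buff, sizeof(buff) - 1);
if (n > 0)
{
    buff[n] = '\0';
    std::cout << buff << std::endl;
}
else
{
    switch (SSL_get_error(ssl, n))
    {
    case SSL_ERROR_WANT_READ:   // retry after select() reports readable
    case SSL_ERROR_WANT_WRITE:  // retry after select() reports writable
        break;                  // not a failure, just not ready yet
    default:
        // real error or clean shutdown: SSL_free(ssl) and close the fd
        break;
    }
}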
I am relatively new to network programming and multithreading in C++. Currently my recv() call returns an unknown error. I'm not quite sure where the error is coming from at the moment and would appreciate some help.
I used PuTTY to connect to the server locally.
// Threaded_TCPListener member function definitions (class declaration omitted)

int Threaded_TCPListener::Init()
{
    // Initializing WinSock
    WSADATA wsData;
    WORD ver = MAKEWORD(2,2);
    int winSock = WSAStartup(ver, &wsData);
    if (winSock != 0)
        return winSock;

    // Creating listening socket
    this->socket = ::socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
    if (this->socket == INVALID_SOCKET)
        return WSAGetLastError();

    // Fill sockaddr with ip addr and port
    sockaddr_in hint;
    hint.sin_family = AF_INET;
    hint.sin_port = htons(this->port);
    inet_pton(AF_INET, this->ipAddress, &hint.sin_addr);

    // Bind hint to socket
    if (bind(this->socket, (sockaddr*)&hint, sizeof(hint)) == SOCKET_ERROR)
        return WSAGetLastError();

    // Start listening on socket
    if (listen(this->socket, SOMAXCONN) == SOCKET_ERROR)
        return WSAGetLastError();

    // Accept first client
    this->createAcceptThread();
    return 0;
}

int Threaded_TCPListener::Run()
{
    bool isRunning = true;

    // Read from all clients
    std::vector<std::thread> threads;
    threads.reserve(this->clients.size());

    // Recv from client sockets
    for (int i = 0; i < this->clients.size(); ++i)
    {
        threads.emplace_back(std::thread(&Threaded_TCPListener::receiveFromSocket, this, socket));
    }

    // Wait for all threads to finish
    for (std::thread& t : threads)
    {
        t.detach();
    }
    return 0;
}

void Threaded_TCPListener::onMessageReceived(int clientSocket, const char* msg, int length)
{
    Threaded_TCPListener::broadcastToClients(clientSocket, msg, length);

    std::thread t(&Threaded_TCPListener::receiveFromSocket, this, clientSocket);
    t.detach();
    return;
}

void Threaded_TCPListener::sendMessageToClient(int clientSocket, const char * msg, int length)
{
    send(clientSocket, msg, length, 0);
    return;
}

void Threaded_TCPListener::broadcastToClients(int senderSocket, const char * msg, int length)
{
    std::vector<std::thread> threads;
    threads.reserve(clients.size());

    // Iterate over all clients
    for (int sendSock : this->clients)
    {
        if (sendSock != senderSocket)
            threads.emplace_back(std::thread(&Threaded_TCPListener::sendMessageToClient, this, sendSock, msg, length));
    }

    // Wait for all threads to finish
    for (std::thread& t : threads)
        t.join();
    return;
}

void Threaded_TCPListener::createAcceptThread()
{
    // Start accepting clients on a new thread
    this->listeningThread = std::thread(&Threaded_TCPListener::acceptClient, this);
    this->listeningThread.detach();
    return;
}

void Threaded_TCPListener::acceptClient()
{
    int client = accept(this->socket, nullptr, nullptr);

    // Error
    if (client == INVALID_SOCKET)
    {
        std::printf("Accept Err: %d\n", WSAGetLastError());
    }
    // Add client to clients queue
    else
    {
        // Add client to queue
        this->clients.emplace(client);

        // Client Connect Confirmation
        onClientConnected(client); // Prints msg on server

        // Create another thread to accept more clients
        this->createAcceptThread();
    }
    return;
}

void Threaded_TCPListener::receiveFromSocket(int receivingSocket)
{
    // Byte storage
    char buff[MAX_BUFF_SIZE];

    // Clear buff
    memset(buff, 0, sizeof(buff));

    // Receive msg
    int bytesRecvd = recv(receivingSocket, buff, MAX_BUFF_SIZE, 0);
    if (bytesRecvd <= 0)
    {
        char err_buff[1024];
        strerror_s(err_buff, bytesRecvd);
        std::cerr << err_buff;

        // Close client
        this->clients.erase(receivingSocket);
        closesocket(receivingSocket);
        onClientDisconnected(receivingSocket); // Prints msg on server
    }
    else
    {
        onMessageReceived(receivingSocket, buff, bytesRecvd);
    }
}
I am trying to create a multithreaded TCP 'server' that will handle incoming clients by having an accept thread continuously running (listening for new connections), and a thread waiting with a recv block for each client connected to the server.
Your Init looks fine:
create socket, bind it, listen on it, start accept thread
Your accept thread's acceptClient looks sort of OK:
print some message
add the client socket to clients queue
create a new accept thread
Your Run makes no sense:
create one thread per element in clients to receive from the listening socket
It looks like you are spawning a new thread for every single socket action. That is a pretty wasteful design: a thread doesn't have to exit after one action; as soon as it is done it can go back and do something else.
So creating a new accept thread in acceptClient is a waste; you could just loop back to the beginning and ::accept the next client. Like so:
acceptClient() {
    while (alive) {
        int client = accept(socket, ...);
        createClientHandler(client);
    }
}
What seems to be missing is spawning a new client thread to service the client socket. You currently do this in Run, but that's before any of the clients are actually accepted. And you do it for the wrong socket! Instead, you should be spawning the receiveFromSocket threads in acceptClient, and passing it the client socket. So that's a bug.
In your receiveFromSocket you also need not create another thread to receiveFromSocket again -- just loop back to the beginning.
The biggest concern with this thread-per-action design is that you are spawning sender threads on every incoming message. This means you could actually have several sender threads attempting to ::send on the same TCP socket. That's not very safe.
The order of calls made to WSASend is also the order in which the buffers are transmitted to the transport layer. WSASend should not be called on the same stream-oriented socket concurrently from different threads, because some Winsock providers may split a large send request into multiple transmissions, and this may lead to unintended data interleaving from multiple concurrent send requests on the same stream-oriented socket.
https://learn.microsoft.com/en-us/windows/desktop/api/winsock2/nf-winsock2-wsasend
Similarly, instead of spawning threads in broadcastToClients, I suggest you just spawn one persistent sender thread per client socket in acceptClient (together with the receiveFromSocket thread within some createClientHandler).
To communicate with the sender threads you should use thread-safe blocking queues. Each sender thread would look like this:
while (alive) {
    msg = queue.next_message();
    send(client_socket, msg);
}
Then on message received you just do:
for (client : clients) {
    client.queue.put_message(msg);
}
So to summarize, to handle each client you need a structure like this:
struct Client {
    int client_socket;
    BlockingQueue queue;
    // optionally if you want to keep track of your threads
    // to be able to safely clean up
    std::thread recv_thread, send_thread;
};
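For reference, here is a minimal sketch of such a thread-safe blocking queue, built on std::mutex and std::condition_variable. The interface matches the pseudocode above; std::string as the message type is just for illustration:

#include <condition_variable>
#include <mutex>
#include <queue>
#include <string>

// Minimal sketch of a thread-safe blocking queue; names are illustrative.
class BlockingQueue {
public:
    void put_message(std::string msg) {
        {
            std::lock_guard<std::mutex> lock(mutex_);
            queue_.push(std::move(msg));
        }
        cv_.notify_one(); // wake one waiting sender thread
    }

    std::string next_message() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return !queue_.empty(); }); // block until work arrives
        std::string msg = std::move(queue_.front());
        queue_.pop();
        return msg;
    }

private:
    std::mutex mutex_;
    std::condition_variable cv_;
    std::queue<std::string> queue_;
};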
Safe cleanup is a whole other story.
Finally, a remark on this comment in your code:
// Wait for all threads to finish
for (std::thread& t : threads)
{
    t.detach();
}
That's almost the opposite of what std::thread::detach does:
https://en.cppreference.com/w/cpp/thread/thread/detach
It allows you to destroy the thread object without having to wait for the thread to finish execution.
There is a misconception in the code about how a TCP server has to be implemented:
You seem to assume that you can have a single server socket file descriptor which can handle all communication. This is not the case. You must have a single dedicated socket file descriptor which is just used for listening and accepting incoming connections, and then you have one additional file descriptor for each existing connection.
In your code I see that you always invoke receiveFromSocket() with the listening socket. This is wrong. Invoking receiveFromSocket() in a loop for all clients is also wrong.
What you rather need to do is:
- Have one dedicated thread which calls accept() in a loop. There is no performance benefit in calling accept() from multiple threads.
- Once accept() returns a new connection, spawn a new thread which calls recv() in a loop (see the sketch after this list). This will then block and wait for new data, as you expect in your question.
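A minimal sketch of that structure, reusing names from the question's code where possible; acceptLoop and recvLoop are illustrative names, not existing member functions:

void Threaded_TCPListener::acceptLoop()
{
    while (true)
    {
        // Dedicated listener: accept() in a loop, one thread total
        int client = accept(this->socket, nullptr, nullptr);
        if (client == INVALID_SOCKET)
            continue;

        this->clients.emplace(client);
        onClientConnected(client);

        // One recv() loop thread per connection
        std::thread(&Threaded_TCPListener::recvLoop, this, client).detach();
    }
}

void Threaded_TCPListener::recvLoop(int clientSocket)
{
    char buff[MAX_BUFF_SIZE];
    while (true)
    {
        int n = recv(clientSocket, buff, sizeof(buff), 0); // blocks, as intended
        if (n <= 0)
            break; // disconnect or error
        onMessageReceived(clientSocket, buff, n); // no new threads needed here
    }
    this->clients.erase(clientSocket);
    closesocket(clientSocket);
    onClientDisconnected(clientSocket);
}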
You also need to drop the habit of calling individual functions from new threads. This is not multithreaded programming. A thread usually contains a loop. Everything else is usually a design flaw.
Also note that multithreaded programming is still rocket science in 2019, especially in C++. If you are not an absolute expert you will not be able to do it. Also note that absolute experts in multithreaded programming will try to avoid multithreaded programming whenever possible. A lot of seemingly concurrent tasks which are I/O bound can better be handled by a single-threaded, event-based system.
I'm writing a network game for a university project, and while I have messages being sent and received between a client and a server, I'm unsure how I would go about implementing a writeable fd_set (my lecturer's example code only included a readable fd_set), and what the function of both fd_sets is with select(). Any insight you could give would be great in helping me understand this.
My server code is as such:
bool ServerSocket::Update() {
    // Update the connections with the server
    fd_set readable;
    FD_ZERO(&readable);

    // Add server socket, which will be readable if there's a new connection
    FD_SET(m_socket, &readable);

    // Add connected clients' sockets
    if (!AddConnectedClients(&readable)) {
        Error("Couldn't add connected clients to fd_set.");
        return false;
    }

    // Set timeout to wait for something to happen (0.5 seconds)
    timeval timeout;
    timeout.tv_sec = 0;
    timeout.tv_usec = 500000;

    // Wait for the socket to become readable
    int count = select(0, &readable, NULL, NULL, &timeout);
    if (count == SOCKET_ERROR) {
        Error("Select failed, socket error.");
        return false;
    }

    // Accept new connection to the server socket if readable
    if (FD_ISSET(m_socket, &readable)) {
        if (!AddNewClient()) {
            return false;
        }
    }

    // Check all clients to see if there are messages to be read
    if (!CheckClients(&readable)) {
        return false;
    }
    return true;
}
A socket becomes:
readable if there is either data in the socket receive buffer or a pending FIN (recv() is about to return zero)
writable if there is room in the socket send buffer. Note that this is true nearly all the time, so you should only watch for writability after a send has failed with EWOULDBLOCK/EAGAIN, and stop watching once the pending data has been flushed.
You'd create an fd_set variable called writeable, initialize it the same way (with the same sockets), and pass it as select's third argument:
select(0, &readable, &writeable, NULL, &timeout);
Then after select returns you'd check whether each socket is still in the set writeable. If so, then it's writeable.
Basically, exactly the same way readable works, except that it tells you a different thing about the socket.
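To make this concrete, here is a sketch of how Update() might grow a write set. AddClientsAwaitingWrite() and m_clientsAwaitingWrite are hypothetical names for "the sockets that previously hit EWOULDBLOCK", not part of your existing code:

fd_set readable, writeable;
FD_ZERO(&readable);
FD_ZERO(&writeable);

// Listening socket becomes readable when a new connection arrives
FD_SET(m_socket, &readable);
AddConnectedClients(&readable);

// Hypothetical: add only the sockets with queued data that hit EWOULDBLOCK
AddClientsAwaitingWrite(&writeable);

timeval timeout;
timeout.tv_sec = 0;
timeout.tv_usec = 500000;

int count = select(0, &readable, &writeable, NULL, &timeout);
if (count > 0) {
    for (SOCKET s : m_clientsAwaitingWrite) {  // hypothetical container
        if (FD_ISSET(s, &writeable)) {
            // Room in the send buffer again: flush this client's queued data,
            // and stop watching the socket once its queue is empty.
        }
    }
}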
select() is terribly outdated and its interface is arcane. poll (or its Windows counterpart WSAPoll) is a modern replacement for it, and should always be preferred.
It would be used in the following manner:
WSAPOLLFD pollfd = {m_socket, POLLWRNORM, 0};
int rc = WSAPoll(&pollfd, 1, 100);
if (rc == 1) {
    // Socket is ready for writing!
}
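On POSIX systems the poll(2) call is nearly identical; POLLOUT is the portable event flag for writability:

#include <poll.h>

struct pollfd pfd = { m_socket, POLLOUT, 0 };
int rc = poll(&pfd, 1, 100); /* 100 ms timeout */
if (rc == 1 && (pfd.revents & POLLOUT)) {
    /* Socket is ready for writing! */
}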
Firstly, I'm coding in C++ and running on Linux/CentOS 6.4.
So after a really long time and reading a lot of different books on sockets, I finally have at least my client and my server partially working.
First, I want to continuously accept messages from different clients. I have already set up the client, and it finally compiled successfully, at least. Now I need to set up my server so that I can properly test.
What I'm doing is implementing the dining philosophers problem with sockets, with each client/philosopher representing a different process. I was going to go through this whole thing where the server would keep track of everything, like the states of all the clients. That was too difficult, so now the clients just send their status to the server and the server prints it out.
I was thinking of putting in a do/while loop to continuously accept messages, but I'm not sure what I should use to stop the loop. Note that I will have a while loop set up in my client, which is signaled to stop after an elapsed amount of time. It should then close that particular client. I do have a signal handler in my server, but I am not sure it works.
#include "helper.h"
char buffer[4096];
void sigchld_handler(int signo)
{
while (waitpid(-1, NULL, WNOHANG) > 0);
}
void client(int &newsock, int nread)
{
do
{
int nread = recv(newsock, buffer,sizeof(buffer), 0);
puts(buffer);
}while(nread!=0);
}
int main(int argc, char *argv[])
{
struct sockaddr_in sAddr, cli_addr;
socklen_t client_len;
int listensock;
int newsock;
int result;
int nread=1;
pid_t childid; ;
int status;
if((listensock = socket(AF_INET, SOCK_STREAM, 0))<0)
{
perror("Problem in creating socket");
exit(2);
}
sAddr.sin_family = AF_INET;
sAddr.sin_port = htons(3333);
sAddr.sin_addr.s_addr = htonl(INADDR_ANY);
bind(listensock, (struct sockaddr *) &sAddr, sizeof(sAddr));
if (result < 0) {
perror("exserver2");
return 0;
}
result = listen(listensock, 5);
if (result < 0) {
perror("exserver2");
return 0;
}
signal(SIGCHLD, sigchld_handler);
while (1) {
client_len = sizeof(cli_addr);
newsock = accept(listensock,(struct sockaddr *)&cli_addr, &client_len);
if ((childid = fork()) == 0) {
printf("child process %i created.\n", getpid());
close(listensock);
client(newsock, nread);
}
if(status<0)
{
printf("%s\n" "Read error");
exit(1);
}
close(newsock);
}
}
You need a multiplexing syscall like poll(2) (or the old, nearly obsolete, select(2) syscall). You may want to use some (or implement your own) event loop. See this & that answer. Read about the C10K problem.
Every server needs an event loop.
Read Advanced Linux Programming (or some Posix network programming book).
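For instance, here is a minimal sketch of a poll(2) event loop for your server, assuming listensock is already listening and reusing your global buffer. It is a fork-free, single-threaded alternative to the code above:

#include <poll.h>

struct pollfd fds[64];
int nfds = 1;
fds[0].fd = listensock;
fds[0].events = POLLIN;

for (;;) {
    if (poll(fds, nfds, -1) < 0) {  /* block until some fd is ready */
        perror("poll");
        break;
    }
    if (fds[0].revents & POLLIN) {  /* new connection on the listener */
        int clientfd = accept(listensock, NULL, NULL);
        if (clientfd >= 0 && nfds < 64) {
            fds[nfds].fd = clientfd;
            fds[nfds].events = POLLIN;
            nfds++;
        }
    }
    for (int i = 1; i < nfds; i++) {
        if (fds[i].revents & POLLIN) {
            int n = recv(fds[i].fd, buffer, sizeof(buffer) - 1, 0);
            if (n <= 0) {           /* client disconnected: drop this slot */
                close(fds[i].fd);
                fds[i--] = fds[--nfds];
            } else {
                buffer[n] = '\0';
                puts(buffer);
            }
        }
    }
}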
You may want to simply run your server program under tcpserver (see http://cr.yp.to/ucspi-tcp.html). This will spawn a new instance of your program each time a client connects to your program (on the port that you specify). This way, you can focus on the core logic of your program, and let tcpserver handle all of the heavy lifting as far as the socket programming, etc. tcpserver will pass input from the client to your program's stdin, and output from your program's stdout will be sent back to the client.
I'm designing a distributed server/client system in C++, in which many clients send requests to many servers through TCP, and each server spawns a thread to handle the request and send back its response. In my use case only a limited number of clients will access the server, and I need very high performance. The data sent between client and server is small, but very frequent, so creating a connection and tearing it down after use is expensive. I therefore want to use connection caching to solve this problem: once a connection is created, it will be stored in a cache for future use. (Assume that the number of clients will not exceed the size of the cache.)
My question is:
I saw someone say that connection pooling is a client-side technique. If connection pooling is only used on the client side, then the first time the client makes a connection to a server and sends data. That act of making a connection triggers the accept() function on the server side, which returns a socket for receiving from the client. So when the client wants to reuse an existing connection (from the cache), it doesn't make a new connection, but just sends data. The problem is: if no connection is made, who triggers accept() on the server side and spawns a thread?
If connection pooling also needs to be implemented on the server side, how can I know where a request comes from? I can only get the client address from accept(), but by then accept() has already made a new socket for that request, so there is no point in using a cached connection.
Any answer and suggestion will be appreciated. Or can anyone give me an example of a connection pool or connection caching?
I saw someone say that connection pooling is a client-side technique. ... if no connection is made, who triggers accept() on the server side and spawns a thread?
Firstly, connection pooling is not just a client-side technique; it's a connection-mode technique. It applies to both types of peer (the "server" and the "client").
Secondly, accept doesn't need to be called to start a thread. Programs can start threads for any reason they like... They could start threads just to start more threads, in a massively parallelised loop of thread creation. (edit: we call this a "fork bomb")
Finally, an efficient thread-pooling implementation won't start a thread for each client. Each thread typically occupies between 512 KB and 4 MB (counting stack space and other context information), so if you have 10000 clients each occupying that much, that's a lot of wasted memory.
I want to do so, but just don't know how to do it in the multithreaded case.
You shouldn't use multithreading here... At least, not until you have a solution that uses a single thread, and you decide that it's not fast enough. At the moment you don't have that information; you're just guessing, and guessing doesn't guarantee optimisation.
At the turn of the century there were FTP servers that solved the C10K problem; they were able to handle 10000 clients at any given time, browsing, downloading or idling as users tend to do on FTP servers. They solved that problem not by using threads, but by using non-blocking and/or asynchronous sockets and/or calls.
To clarify, those servers handled thousands of connections on a single thread! One typical way is to use select, but I'm not particularly fond of that method because it requires a rather ugly series of loops. I prefer to use ioctlsocket for Windows and fcntl for other POSIX OSes to set the file descriptor into non-blocking mode, e.g.:
#ifdef WIN32
ioctlsocket(fd, FIONBIO, (u_long[]){1});
#else
fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);
#endif
At this point, recv and read won't block when operating on fd; if there's no data available, they'll return an error value immediately rather than waiting for data to arrive. That means you can loop on multiple sockets.
If connection pooling also needs to be implemented on the server side, how can I know where a request comes from?
Store the client fd alongside its struct sockaddr_storage, and any other stateful information you need to store about clients, in a struct that you declare however you feel. If this ends up being 4KB (which is a fairly large struct, usually about as large as they need to get) then 10000 of these will only occupy about 40000KB (~40MB). Even the mobile phones of today should have no problems handling that. Consider completing the following code for your needs:
struct client {
    struct sockaddr_storage addr;
    socklen_t addr_len;
    int fd;
    /* Other stateful information */
};

#define BUFFER_SIZE 4096
#define CLIENT_COUNT 10000

int main(void) {
    int server;
    struct client client[CLIENT_COUNT] = { 0 };
    size_t client_count = 0;
    /* XXX: Perform usual socket/bind/listen */
#ifdef WIN32
    ioctlsocket(server, FIONBIO, (u_long[]){1});
#else
    fcntl(server, F_SETFL, fcntl(server, F_GETFL, 0) | O_NONBLOCK);
#endif
    for (;;) {
        /* Accept a connection if possible */
        if (client_count < sizeof client / sizeof *client) {
            struct sockaddr_storage addr = { 0 };
            socklen_t addr_len = sizeof addr;
            int fd = accept(server, (struct sockaddr *) &addr, &addr_len);
            if (fd != -1) {
#ifdef WIN32
                ioctlsocket(fd, FIONBIO, (u_long[]){1});
#else
                fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);
#endif
                client[client_count++] = (struct client) { .addr = addr
                                                         , .addr_len = addr_len
                                                         , .fd = fd };
            }
        }
        /* Loop through clients */
        char buffer[BUFFER_SIZE];
        for (size_t index = 0; index < client_count; ) {
            ssize_t bytes_recvd = recv(client[index].fd, buffer, sizeof buffer, 0);
            /* The connection is gone when recv returns 0, or -1 with a real
             * error; -1 with EWOULDBLOCK/EAGAIN just means "no data yet". */
#ifdef WIN32
            int closed = bytes_recvd == 0
                      || (bytes_recvd < 0 && WSAGetLastError() != WSAEWOULDBLOCK);
#else
            int closed = bytes_recvd == 0
                      || (bytes_recvd < 0 && errno != EAGAIN && errno != EWOULDBLOCK);
#endif
            if (closed) {
                close(client[index].fd); /* closesocket() on Windows */
                client_count--;
                memmove(client + index, client + index + 1,
                        (client_count - index) * sizeof *client);
                continue; /* don't advance: a new entry now occupies this slot */
            }
            if (bytes_recvd > 0) {
                /* XXX: Process buffer[0..bytes_recvd-1] */
            }
            index++;
        }
        sleep(0); /* This is necessary to pass control back to the kernel,
                   * so it can queue more data for us to process */
    }
}
Supposing you want to pool connections on the client-side, the code would look very similar, except obviously there would be no need for the accept-related code. Supposing you have an array of clients that you want to connect, you could use non-blocking connect calls to perform all of the connections at once like this:
size_t index = 0, in_progress = 0;
for (;;) {
    if (client[index].fd == 0) {
        client[index].fd = socket(/* TODO */);
#ifdef WIN32
        ioctlsocket(client[index].fd, FIONBIO, (u_long[]){1});
#else
        fcntl(client[index].fd, F_SETFL, fcntl(client[index].fd, F_GETFL, 0) | O_NONBLOCK);
#endif
    }
#ifdef WIN32
    in_progress += connect(client[index].fd, (struct sockaddr *) &client[index].addr, client[index].addr_len) < 0
                && (WSAGetLastError() == WSAEALREADY
                 || WSAGetLastError() == WSAEWOULDBLOCK
                 || WSAGetLastError() == WSAEINVAL);
#else
    in_progress += connect(client[index].fd, (struct sockaddr *) &client[index].addr, client[index].addr_len) < 0
                && (errno == EALREADY
                 || errno == EINPROGRESS);
#endif
    if (++index < sizeof client / sizeof *client) {
        continue;
    }
    index = 0;
    if (in_progress == 0) {
        break;
    }
    in_progress = 0;
}
As for optimisation, given that this should be able to handle 10000 clients with perhaps a few minor tweaks, you shouldn't need multiple threads.
Nonetheless, by associating items from a mutex collection with clients and preceding the non-blocking socket operation with a non-blocking pthread_mutex_trylock, the above loops could be adapted to run simultaneously in multiple threads whilst processing the same group of sockets. This provides a working model for all POSIX-compliant platforms, be it Windows, BSD or Linux, but it's not a perfectly optimal one. To achieve optimality, we must step into the asynchronous world, which varies from system to system:
Windows uses WSA* functions with call-backs.
BSD and Linux use the somewhat similar kqueue and epoll, respectively.
It may pay to codify that "non-blocking socket operation" abstraction mentioned earlier, as the two asynchronous mechanisms vary significantly in respect to their interface. Like everything else, unfortunately we must write abstractions so that our Windows-relevant code remains legible on POSIX-compliant systems. As a bonus, this'll allow us to mingle server-processing (i.e. accept and anything that follows) with client-processing (i.e. connect and anything that follows), so our server loop can become a client loop (or vice-versa).
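As a taste of the Linux flavour, here is a minimal sketch of an epoll readiness loop. It is a sketch only; server is assumed to be a non-blocking listening socket, as in the earlier code:

#include <sys/epoll.h>

int epfd = epoll_create1(0);
struct epoll_event ev;
ev.events = EPOLLIN;
ev.data.fd = server;
epoll_ctl(epfd, EPOLL_CTL_ADD, server, &ev);

for (;;) {
    struct epoll_event events[64];
    int n = epoll_wait(epfd, events, 64, -1); /* blocks until something is ready */
    for (int i = 0; i < n; i++) {
        if (events[i].data.fd == server) {
            int fd = accept(server, NULL, NULL); /* new client */
            if (fd != -1) {
                ev.events = EPOLLIN;
                ev.data.fd = fd;
                epoll_ctl(epfd, EPOLL_CTL_ADD, fd, &ev);
            }
        } else {
            /* client socket is readable: recv() here without blocking */
        }
    }
}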
I have server and client classes, but there is a problem: when I make an infinite loop to accept incoming connections, I can't receive the data sent from the clients while accepting connections, because accept blocks until a connection is accepted. My code:
for (;;)
{
    boost::thread thread(boost::bind(&Irc::Server::startAccept, &s));
    thread.join();

    for (ClientsMap::const_iterator it = s.begin(); it != s.end(); ++it)
    {
        std::string msg = getData(it->second->recv());
        std::clog << "Msg: " << msg << std::endl;
    }
}
You need either multiple threads or a call to select/poll to find out which connections have unprocessed data. IBM has a nice example here, which will work on any flavor of Unix, Linux, BSD, etc. (you might need different header files depending on the OS).
Right now you're starting a thread and then waiting for it immediately, which results in sequential execution and completely defeats the purpose of threads.
Take a look here: http://www.boost.org/doc/libs/1_38_0/doc/html/boost_asio/examples.html
Especially the HTTP Server 3 example; that's exactly what you are looking for. All you have to do is change that code a little bit for your needs :) and you're done.
A good approach would be to create one thread that only accepts new connections. That's where you have a listener socket. Then, for every connection that gets accepted, you have a new connected socket, so you can spawn another thread, giving it the connected socket as a parameter. That way, your thread that accepts connections doesn't get blocked, and can connect to many clients very fast. The processing threads deal with the clients and then they exit.
I don't even know why you need to wait for them, but if you do, you may deal with it in some other way, depending on the OS and/or libraries that you use (messages, signals, etc. can be used).
If you don't want to spawn a new thread for each connected client, then as Ben Voigt suggested, you can use select. That is another good approach if you want to make it single threaded. Basically, all your sockets will be in an array of socket descriptors and using select you will know what happened (someone connected, socket is ready for read/write, socket got disconnected etc) and act accordingly.
Here's one example. It's a partial one, but it works: you just accept connections in acceptConnections(), which will then spawn a separate thread for each client. That's where you communicate with the clients. It's from some Windows code I have lying around, but it's very easy to reimplement for any platform.
typedef struct SOCKET_DATA_ {
    SOCKET sd;
    /* other parameters that you may want to pass to the clientProc */
} SOCKET_DATA;

/* In this function you communicate with the clients */
DWORD WINAPI clientProc(void * param)
{
    SOCKET_DATA * pSocketData = (SOCKET_DATA *)param;
    /* Communicate with the new client, and at the end deallocate the memory
       for SOCKET_DATA and return. */
    free(pSocketData); /* allocated with malloc, so free it (not delete) */
    return 0;
}

int acceptConnections(const char * pcAddress, int nPort)
{
    sockaddr_in sinRemote;
    int nAddrSize;
    SOCKET sd_client;
    SOCKET sd_listener;
    sockaddr_in sinInterface;
    SOCKET_DATA * pSocketData;
    HANDLE hThread;
    DWORD nThreadID;

    sd_listener = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
    if (INVALID_SOCKET == sd_listener) {
        fprintf(stderr, "Could not get a listener socket!\n");
        return 1;
    }

    sinInterface.sin_family = AF_INET;
    sinInterface.sin_port = htons(nPort); /* port must be in network byte order */
    sinInterface.sin_addr.S_un.S_addr = INADDR_ANY;

    if (SOCKET_ERROR != bind(sd_listener, (sockaddr*)&sinInterface, sizeof(sockaddr_in))) {
        listen(sd_listener, SOMAXCONN);
    } else {
        fprintf(stderr, "Could not bind the listening socket!\n");
        return 1;
    }

    while (1)
    {
        nAddrSize = sizeof(sinRemote);
        sd_client = accept(sd_listener, (sockaddr*)&sinRemote, &nAddrSize);
        if (INVALID_SOCKET == sd_client) {
            fprintf(stdout, "Accept failed!\n");
            closesocket(sd_listener);
            return 1;
        }

        fprintf(stdout, "Accepted connection from %s:%u.\n",
                inet_ntoa(sinRemote.sin_addr), ntohs(sinRemote.sin_port));

        pSocketData = (SOCKET_DATA *)malloc(sizeof(SOCKET_DATA));
        if (!pSocketData) {
            fprintf(stderr, "Could not allocate memory for SOCKET_DATA!\n");
            return 1;
        }
        pSocketData->sd = sd_client;

        hThread = CreateThread(0, 0, clientProc, pSocketData, 0, &nThreadID);
        if (hThread == NULL) { /* CreateThread returns NULL on failure */
            fprintf(stderr, "An error occured while trying to create a thread!\n");
            free(pSocketData);
            return 1;
        }
        CloseHandle(hThread); /* the thread keeps running; we just drop the handle */
    }

    closesocket(sd_listener);
    return 0;
}