I am quite new to Boost.Asio and I am experiencing random End Of File (EOF) errors in a multi-threaded server.
I could reproduce my problem in this small example:
Server:
This is a simple echo server. The protocol is straightforward:
(1) A client connects.
(2) The server reads one byte. This byte is the length N of the string to read and send back.
(3) The server reads N bytes.
(4) The server sends back the N+1 bytes to the client and goes back to (2).
When the client disconnects, an EOF is captured in (3) and the handler loop stops.
class MySocket {
public:
    char buffer[257];
    boost::asio::ip::tcp::socket socket;
    MySocket(boost::asio::io_service* ios) : socket(*ios) {}
    ~MySocket() {}
};

// Handlers
void readN(std::shared_ptr<MySocket> server, const boost::system::error_code& ec);

// (4)
void echo(std::shared_ptr<MySocket> server, const boost::system::error_code& ec) {
    if (ec) {
        throw std::exception(("This is NOT OK: " + ec.message()).c_str());
    }
    size_t n = server->buffer[0] & 0xFF;
    std::cout << std::string(server->buffer + 1, n) << std::endl;
    boost::asio::async_write(server->socket,
        boost::asio::buffer(server->buffer, n + 1),
        boost::bind(readN, server, boost::asio::placeholders::error));
}

// (3)
void read(std::shared_ptr<MySocket> server, const boost::system::error_code& ec) {
    if (ec) {
        throw std::exception(("This is OK: " + ec.message()).c_str());
    }
    size_t n = server->buffer[0] & 0xFF;
    boost::asio::async_read(server->socket,
        boost::asio::buffer(server->buffer + 1, n),
        boost::bind(echo, server, boost::asio::placeholders::error));
}

// (2)
void readN(std::shared_ptr<MySocket> server, const boost::system::error_code& ec) {
    if (ec) {
        throw std::exception(("This is also NOT OK: " + ec.message()).c_str());
    }
    boost::asio::async_read(server->socket,
        boost::asio::buffer(server->buffer + 0, 1),
        boost::bind(read, server, boost::asio::placeholders::error));
}

// Server
void serve(boost::asio::io_service* ios) {
    for (;;) {
        try { ios->run(); break; }
        catch (const std::exception& e) { std::cout << e.what() << std::endl; }
    }
}

// (1)
void accept(boost::asio::io_service* ios,
            boost::asio::ip::tcp::acceptor* acceptor,
            std::shared_ptr<MySocket> server,
            const boost::system::error_code& ec) {
    if (server.get() != nullptr) {
        server->socket.set_option(boost::asio::ip::tcp::no_delay(true));
        readN(server, ec);
    }
    server.reset(new MySocket(ios));
    acceptor->async_accept(server->socket,
        boost::bind(accept, ios, acceptor, server, boost::asio::placeholders::error));
}

int main() {
    boost::asio::io_service ios;
    boost::asio::ip::tcp::acceptor acceptor(ios,
        boost::asio::ip::tcp::endpoint(boost::asio::ip::tcp::v4(), 1207));
    boost::asio::io_service::work work(ios);
    accept(&ios, &acceptor, nullptr, boost::system::error_code());
    // std::thread other(boost::bind(serve, &ios));
    serve(&ios);
    acceptor.close();
    ios.stop();
    // other.join();
    return 0;
}
Client:
The client connects once to the server and sends 1000 strings.
int main() {
    try {
        boost::asio::io_service ios;
        boost::asio::ip::tcp::socket socket(ios);
        boost::asio::ip::tcp::endpoint endpoint(
            boost::asio::ip::address::from_string("127.0.0.1"), 1207);
        socket.connect(endpoint);
        socket.set_option(boost::asio::ip::tcp::no_delay(true));
        char buf[257];
        for (size_t i = 0; i < 1000; ++i) {
            size_t n = (i % 127) + 1;
            buf[0] = (char)n;
            for (size_t j = 0; j < n; ++j) {
                buf[j + 1] = (char)('A' + (j + i) % 26);
            }
            socket.send(boost::asio::buffer(buf, n + 1));
            socket.receive(boost::asio::buffer(buf, 1));
            if ((buf[0] & 0xFF) != n) {
                throw std::exception("Oups!");
            }
            socket.receive(boost::asio::buffer(buf + 1, n));
            for (size_t j = 0; j < n; ++j) {
                if (buf[j + 1] != (char)('A' + (j + i) % 26)) {
                    throw std::exception("Oups!");
                }
            }
            std::cout << i << ": " << std::string(buf + 1, n) << std::endl;
        }
    }
    catch (const std::exception& e) {
        std::cout << e.what() << std::endl;
    }
    return 0;
}
When the server uses only one thread (the thread named other is commented out), it correctly echoes the 1000 strings.
When the server uses the other thread, an EOF is captured in (4) after a random number of printed strings. This should never happen.
I tried wrapping all the async calls with a strand, but it did not work.
As far as I can see, there is no data race issue. Handlers should be called one after another.
What did I miss?
What is the correct idiom for handling a multithreaded asio application?
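For reference, this is roughly what the strand attempt looked like (MySocket gains a strand member, and every completion handler is wrapped before being handed to asio):
// One strand per connection, every completion handler wrapped so that
// handlers of the same connection never run concurrently.
class MySocket {
public:
    char buffer[257];
    boost::asio::ip::tcp::socket socket;
    boost::asio::io_service::strand strand;
    MySocket(boost::asio::io_service* ios) : socket(*ios), strand(*ios) {}
};

// e.g. step (2) with the wrapped handler:
void readN(std::shared_ptr<MySocket> server, const boost::system::error_code& ec) {
    if (ec) { /* ... as before ... */ }
    boost::asio::async_read(server->socket,
        boost::asio::buffer(server->buffer, 1),
        server->strand.wrap(
            boost::bind(read, server, boost::asio::placeholders::error)));
}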
EDIT :
I did a few tests and it appears that if I replace this line
throw std::exception(("This is NOT OK: "+ec.message()).c_str());
with:
std::cout<<"This is not OK: "<<ec.message()<<std::endl;
The server correctly echoes the 1000 lines, even though I can see that an EOF was incorrectly passed to the handlers a few times.
So I guess the question is: why do I get an incorrect boost::asio::error::eof when the socket is obviously not closed?
This is not what is stated here.
This is a bug in boost::asio 1.54.0.
I found two similar threads on the internet:
this one.
and that one (on stack overflow).
There is also a bug report here.
I installed boost 1.53 and it is now working just fine.
This issue was a regression and has been fixed in Boost 1.55:
http://www.boost.org/users/history/version_1_55_0.html
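(If you want to double-check which Boost your binary was actually built against, the version macros from <boost/version.hpp> can be printed at startup:)
#include <boost/version.hpp>
#include <iostream>

int main() {
    // BOOST_LIB_VERSION is e.g. "1_54"; BOOST_VERSION is e.g. 105400.
    std::cout << "Built against Boost " << BOOST_LIB_VERSION
              << " (" << BOOST_VERSION << ")" << std::endl;
    return 0;
}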
Related
Okay, so I might have got myself a big problem here. All this time, I've been basing my code on something I might not have wanted: I'm using synchronous boost::asio functions with a server that can have multiple clients at the same time. Here it is:
void session(tcp::socket socket, std::vector<Player>* pl)
{
debug("New connection! Reading username...\n");
/* ...Username verification code removed... */
debug("Client logged in safely as ");
debug(u->name);
debug("\n");
boost::system::error_code error; // filled in by read_some below
for (;;)
{
boost::array<unsigned char, 128> buf;
size_t len = socket.read_some(boost::asio::buffer(buf), error);
if (error == boost::asio::error::eof)
{
debug("Connection ended.\n");
break; // Connection closed cleanly by peer.
}
else if (error)
throw boost::system::system_error(error); // Some other error.
DataHeader ins = static_cast<DataHeader>(buf.data()[0]);
std::vector<unsigned char> response;
/* ... Get appropriate response... */
// send response
boost::system::error_code ignored_error;
boost::asio::write(socket, boost::asio::buffer(response), ignored_error);
//debug("Sent ");
//debug(response.size());
//debug("B to client.\n");
}
}
As you can see from the code, I'm using read_some and write functions in a non-ideal scenario. Now, the question is, how did I make this code usable for multiple clients at the same time? Well, I used threads:
int main()
{
try
{
boost::asio::io_context io_context;
tcp::acceptor acceptor(io_context, tcp::endpoint(tcp::v4(), 13));
debug("Ready.\n");
for (;;)
{
std::thread(session, acceptor.accept(), &players).detach(); // Accept incoming clients
}
}
catch (std::exception& e)
{
std::cerr << e.what() << std::endl;
}
return 0;
}
Now, I've never had a problem with this setup until recently, when I started testing multiple clients at the same time on one server. This made the server crash many times, and until now I thought the problem was just connection issues. However, now I've started to wonder: might the problem be the synchronous functions?
All the examples I've seen until now of multi-client servers use async functions, and maybe it's because they are needed. So, my final question is, do I really need async functions? Is there anything wrong with this code to make it crash? And finally, if async functions are needed, how could I implement them? Many thanks in advance!
As user VTT has pointed out, although this approach may work for a little while, it's just better to switch to async functions to avoid resource exhaustion, so I'll redo the entire server to implement them.
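For anyone following along, this is roughly the async shape I plan to move to: one session object per client, reads and writes chained through callbacks, and a single thread running the io_context. The username verification, Player list and framing from my code are left out, so treat it as a sketch rather than a drop-in replacement.
#include <boost/asio.hpp>
#include <array>
#include <memory>

using boost::asio::ip::tcp;

class session : public std::enable_shared_from_this<session>
{
public:
    explicit session(tcp::socket socket) : socket_(std::move(socket)) {}

    void start() { do_read(); }

private:
    void do_read()
    {
        auto self = shared_from_this();                 // keep the session alive
        socket_.async_read_some(boost::asio::buffer(buf_),
            [this, self](boost::system::error_code ec, std::size_t n)
            {
                if (ec) return;                         // closed or error: session dies
                // ... decode DataHeader from buf_, overwrite buf_ with the response ...
                do_write(n);
            });
    }

    void do_write(std::size_t n)
    {
        auto self = shared_from_this();
        boost::asio::async_write(socket_, boost::asio::buffer(buf_, n),
            [this, self](boost::system::error_code ec, std::size_t)
            {
                if (!ec) do_read();                     // wait for the next command
            });
    }

    tcp::socket socket_;
    std::array<unsigned char, 128> buf_;
};

void do_accept(tcp::acceptor& acceptor)
{
    acceptor.async_accept(
        [&acceptor](boost::system::error_code ec, tcp::socket socket)
        {
            if (!ec)
            {
                std::make_shared<session>(std::move(socket))->start();
                do_accept(acceptor);                    // accept the next client
            }
        });
}

int main()
{
    boost::asio::io_context io_context;
    tcp::acceptor acceptor(io_context, tcp::endpoint(tcp::v4(), 13));
    do_accept(acceptor);
    io_context.run();   // one thread is enough for many clients
}
The key point is that no thread ever blocks per client; a single io_context::run() drives all sessions.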
I am a 57-year-old ASP.NET programmer. Because I was the only one who, back in the beginning, had worked a little with C++, my bosses asked me to serve a customer who needs a communication agent with very specific characteristics: it must run as a daemon on multiple platforms and act as both client and server at different times. I do not know enough, but I have to solve the problem, and the Boost/Asio library looks like my chance.
I am new to Boost.Asio, and by reading the documentation I created a TCP server and a client that exchange messages perfectly, in both directions, full duplex.
I read several posts asking for the same thing I want, but all the answers suggested full duplex, as if that meant having a client and a server in the same program. It does not: full duplex refers to the ability to read and write on the same connection, and every TCP connection is full duplex by default.
I need two programs that can each accept connections initiated by the other. There will be no permanent connection between them. Sometimes one of them will request a connection and at other times the other will make the request; both need to be listening, accepting the connection, exchanging some messages and terminating the connection until a new request is made.
The server I wrote seems to get stuck listening on the port to see if a connection is coming in, and I cannot continue the program to create a socket and request a connection to the other program. I need threads but I do not know enough about them.
Is it possible?
As I said, I'm new to Boost/Asio; I tried to follow some documentation on threads and coroutines, and put the client code in one method and the server in another:
int main(int argc, char* argv[])
{
try
{
boost::thread t1(&server_agent);
boost::thread t2(&client_agent);
// wait
t1.join();
t2.join();
return 0;
}
catch (std::exception& e)
{
std::cerr << "Exception: " << e.what() << "\n";
}
return 0;
}
and two Coroutines:
void client_agent() {
parameters param;
param.load();
boost::asio::io_service io_service1;
tcp::resolver resolver(io_service1);
char port[5];
_itoa(param.getNrPortaServComunic(), port, 10);
auto endpoint_iterator = resolver.resolve({ param.getIPServComunicPrincipal(), port });
std::list<client> clients;
client c(io_service1, endpoint_iterator, param);
while (true)
{
BOOL enviada = FALSE;
while (true) {
if (!enviada) {
std::cout << "sending a message\n";
int nr = 110;
message msg(nr, param);
c.write(msg);
enviada = TRUE;
}
}
}
c.close();
}
void server_agent() {
parameters param;
param.load();
boost::asio::io_service io_service1;
std::list<server> servers;
tcp::endpoint endpoint(tcp::v4(), param.getNrPortaAgenteServ());
servers.emplace_back(io_service1, endpoint);
io_service1.run();
}
I used one port for the client endpoint and another port for the server endpoint. Is that correct? Is it required?
It starts to look like it's going to work: each of the methods runs concurrently, but then I get a thread allocation error at io_service1.run() (the last line of the server_agent method):
boost::exception_detail::clone_impl > at memory location 0x0118C61C.
Any suggestion?
You are describing a UDP client/server application. But your implementation is bound to fail. Think of an asio server or client as always running in a single thread.
The following code is just so you get an idea. I haven't tried to compile it. Client is very similar, but may need a transmit buffer, depends on the app, obviously.
This is a shortened version, so you get the idea. In a final application you may want to add receive timeouts and the like. The same principles hold for TCP servers, with the added listen/async_accept calls. Connected sockets can be stored in a shared_ptr and captured by the lambdas; they will be destroyed almost magically.
Server is basically the same, except there is no constant reading going on. If running both server and client in the same process, you can rely on run() to be looping because of the server, but if not, you'd have to call run() for each connection. run() would exit at the end of the exchange.
using namespace boost::asio; // Or whichever way you like to shorten names
class Server
{
public:
Server(io_service& ios) : ios_(ios), s_(ios) {}
void Start()
{
// create socket
// Start listening
Read();
}
void Read()
{
rxBuffer_.resize(1024);
s_.async_receive_from(
buffer(rxBuffer),
remoteEndpoint_,
[this](error_code ec, size_t n)
{
OnReceive(ec, n); // could be virtual, if done this way
});
}
void OnReceive(error_code ec, size_t n)
{
rxBuffer_.resize(n);
if (ec)
{
// error ... stops listen loop
return;
}
// grab data, put in txBuffer_
Read();
s_.async_send_to(
buffer(txBuffer_),
remoteEndpoint_,
[this](error_code ec, size_t n)
{
OnTransmitDone(ec, n);
});
}
void OnTransmitDone(error_code ec, size_t n)
{
// check for error?
txBuffer_.clear();
}
protected:
io_service& ios_;
ip::udp::socket s_;
ip::udp::endpoint remoteEndpoint_; // the other's address/port
std::vector<char> rxBuffer_; // could be any data type you like
std::vector<char> txBuffer_; // idem All access is in one thread, so only
// one needed for simple ask/respond ops.
};
int main()
{
io_service ios;
Server server(ios); // could have both server and client run on same thread
// on same io service this way.
server.Start();
ios.run();
// or std::thread ioThread([&](){ ios.run(); });
return 0;
}
I am unable to get this example working
http://www.boost.org/doc/libs/1_61_0/doc/html/boost_asio/tutorial/tutdaytime3/src.html
I have changed the port 13 to 1163 so that I don't need to be a root user to start listening.
And I am running the io_service in a separate thread.
int main()
{
try
{
boost::asio::io_service io_service;
tcp_server server(io_service);
boost::thread t(boost::bind(&boost::asio::io_service::run, &io_service));
t.detach();
}
catch (std::exception& e)
{
std::cerr << e.what() << std::endl;
}
string wait;
cin >> wait;
return 0;
}
When testing the above server with http://www.boost.org/doc/libs/1_61_0/doc/html/boost_asio/tutorial/tutdaytime1/src.html client, it says connection refused.
netstat --listen didn't show any open ports on 1163
I couldn't figure out how to use boost::asio::async_result<typename Handler>; I am confused about the Handler.
Working modification
int main()
{
try
{
boost::asio::io_service io_service;
tcp_server server(io_service);
boost::thread t(boost::bind(&boost::asio::io_service::run, &io_service));
t.detach();
string wait;
cin >> wait;
}
catch (std::exception& e)
{
std::cerr << e.what() << std::endl;
}
return 0;
}
If the wait is inside the try block, the code works!
If asio can't listen on the port the creation or binding of the acceptor will already fail. In your case you are creating the acceptor with acceptor_(io_service, tcp::endpoint(tcp::v4(), 13)), which is this overload: http://www.boost.org/doc/libs/1_62_0/doc/html/boost_asio/reference/basic_socket_acceptor/basic_socket_acceptor/overload3.html
This will directly try to bind the socket and throw an exception if that fails. The alternative (which is described on the bottom of this page) is to create the socket without an assigned endpoint and call open and bind on it, where bind will fail with an error (e.g. if the socket is already in use). In any case you won't need to wait for accept/async_accept to see an error.
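For instance, the open/bind/listen variant with error codes could look roughly like this (using port 1163 from the question):
using boost::asio::ip::tcp;

boost::asio::io_service io_service;
tcp::acceptor acceptor(io_service);                       // not yet open or bound

boost::system::error_code ec;
acceptor.open(tcp::v4(), ec);
if (!ec) acceptor.bind(tcp::endpoint(tcp::v4(), 1163), ec);
if (!ec) acceptor.listen(boost::asio::socket_base::max_connections, ec);
if (ec)
    std::cerr << "Cannot listen on port 1163: " << ec.message() << std::endl;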
I guess your issue is in the client program, which still tries to connect to port 13. Have you changed it to use port 1163? In the example code the port is not directly written, but a well-known service name is used here: tcp::resolver::query query(argv[1], "daytime");. The "daytime" reference will tell the resolver to use port 13.
Update: Now that that I see the actual code with the thread it's a totally different error:
If the wait is not inside the try block, the asio io_service and tcp_server will go out of scope nearly immediately, which means their destructors are called. This will stop all communication. Even worse, the detached thread now operates on dangling pointers. As a general rule, asio objects (the io_service event loop, sockets, etc.) should all live in the thread that uses them. The lifetime of the io_service should be tied to or shorter than the lifetime of a thread. The lifetime of the sockets should be shorter than that of the event loop (io_service) that runs them. Other usage scenarios might be possible by using shared_ptrs or by putting a lot of thought into the design, but that's not what I would recommend.
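To illustrate the rule applied to the code above: keep the io_service and the tcp_server alive for at least as long as the thread that runs them, and join the thread before they are destroyed, for example:
int main()
{
    try
    {
        boost::asio::io_service io_service;
        tcp_server server(io_service);
        boost::thread t(boost::bind(&boost::asio::io_service::run, &io_service));

        std::string wait;
        std::cin >> wait;      // block here until the user wants to quit

        io_service.stop();     // make run() return in the worker thread
        t.join();              // join instead of detach
    }                          // io_service and server are destroyed only now
    catch (std::exception& e)
    {
        std::cerr << e.what() << std::endl;
    }
    return 0;
}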
Observation
I built a demo application according to this server example using ASIO, after using the C++11 standard library to replace everything originally taken from Boost. The server shows that the class member tcp_session::start() is called only after the client connects, which is a good indication that the server accepts the connection from the client.
However, I saw nothing received by handle_read while the client sends a lot of data. I added some std::cout to handle_read and stop(). I set the timeout to 6 seconds and found this:
start() is called right after the client connects, then nothing indicates that handle_read is called; but after 6 seconds stop() is called, and then handle_read is called because of the timeout, and socket_.is_open() is false.
Then I found that if I change async_read to the async_read_until call that I had originally commented out, handle_read is called and socket_.is_open() is true, so I can actually see the packets.
Question:
The delimiter works, but I don't want one. How do I asynchronously read a whole TCP string without a delimiter? Why doesn't async_read work? Should it behave like this? Is there anything wrong in my code?
I am using VS2015 and test on localhost.
Answer
TCP doesn't have message boundaries, so I decided to use a special character to indicate the end of each packet.
Here is the relevant code:
class tcp_session : public subscriber, public std::enable_shared_from_this<tcp_session> {
public:
void start() {
std::cout<<"started"<<std::endl;
channel_.join(shared_from_this());
start_read();
input_deadline_.async_wait(
std::bind(&tcp_session::check_deadline, shared_from_this(), &input_deadline_)
);
await_output();
output_deadline_.async_wait(
std::bind(&tcp_session::check_deadline, shared_from_this(), &output_deadline_)
);
}
private:
bool stopped() const {
return !socket_.is_open();// weird that it is still not open
}
void start_read() {
// Set a deadline for the read operation.
input_deadline_.expires_from_now(timeout_); //was std::chrono::seconds(30) in example
char a = 0x7F;
// Start an asynchronous operation to read a 0x7F-delimited message or read all
//asio::async_read_until(socket_, input_buffer_, a, std::bind(&tcp_session::handle_read, shared_from_this(), std::placeholders::_1));
asio::async_read(socket_, input_buffer_,
std::bind(&tcp_session::handle_read, shared_from_this(), std::placeholders::_1));
}
void handle_read(const asio::error_code& ec) {
if (stopped()) // it thinks it stopped and returned without processing
return;
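What ended up working for me is the delimiter-based read; roughly, start_read and handle_read become the following (handle_read now also receives the byte count, and stop() is the existing member shown above):
void start_read() {
    input_deadline_.expires_from_now(timeout_);
    // Read until the 0x7F end-of-packet marker instead of reading "everything".
    asio::async_read_until(socket_, input_buffer_, char(0x7F),
        std::bind(&tcp_session::handle_read, shared_from_this(),
                  std::placeholders::_1, std::placeholders::_2));
}

void handle_read(const asio::error_code& ec, std::size_t /*n*/) {
    if (stopped())
        return;
    if (!ec) {
        // Extract one packet; the delimiter is consumed but not stored.
        std::istream is(&input_buffer_);
        std::string packet;
        std::getline(is, packet, char(0x7F));
        // ... process packet ...
        start_read();
    } else {
        stop();
    }
}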
I'm working on a multithreaded application in which one thread acts as a tcp server which receives commands from a client. The thread uses a Boost socket and acceptor to wait for a client to connect, receives a command from the client, passes the command to the rest of the application, then waits again. Here's the code:
void ServerThreadFunc()
{
using boost::asio::ip::tcp;
boost::asio::io_service io_service;
tcp::acceptor acceptor(io_service, tcp::endpoint(tcp::v4(), port_no));
for (;;)
{
// listen for command connection
tcp::socket socket(io_service);
acceptor.accept(socket);
// connected; receive command
boost::array<char,256> msg_buf;
socket.receive(boost::asio::buffer(msg_buf));
// do something with received bytes here
}
}
This thread spends most of its time blocked on the call to acceptor.accept(). At the moment, the thread only gets terminated when the application exits. Unfortunately, this causes a crash after main() returns - I believe because the thread tries to access the app's logging singleton after the singleton has been destroyed. (It was like that when I got here, honest guv.)
How can I shut this thread down cleanly when it's time for the application to exit? I've read that a blocking accept() call on a raw socket can be interrupted by closing the socket from another thread, but this doesn't appear to work on a Boost socket. I've tried converting the server logic to asynchronous i/o using the Boost asynchronous tcp echo server example, but that just seems to exchange a blocking call to acceptor::accept() for a blocking call to io_service::run(), so I'm left with the same problem: a blocked call which I can't interrupt. Any ideas?
In short, there are two options:
Change code to be asynchronous (acceptor::async_accept() and async_read), run within the event loop via io_service::run(), and cancel via io_service::stop().
Force blocking calls to interrupt with lower level mechanics, such as signals.
I would recommend the first option, as it is more likely to be portable and easier to maintain. The important concept to understand is that io_service::run() only blocks as long as there is pending work. When io_service::stop() is invoked, it will try to cause all threads blocked on io_service::run() to return as soon as possible; it will not interrupt synchronous operations, such as acceptor::accept() and socket::receive(), even if the synchronous operations are invoked within the event loop. It is important to note that io_service::stop() is a non-blocking call, so synchronization with threads that were blocked on io_service::run() must use another mechanism, such as thread::join().
Here is an example that runs for 10 seconds and listens on port 8080:
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/thread.hpp>
#include <iostream>
void StartAccept( boost::asio::ip::tcp::acceptor& );
void ServerThreadFunc( boost::asio::io_service& io_service )
{
using boost::asio::ip::tcp;
tcp::acceptor acceptor( io_service, tcp::endpoint( tcp::v4(), 8080 ) );
// Add a job to start accepting connections.
StartAccept( acceptor );
// Process event loop.
io_service.run();
std::cout << "Server thread exiting." << std::endl;
}
void HandleAccept( const boost::system::error_code& error,
boost::shared_ptr< boost::asio::ip::tcp::socket > socket,
boost::asio::ip::tcp::acceptor& acceptor )
{
// If there was an error, then do not add any more jobs to the service.
if ( error )
{
std::cout << "Error accepting connection: " << error.message()
<< std::endl;
return;
}
// Otherwise, the socket is good to use.
std::cout << "Doing things with socket..." << std::endl;
// Perform async operations on the socket.
// Done using the socket, so start accepting another connection. This
// will add a job to the service, preventing io_service::run() from
// returning.
std::cout << "Done using socket, ready for another connection."
<< std::endl;
StartAccept( acceptor );
};
void StartAccept( boost::asio::ip::tcp::acceptor& acceptor )
{
using boost::asio::ip::tcp;
boost::shared_ptr< tcp::socket > socket(
new tcp::socket( acceptor.get_io_service() ) );
// Add an accept call to the service. This will prevent io_service::run()
// from returning.
std::cout << "Waiting on connection" << std::endl;
acceptor.async_accept( *socket,
boost::bind( HandleAccept,
boost::asio::placeholders::error,
socket,
boost::ref( acceptor ) ) );
}
int main()
{
using boost::asio::ip::tcp;
// Create io service.
boost::asio::io_service io_service;
// Create server thread that will start accepting connections.
boost::thread server_thread( ServerThreadFunc, boost::ref( io_service ) );
// Sleep for 10 seconds, then shutdown the server.
std::cout << "Stopping service in 10 seconds..." << std::endl;
boost::this_thread::sleep( boost::posix_time::seconds( 10 ) );
std::cout << "Stopping service now!" << std::endl;
// Stopping the io_service is a non-blocking call. The threads that are
// blocked on io_service::run() will try to return as soon as possible, but
// they may still be in the middle of a handler. Thus, perform a join on
// the server thread to guarantee a block occurs.
io_service.stop();
std::cout << "Waiting on server thread..." << std::endl;
server_thread.join();
std::cout << "Done waiting on server thread." << std::endl;
return 0;
}
While running, I opened two connections. Here is the output:
Stopping service in 10 seconds...
Waiting on connection
Doing things with socket...
Done using socket, ready for another connection.
Waiting on connection
Doing things with socket...
Done using socket, ready for another connection.
Waiting on connection
Stopping service now!
Waiting on server thread...
Server thread exiting.
Done waiting on server thread.
When you receive an event that says it's time to exit, you can call acceptor.cancel(), which will cancel the pending accept (its handler is invoked with an error code of boost::asio::error::operation_aborted). On some systems, you might also have to close() the acceptor as well to be safe.
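For example, assuming the accept has been converted to async_accept and a worker thread is blocked in io_service::run(), the cancellation can be posted so it runs on the io_service thread rather than touching the acceptor from a foreign thread (a sketch, not a drop-in):
// From the thread that decides it is time to exit:
io_service.post([&acceptor]()
{
    boost::system::error_code ec;
    acceptor.cancel(ec);   // pending async_accept completes with operation_aborted
    acceptor.close(ec);    // on systems where cancel alone is not enough
});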
If it comes to it, you could open a temporary client connection to it on localhost - that will wake it up. You could even send it a special message so that you can shut down your server from the pub - there should be an app for that:)
Simply call shutdown() on the native handle with the SHUT_RD option to cancel the existing receive (accept) operation.
The accepted answer is not exactly correct. In fact, @JohnYu answered correctly.
Using the blocking API of ASIO is much like using the BSD sockets API that the ASIO library wraps in its classes.
The problem is that the boost::asio::ip::tcp::acceptor class does not expose shutdown() functionality, so you must do it through the "old" sockets API.
Additional note: make sure the acceptor, socket and io_service are not deleted before all threads using them exit. In the following code std::shared_ptr is used to keep the shared resources alive, so the user of the ApplicationContext class can delete the ApplicationContext object without causing a SEGFAULT crash.
Additional note: pay attention to the Boost documentation; there are overloaded methods that throw an exception and ones that return an error code. The original poster's code used acceptor->accept(socket); without try/catch, which would cause the program to exit instead of a normal thread-routine exit and cleanup.
Here is the solution description:
#include <sys/socket.h> // for ::shutdown() and SHUT_RDWR
// other includes ...
using boost::asio::ip::tcp;
using boost::asio::io_service;
class ApplicationContext {
// Use shared pointers to extend the life of resources after ApplicationContext is deleted
// and running threads can still keep using shared resources
std::shared_ptr<tcp::acceptor> acceptor;
std::shared_ptr<io_service> ioservice;
// called `ServerThreadFunc` in question code example
void AcceptLoopThreadRoutine(int port_no) {
ioservice = std::make_shared<io_service>();
acceptor = std::make_shared<tcp::acceptor>(*ioservice, tcp::endpoint(tcp::v4(), port_no));
try {
for (;;) {
// listen for client connection
tcp::socket socket(*ioservice);
// Note boost::system::system_error is raised when using this overload
acceptor->accept(socket);
// connected receive some data ...
// // boost::array<char,256> msg_buf;
// // socket.receive(boost::asio::buffer(msg_buf));
// do something with received bytes here
}
} catch(std::exception const & exception) {
// boost::system::system_error here indicates clean exit ;)
}
}
void StopAcceptThread() {
if(acceptor) {
// boost::asio::ip::tcp::acceptor does not have shutdown() functionality
// exposed, so we need to do it with this low-level approach
int shutdown_status = shutdown(acceptor->native_handle(), SHUT_RDWR);
}
}
};
Also note that using signals to unblock the accept thread is a very nasty implementation, and a temporary client connection on localhost to unblock the accept thread is very awkward.
ASIO is there to help you accomplish everything in a single thread with callbacks. If you are mixing threads and ASIO, chances are your design is flawed.
Additional note: do not confuse shutdown() and close(). Some systems may allow you to use close() on the accepting socket to unblock the accept loop, but this is not portable.
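To make the distinction concrete, a minimal sketch on a raw native descriptor (fd is just a placeholder for the acceptor's native_handle()):
// shutdown(): the descriptor stays open, but blocked/pending operations on it
// (accept, recv, ...) are woken up and fail, which is what unblocks the thread.
::shutdown(fd, SHUT_RDWR);

// close(): releases the descriptor itself; whether this wakes up a thread
// blocked on the same descriptor is platform dependent, so do not rely on it.
::close(fd);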