Asynchronous boost::asio server shuts down immediately after being created - C++

The server starts and accepts connections; all clients (even when more than 10 are connected) send a message, but no response ever comes back.
The read and write handlers take the index of the accepted client and work with that entry, which is why they have an extra parameter.
When a connection is accepted, its index is passed to the handler, and the handler works with the socket stored at that index.
#include <iostream>
#include <boost/asio.hpp>
#include <boost/thread.hpp>
#include <clocale>
#include <vector>
#include <conio.h>
using namespace boost::asio;
using namespace std;
class tcp_server
{
private:
io_service service;
int port;
enum { buff_size = 1024 };
ip::tcp::endpoint endpoint;
ip::tcp::acceptor acceptor;
int countClients = 0;
int accept_i = 0;
struct client
{
ip::tcp::socket sock;
char buff[buff_size] = { };
};
vector<client> clients;
public:
tcp_server(io_service& service, int port) : service(), acceptor(service), endpoint(ip::tcp::v4(), port)
{
this->port;
acceptor.open(endpoint.protocol());
acceptor.set_option(ip::tcp::acceptor::reuse_address(true));
acceptor.bind(endpoint);
acceptor.listen();
clients.reserve(10);
}
void start()
{
start_service_in_thread();
}
void start_service_in_thread()
{
for (int i = 0; i < 10; ++i)
boost::thread(service_func_for_thread);
for (int i = 0; i < 10; ++i)
{
boost::thread(acceptor_func_for_thread);
accept_i++;
}
}
void service_func_for_thread()
{
service.run();
}
void accept_handler(const boost::system::error_code& error)
{
if (!error)
{
countClients++;
do_read_this(countClients - 1);
}
else
{
cout << "Acceptor error\n";
cout << error.message() << endl;
}
}
void acceptor_func_for_thread()
{
acceptor.async_accept(
clients[accept_i].sock,
boost::bind(&tcp_server::accept_handler, this, boost::asio::placeholders::error)
);
}
void do_read_this(int thisClientIndex)
{
clients[thisClientIndex].sock.async_read_some(
buffer(clients[thisClientIndex].buff),
boost::bind(&tcp_server::read_handler,
this,
boost::asio::placeholders::error,
boost::asio::placeholders::bytes_transferred,
thisClientIndex)
);
}
void read_handler(const boost::system::error_code& error, size_t bytes_transferred, int thisClientIndex)
{
if (!error)
{
clients[thisClientIndex].sock.async_write_some(
buffer(clients[thisClientIndex].buff),
boost::bind(&tcp_server::write_handler,
this,
boost::asio::placeholders::error,
boost::asio::placeholders::bytes_transferred,
thisClientIndex)
);
}
else
{
cout << "Error reading from socket\n";
cout << error.message() << endl;
}
}
void write_handler(const boost::system::error_code& error, size_t bytes_transferred, int thisClientIndex)
{
if (!error)
{
do_read_this(thisClientIndex);
}
else
{
cout << "Error write in socket\n";
cout << error.message() << endl;
}
}
};
int main(int argc, char *argv[])
{
try
{
setlocale(LC_ALL, "Rus");
io_service service;
tcp_server* server = new tcp_server{ service, 5000 };
server->start();
service.run();
}
catch (exception& ex)
{
cout << "Exception: " << ex.what();
}
return 0;
}
The client connects to the server, and when it sends something, no response is received.
Please help.

service.run(); in main has nothing to do, so it returns immediately; main then returns and the program ends.
Creating background threads is not necessary here.
You are (again) creating temporary boost::thread objects that immediately go out of scope. And unless BOOST_THREAD_PROVIDES_THREAD_DESTRUCTOR_CALLS_TERMINATE_IF_JOINABLE is specified, you will end up with a bunch of detached threads. A sketch of keeping threads joinable follows.
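If worker threads really were needed, they should be stored and joined rather than constructed as temporaries. A minimal sketch (not the poster's code; it assumes a plain thread pool driving one io_service):
#include <boost/asio.hpp>
#include <boost/thread.hpp>
int main()
{
    boost::asio::io_service service;
    boost::asio::io_service::work work(service); // keeps run() from returning while workers are wanted

    boost::thread_group pool;                    // owns the threads, so none are detached
    for (int i = 0; i < 10; ++i)
        pool.create_thread([&service] { service.run(); });

    // ... post work to the service here ...

    service.stop();   // make every run() call return
    pool.join_all();  // wait for all workers before main exits
}
As the answer notes, though, a purely asynchronous server does not need any of this.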

When the io_service::run() method has no work to do, it returns.
You should either post() at least one task to the io_service before calling run(), or "lock" it with io_service::work:
io_service service;
boost::asio::io_service::work work(service);
The latter requires a call to service.stop() to cause run() to exit, otherwise it will run eternally.
Note however: you don't really need two io_services or any threads in an async application.
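For reference, a minimal single-threaded sketch along those lines (assuming an echo-style server is the goal, as in the question; class and function names are illustrative). There is one io_service, no extra threads, and run() already has a pending async_accept when it is called:
#include <boost/asio.hpp>
#include <boost/enable_shared_from_this.hpp>
#include <boost/make_shared.hpp>
#include <iostream>
using boost::asio::ip::tcp;
struct session : boost::enable_shared_from_this<session> {
    explicit session(boost::asio::io_service& svc) : sock(svc) {}
    tcp::socket sock;
    char buff[1024];
    void start() { do_read(); }
private:
    void do_read() {
        auto self = shared_from_this();
        sock.async_read_some(boost::asio::buffer(buff),
            [this, self](boost::system::error_code ec, std::size_t n) {
                if (!ec)
                    // echo back exactly what was read, then read again
                    boost::asio::async_write(sock, boost::asio::buffer(buff, n),
                        [this, self](boost::system::error_code ec2, std::size_t) {
                            if (!ec2) do_read();
                        });
            });
    }
};
struct echo_server {
    echo_server(boost::asio::io_service& svc, unsigned short port)
        : service(svc), acceptor(svc, tcp::endpoint(tcp::v4(), port)) { do_accept(); }
private:
    void do_accept() {
        auto s = boost::make_shared<session>(service);
        acceptor.async_accept(s->sock, [this, s](boost::system::error_code ec) {
            if (!ec) s->start();
            do_accept(); // keep accepting further clients
        });
    }
    boost::asio::io_service& service;
    tcp::acceptor acceptor;
};
int main() {
    boost::asio::io_service service;
    echo_server server(service, 5000);
    service.run(); // blocks: there is always a pending accept, so run() has work
}
Each session keeps itself alive through the shared_ptr captured in its handlers, so no per-client index bookkeeping is needed.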

Related

Server socket doesn't work properly - "accept is already open"

I've tried to separate my server socket into a singleton. Here's the code:
ServerSocket.h
#pragma once
#include <asio.hpp>
#include <iostream>
using asio::ip::tcp;
class ServerSocket
{
public:
ServerSocket(ServerSocket& otherSingleton) = delete;
void operator=(const ServerSocket& copySingleton) = delete;
tcp::acceptor* InitAcceptor();
tcp::socket* InitSocket();
void StartServerSocket();
void SendData(std::string);
std::array<char, 5000> RecieveData();
static ServerSocket* GetInstance();
private:
static ServerSocket* instance;
tcp::acceptor* acceptor;
tcp::socket* socket;
asio::io_context io_context;
ServerSocket() {
acceptor = InitAcceptor();
socket = InitSocket();
}
~ServerSocket()
{
std::cout << "Server closed";
}
};
ServerSocket.cpp
#include "ServerSocket.h"
tcp::acceptor* ServerSocket::InitAcceptor()
{
try
{
tcp::acceptor* acceptor = new tcp::acceptor(io_context, tcp::endpoint(tcp::v4(), 27015));
return acceptor;
}
catch (std::exception& e)
{
std::cerr << e.what() << std::endl;
}
}
tcp::socket* ServerSocket::InitSocket()
{
try
{
tcp::socket* socket = new tcp::socket(io_context);
return socket;
}
catch (std::exception& e)
{
std::cerr << e.what() << std::endl;
}
}
void ServerSocket::StartServerSocket()
{
try
{
std::cout << "Server started";
for (;;)
{
acceptor->accept(*socket);
};
}
catch (std::exception& e)
{
std::cerr << e.what() << std::endl;
}
}
std::array<char, 5000> ServerSocket::RecieveData()
{
try {
std::array<char, 5000> buf;
asio::error_code error;
size_t len = socket->read_some(asio::buffer(buf), error);
buf[len] = '\0';
return buf;
}
catch (std::exception& e)
{
std::cerr << e.what() << std::endl;
}
}
ServerSocket* ServerSocket::instance(nullptr);
ServerSocket* ServerSocket::GetInstance()
{
if (instance == nullptr)
{
instance = new ServerSocket();
}
return instance;
}
Server socket starts, I get:
Server started
when a client connects, I get:
accept: Already open
and the server stops.
I think the error comes from the acceptor being inside a for loop. But according to the docs, it should work this way (or at least that's how I understand it - https://think-async.com/Asio/asio-1.20.0/doc/asio/tutorial/tutdaytime2.html).
I tried deleting the for loop, like this:
try
{
std::cout << "Server started";
acceptor->accept(*socket);
}
and now there is no problem. But the connection isn't kept open by the server. The client connects once, sends data, and the server stops running.
As far as I understand from the docs, if I set the acceptor in a for(;;), it should be running - but it doesn't work in my case.
So, how can I keep my socket open in my implementation? I want it to be running for more than one SendData - I want it to be able to communicate with the client as long as the client is connected.
Thanks.
//Edit:
Here's the client code:
#include <iostream>
#include <asio.hpp>
#include "../../cereal/archives/json.hpp"
using asio::ip::tcp;
int main(int argc, char* argv[])
{
try
{
if (argc != 2)
{
std::cerr << "Usage: client <host>" << std::endl;
return 1;
}
// Socket Parameters
const unsigned port = 27015;
auto ip_address = asio::ip::make_address_v4(argv[1]);
auto endpoint = tcp::endpoint{ ip_address, port };
// Creating and Connecting the Socket
asio::io_context io_context;
auto resolver = tcp::resolver{ io_context };
auto endpoints = resolver.resolve(endpoint);
auto socket = tcp::socket{ io_context };
asio::connect(socket, endpoints);
std::array<char, 5000> buf;
std::cout << "Message to server: ";
asio::error_code ignored_error;
std::string username = "test", password = "mihai";
std::stringstream os;
{
cereal::JSONOutputArchive archive_out(os);
archive_out(
CEREAL_NVP(username),
CEREAL_NVP(password)
);
}
asio::write(socket, asio::buffer(os.str()), ignored_error);
return 0;
}
catch (std::exception& e)
{
std::cerr << e.what() << std::endl;
return 1;
}
}
And Communication.h, which is responsible for catching the operation from the client and passing it on to the server:
#pragma once
#include <iostream>
#include "DBUser.h"
#include "DBPost.h"
class Communication
{
public:
enum class Operations {
eLogin,
eRegister
};
void ExecuteOperation(Operations operation,const std::array<char, 5000>& buffer);
};
.cpp
#include "Communication.h"
void Communication::ExecuteOperation(Operations operation,const std::array<char, 5000>& buffer)
{
DBUser* user= DBUser::getInstance();
switch (operation)
{
case Communication::Operations::eLogin:
{
std::string username, password;
std::stringstream is(buffer.data());
{
cereal::JSONInputArchive archive_in(is);
archive_in(username,password);
}
try
{
user->LoginUser(username, password);
}
catch (const std::exception& e)
{
std::cout << e.what();
}
break;
}
case Communication::Operations::eRegister:
{
std::string username, password;
std::stringstream is(buffer.data());
{
cereal::JSONInputArchive archive_in(is);
archive_in(username, password);
}
try
{
user->CreateUser(username, password);
}
catch (const std::exception& e)
{
std::cout << e.what();
}
break;
}
}
}
Main
#include <iostream>
#include <pqxx/pqxx>
#include "DBLink.h"
#include "DBUser.h"
#include "DBPost.h"
#include "../Logging/Logging.h"
#include <iostream>
#include <string>
#include <asio.hpp>
#include "ServerSocket.h"
#include "Communication.h"
int main()
{
ServerSocket* test = ServerSocket::GetInstance();
test->StartServerSocket();
std::array<char, 5000> buf = test->RecieveData();
Communication communicationInterface;
communicationInterface.ExecuteOperation(Communication::Operations::eRegister, buf);
system("pause");
}
There are a lot of antipatterns going on:
Overuse of pointers.
Overuse of new (without any delete, a guaranteed leak)
The destructor claims that "Server closed" but it doesn't actually do a single thing to achieve that.
Two-step initialization (InitXXXX functions). Firstly, you should obviously favor initializer lists
ServerSocket()
: acceptor_(InitAcceptor()), socket_(InitSocket())
{ }
And you need to make InitAcceptor/InitSocket private to the implementation.
I'll let the Singleton pass - it is an anti-pattern 99% of the time, but I guess that's almost debatable.
In your StartServerSocket you have a loop that reuses the same socket all the time - of course it will already be open on the second accept. You need separate socket instances; the offending loop is:
for (;;) {
acceptor_->accept(*socket_);
};
Simplify/Fix
#include <boost/asio.hpp>
#include <iostream>
namespace asio = boost::asio;
using asio::ip::tcp;
struct Listener {
void Start()
{
std::cout << "Server started";
for (;;) {
auto socket = acceptor_.accept();
std::cout << "Accepted connection from " << socket.remote_endpoint()
<< std::endl;
};
}
static Listener& GetInstance() {
static Listener s_instance{27015}; // or use weak_ptr for finite lifetime
return s_instance;
}
private:
asio::io_context ioc_; // order of declaration is order of init!
tcp::acceptor acceptor_;
Listener(uint16_t port) : acceptor_{ioc_, tcp::endpoint{tcp::v4(), port}} {}
};
int main() {
try {
Listener::GetInstance().Start();
} catch (std::exception const& e) {
std::cerr << e.what() << std::endl;
}
}
Now you could hand the socket instances to a thread. I concur with the other commenters that thread-per-request is likely also an anti-pattern, and you should consider using async IO with Asio (hence the name).
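For completeness, a rough sketch of the hand-off-to-a-thread idea (with the caveat above; names are illustrative, and error handling is minimal):
#include <boost/asio.hpp>
#include <iostream>
#include <thread>
namespace asio = boost::asio;
using asio::ip::tcp;
static void handle_client(tcp::socket socket)
{
    // One blocking read per client, each in its own thread.
    char data[5000];
    boost::system::error_code ec;
    std::size_t n = socket.read_some(asio::buffer(data), ec);
    if (!ec)
        std::cout << "Got " << n << " bytes from " << socket.remote_endpoint() << std::endl;
}
int main()
{
    asio::io_context ioc;
    tcp::acceptor acceptor{ioc, tcp::endpoint{tcp::v4(), 27015}};
    for (;;)
    {
        // accept() returns a fresh, already-connected socket each time
        std::thread(handle_client, acceptor.accept()).detach();
    }
}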
EDIT: complete and working example based on the server code from the question:
// main.cxx
#include "ServerSocket.hxx"
#include <boost/asio.hpp>
#include <iostream>
#include <string>
using boost::asio::ip::tcp;
int
main ()
{
ServerSocket *test = ServerSocket::GetInstance ();
test->StartServerSocket ();
std::cout << std::endl;
while (auto msg = test->RecieveData ())
{
std::cout << msg.value ();
}
}
// ServerSocket.hxx
#pragma once
#include <boost/asio.hpp>
#include <iostream>
#include <optional>
using boost::asio::ip::tcp;
class ServerSocket
{
public:
ServerSocket (ServerSocket &otherSingleton) = delete;
void operator= (const ServerSocket &copySingleton) = delete;
tcp::acceptor *InitAcceptor ();
tcp::socket *InitSocket ();
void StartServerSocket ();
void SendData (std::string);
std::optional<std::string> RecieveData ();
static ServerSocket *GetInstance ();
private:
static ServerSocket *instance;
tcp::acceptor *acceptor;
tcp::socket *socket;
boost::asio::io_context io_context;
ServerSocket ()
{
acceptor = InitAcceptor ();
socket = InitSocket ();
}
~ServerSocket () {
delete socket;
delete acceptor;
std::cout << "Server closed"; }
};
// ServerSocket.cxx
#include "ServerSocket.hxx"
#include <optional>
tcp::acceptor *
ServerSocket::InitAcceptor ()
{
try
{
return new tcp::acceptor (io_context, tcp::endpoint (tcp::v4 (), 27015));
}
catch (std::exception &e)
{
std::cerr << e.what () << std::endl;
}
return nullptr;
}
tcp::socket *
ServerSocket::InitSocket ()
{
try
{
return new tcp::socket (io_context);
}
catch (std::exception &e)
{
std::cerr << e.what () << std::endl;
}
return nullptr;
}
void
ServerSocket::StartServerSocket ()
{
try
{
std::cout << "Server started";
acceptor->accept (*socket);
}
catch (std::exception &e)
{
std::cerr << e.what () << std::endl;
}
}
std::optional<std::string>
ServerSocket::RecieveData ()
{
try
{
char data[5000];
for (;;)
{
boost::system::error_code error;
size_t length = socket->read_some (boost::asio::buffer (data), error);
if (error == boost::asio::error::eof) return std::nullopt; // Connection closed cleanly by peer.
else if (error)
throw boost::system::system_error (error); // Some other error.
return std::string{ data, length };
}
}
catch (std::exception &e)
{
std::cerr << e.what () << std::endl;
}
return {};
}
ServerSocket *ServerSocket::instance (nullptr);
ServerSocket *
ServerSocket::GetInstance ()
{
if (instance == nullptr)
{
instance = new ServerSocket ();
}
return instance;
}
Note that there are still some problems with the server:
Error handling
More than one connection (see the sketch after the testing note below)
The server does not send a message if the operation was successful
If you disconnect the client, the server shuts down
Some pointers could be replaced with optional, so there is no need to write "new"
Just make it a normal class; do not write it as a singleton.
If you would like to test the server you can run
telnet localhost 27015
and then type some text and press Enter.
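Along those lines, a rough sketch of the "no pointers, no singleton, more than one connection" variant (names and the port are illustrative; this is one possible shape, not the only one):
#include <boost/asio.hpp>
#include <iostream>
#include <optional>
#include <string>
using boost::asio::ip::tcp;
class ServerSocket
{
public:
    explicit ServerSocket(unsigned short port)
        : acceptor_(io_context_, tcp::endpoint(tcp::v4(), port)) {}
    void Accept()
    {
        socket_.emplace(io_context_);   // a fresh socket for every connection
        acceptor_.accept(*socket_);
    }
    std::optional<std::string> ReceiveData()
    {
        char data[5000];
        boost::system::error_code ec;
        std::size_t n = socket_->read_some(boost::asio::buffer(data), ec);
        if (ec == boost::asio::error::eof)
            return std::nullopt;        // client disconnected cleanly
        if (ec)
            throw boost::system::system_error(ec);
        return std::string(data, n);
    }
private:
    boost::asio::io_context io_context_; // declared first: members are initialized in order
    tcp::acceptor acceptor_;
    std::optional<tcp::socket> socket_;
};
int main()
{
    try
    {
        ServerSocket server(27015);
        for (;;)                          // serve more than one connection
        {
            server.Accept();
            while (auto msg = server.ReceiveData())
                std::cout << msg.value();
            std::cout << "\nClient disconnected; waiting for the next one\n";
        }
    }
    catch (std::exception const& e)
    {
        std::cerr << e.what() << std::endl;
    }
}
Sending a reply and the remaining error-handling points from the list above are still left open.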

C++ Boost UDP receiver fails when put into thread

I have a UDP receiver that works. The code is here:
#include <array>
#include <iostream>
#include <string>
#include <boost/asio.hpp>
std::string getMyIp()
{
std::string result;
try
{
boost::asio::io_service netService;
boost::asio::ip::udp::resolver resolver(netService);
boost::asio::ip::udp::udp::resolver::query query(boost::asio::ip::udp::v4(), "google.com", "");
boost::asio::ip::udp::udp::resolver::iterator endpoints = resolver.resolve(query);
boost::asio::ip::udp::udp::endpoint ep = *endpoints;
boost::asio::ip::udp::udp::socket socket(netService);
socket.connect(ep);
boost::asio::ip::address addr = socket.local_endpoint().address();
result = addr.to_string();
//std::cout << "My IP according to google is: " << results << std::endl;
}
catch (std::exception& e)
{
std::cerr << "Could not deal with socket. Exception: " << e.what() << std::endl;
}
return result;
}
class receiver
{
private:
boost::asio::ip::udp::socket socket_;
boost::asio::ip::udp::endpoint sender_endpoint_;
std::array<char, 1024> data_;
public:
receiver(boost::asio::io_service& io_service,
const boost::asio::ip::address& listen_address,
const boost::asio::ip::address& multicast_address,
unsigned short multicast_port = 13000)
: socket_(io_service)
{
// Create the socket so that multiple may be bound to the same address.
boost::asio::ip::udp::endpoint listen_endpoint(listen_address, multicast_port);
socket_.open(listen_endpoint.protocol());
socket_.set_option(boost::asio::ip::udp::socket::reuse_address(true));
socket_.bind(listen_endpoint);
// Join the multicast group.
socket_.set_option(boost::asio::ip::multicast::join_group(multicast_address));
do_receive();
}
private:
void do_receive()
{
socket_.async_receive_from(boost::asio::buffer(data_), sender_endpoint_, [this](boost::system::error_code ec, std::size_t length)
{
if (!ec)
{
std::cout.write(data_.data(), length);
std::cout << std::endl;
do_receive();
}
});
}
};
int main(int argc, char* argv[])
{
try
{
boost::asio::io_service io_service;
receiver r(io_service, boost::asio::ip::make_address(getMyIp()), boost::asio::ip::make_address("224.0.0.0"), 13000);
io_service.run();
}
catch (std::exception& e)
{
std::cerr << "Exception: " << e.what() << "\n";
}
return 0;
}
I want to put the receiver code into a thread inside a class so I can do other things alongside it:
#define _CRT_SECURE_NO_WARNINGS
#include <ctime>
#include <iostream>
#include <string>
#include <queue>
#include <boost/array.hpp>
#include <boost/bind.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/asio.hpp>
#include <boost/lexical_cast.hpp>
#include <boost/thread.hpp>
#include <boost/thread/thread.hpp>
#include <boost/chrono.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
using boost::asio::ip::udp;
using std::cout;
using std::cin;
using std::endl;
using std::string;
using namespace std;
std::string getMyIp()
{
std::string result;
try
{
boost::asio::io_service netService;
boost::asio::ip::udp::resolver resolver(netService);
boost::asio::ip::udp::udp::resolver::query query(boost::asio::ip::udp::v4(), "google.com", "");
boost::asio::ip::udp::udp::resolver::iterator endpoints = resolver.resolve(query);
boost::asio::ip::udp::udp::endpoint ep = *endpoints;
boost::asio::ip::udp::udp::socket socket(netService);
socket.connect(ep);
boost::asio::ip::address addr = socket.local_endpoint().address();
result = addr.to_string();
//std::cout << "My IP according to google is: " << results << std::endl;
}
catch (std::exception& e)
{
std::cerr << "Could not deal with socket. Exception: " << e.what() << std::endl;
}
return result;
}
class UdpReceiver
{
private:
boost::asio::ip::udp::socket socket_;
boost::asio::ip::udp::endpoint sender_endpoint_;
std::array<char, 1024> data_;
string address_send, address_recv;
unsigned short port_send, port_recv;
boost::thread_group threads; // thread group
boost::thread* thread_main; // main thread
boost::thread* thread_receive; // receive thread
boost::thread* thread_send; // get/send thread
boost::mutex stopMutex;
bool initialize = false;
bool stop, showBroadcast;
int i_send, i_recv, i_operator,
interval_send, interval_recv, interval_operator,
mode;
string message_send, message_recv;
string message_STOP = "STOP";
public:
// constructor
UdpReceiver(boost::asio::io_service& io_service, std::string address, unsigned short port, int interval, int mode, bool show = false)
: socket_(io_service),
showBroadcast(show)
{
initialize = false;
Initialize(io_service, show);
}
UdpReceiver(boost::asio::io_service& io_service, bool show = false)
: socket_(io_service),
showBroadcast(show)
{
Initialize(io_service, show);
}
// destructor
~UdpReceiver()
{
// show exit message
cout << "Exiting UDP Core." << endl;
}
// initialize
void Initialize(boost::asio::io_service& io_service, bool show = false)
{
if (initialize == false)
{
GetMode(true);
GetInfo(true);
}
CreateEndpoint(io_service);
CreateThreads();
stop = false;
showBroadcast = show;
i_send = 0;
i_recv = 0;
i_operator = 0;
message_send.clear();
message_recv.clear();
initialize = true; // clear flag
}
void GetMode(bool default_value = false)
{
std::string input;
if (default_value)
{
mode = 0;
}
else
{
string prompt = "Set mode:\n0/other - Listen\n1 - Send\nEnter your choice: ";
cout << prompt;
getline(cin, input);
try
{
mode = stoi(input);
// set default mode to Listen
if (mode > 1)
mode = 0;
}
catch (exception ec)
{
cout << "Error converting mode: " << ec.what() << endl;
Stop();
}
}
}
void GetInfo(bool default_value = false)
{
// always called after GetMode()
string address;
unsigned short port;
int interval;
if (default_value)
{
address = getMyIp();
port = 13000;
interval = 500;
}
switch (mode)
{
case 0:
address_recv = address;
port_recv = port;
interval_recv = interval;
break;
case 1:
address_send = address;
port_send = port;
interval_send = interval;
break;
default:
// already set to 0 in GetMode()
break;
}
}
void CreateEndpoint(boost::asio::io_service& io_service)
{
// Create the socket so that multiple may be bound to the same address.
boost::asio::ip::udp::endpoint listen_endpoint(boost::asio::ip::address::from_string(address_recv), port_recv);
socket_.open(listen_endpoint.protocol());
socket_.set_option(boost::asio::ip::udp::socket::reuse_address(true));
socket_.bind(listen_endpoint);
// Join the multicast group.
socket_.set_option(boost::asio::ip::multicast::join_group(boost::asio::ip::address::from_string("224.0.0.0")));
}
void CreateThreads()
{
thread_main = new boost::thread(boost::ref(*this));
interval_operator = 500; // default value
switch (mode)
{
case 0:
thread_receive = new boost::thread(&UdpReceiver::Callable_Receive, this);
threads.add_thread(thread_receive);
break;
default:
// already set to 0 in GetMode()
break;
}
}
// start the threads
void Start()
{
// Wait till they are finished
threads.join_all();
}
// stop the threads
void Stop()
{
// warning message
cout << "Stopping all threads." << endl;
// signal the threads to stop (thread-safe)
stopMutex.lock();
stop = true;
stopMutex.unlock();
// wait for the threads to finish
thread_main->interrupt(); // in case not interrupted by operator()
threads.interrupt_all();
threads.join_all();
// close socket after everything closes
//socketPtr->close();
socket_.close();
}
void Callable_Receive()
{
while (!stop)
{
stopMutex.lock();
socket_.async_receive_from(boost::asio::buffer(data_), sender_endpoint_, [this](boost::system::error_code ec, std::size_t length)
{
if (!ec)
{
//cout << message_recv << endl;
std::cout.write(data_.data(), length);
std::cout << std::endl;
Callable_Receive();
}
});
stopMutex.unlock();
//cout << i_recv << endl;
++i_recv;
}
}
// Thread function
void operator () ()
{
while (!stop)
{
if (message_send == message_STOP)
{
try
{
this->Stop();
}
catch (exception e)
{
cout << e.what() << endl;
}
}
boost::this_thread::sleep(boost::posix_time::millisec(interval_operator));
boost::this_thread::interruption_point();
}
}
};
int main()
{
try
{
boost::asio::io_service io_service;
UdpReceiver mt(io_service, false);
mt.Start();
}
catch (std::exception& e)
{
std::cerr << "Exception: " << e.what() << "\n";
}
}
The async receive is inside Callable_Receive(), which is run by thread_receive. I can see that thread running when the counter is printed on screen (the print is commented out above). However, async_receive_from() never receives anything. Could someone tell me why this happens?
You probably have a deadlock in Callable_Receive. In the thread that runs Callable_Receive you call stopMutex.lock() before invoking async_receive_from. async_receive_from returns immediately, but we don't know when the lambda passed as its third parameter will be called. When the lambda body executes, it calls Callable_Receive again; if stopMutex is still locked (the thread running Callable_Receive is already in the next iteration of its while loop) and you try to lock it again, you get a deadlock - a boost::mutex cannot be locked again while it is already held by the same thread.
You should read about boost::recursive_mutex if you want to resolve this issue.
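For illustration, a small self-contained sketch (not the poster's code) of the difference the answer is pointing at - re-locking a boost::mutex from the thread that already holds it deadlocks (strictly, it is undefined), while a boost::recursive_mutex allows it:
#include <boost/thread.hpp>
#include <iostream>
int main()
{
    boost::recursive_mutex rm;
    {
        boost::lock_guard<boost::recursive_mutex> outer(rm);
        boost::lock_guard<boost::recursive_mutex> inner(rm); // fine: the same thread may re-lock
        std::cout << "recursive_mutex: re-locked by the same thread\n";
    }
    boost::mutex m;
    m.lock();
    // m.lock();   // re-locking here would deadlock: boost::mutex is not recursive
    m.unlock();
    std::cout << "plain mutex: must not be locked twice by the same thread\n";
}
Alternatively, keep the lock's scope small enough that the completion handler can never run while the mutex is still held.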

Boost Asio - Why do my asynchronous operations not launch?

I recently met a problem with boost::asio asynchronous tasks. I want to return a pointer to an object that listens on a port.
It works when I use the socket.read_some method, but that method blocks my main, and I want my MyClass::create method to return.
So I tried an async_read call, but I saw that inside my read() method no asynchronous tasks are launched. I tried to figure out what may cause the problem but see no solution to this issue.
Here is my code; it uses an async_wait instead of an async_read, and the same problem appears: the timer is never launched.
Thanks for any help I might get.
The header file:
#ifndef MYCLASS_HPP
#define MYCLASS_HPP
#include <memory>
#include <boost/asio.hpp>
class MyClass
{
public:
MyClass(boost::asio::io_service& ios);
void read();
void read_handler(const boost::system::error_code& error);
static std::shared_ptr<MyClass> create(std:: string const & host, uint16_t port);
bool connect (std::string const & host, uint16_t port);
void connect_handler(const boost::system::error_code& error);
boost::asio::ip::tcp::socket m_socket;
bool m_flag;
std::vector<uint8_t> m_buffer;
};
#endif
Source file:
#include "MyClass.hpp"
#include <boost/bind.hpp>
MyClass::MyClass(boost::asio::io_service& ios)
:m_flag(false), m_socket(ios), m_buffer(20)
{
}
void MyClass::read_handler(const boost::system::error_code& er)
{
std::cout << "Timer waited 5 sec" << std::endl;
}
void MyClass::read()
{
boost::asio::deadline_timer t(m_socket.get_io_service(),boost::posix_time::seconds(5));
t.async_wait(boost::bind(&MyClass::read_handler,this,boost::asio::placeholders::error));
m_socket.get_io_service().run();//Should make the io_service wait for all asynchronous tasks to finish
std::cout << "This message should be displayed after the wait" << std::endl;
}
void MyClass::connect_handler(const boost::system::error_code& error)
{
if(!error)
{
std::cout << "Connection done" << std::endl;
m_flag = 1;
}
else
{
std::cout << "Error in connection: " << error.message() << std::endl;
}
}
//connect method
bool MyClass::connect(std::string const & host, uint16_t port)
{
boost::asio::ip::tcp::endpoint endpoint(boost::asio::ip::address::from_string(host),port);
m_socket.async_connect(endpoint,
boost::bind(&MyClass::connect_handler, this,
boost::asio::placeholders::error));
m_socket.get_io_service().run();//Wait async_connect and connect_handler to finish
if (m_flag == 0) return false;
else return true;
}
std::shared_ptr<MyClass> MyClass::create(std:: string const & host, uint16_t port)
{
boost::asio::io_service ios;
std::shared_ptr<MyClass> ptr(new MyClass(ios));
bool bol = ptr->connect(host, port);
ptr->read();
//while(1){}
if(bol == true)
{
//connection success, reading currently listening, pointer is returned to the user
return ptr;
}
else
{
//connection failure, pointer is still returned to the user but not listening as he's not connected
return ptr;
}
}
And my main:
#include <boost/date_time/posix_time/posix_time.hpp>
#include <boost/asio.hpp>
#include "MyClass.hpp"
int main()
{
try
{
std::cout << "Creation of instance" << std::endl;
std::shared_ptr <MyClass> var = MyClass::create("127.0.0.1", 8301);
std::cout << "Instance created" << std::endl;
}
catch (std::exception& e)
{
std::cerr << e.what() << std::endl;
}
return 0;
}
I figured out how to solve my problem.
I did indeed have a problem with the io_service being destroyed at the end of the "create" method, so the pointer returned to main was not able to continue reading.
I had to call run() at some point to launch the callbacks, but I couldn't do it in main, as I wanted main to keep doing other things.
So I created a class that launches a separate thread and contains an io_service; that thread calls run() periodically. An instance of it was then added as a member of MyClass.
Now the call to "create" returns a pointer to MyClass that doesn't stop whatever asynchronous task was launched in MyClass.
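A minimal sketch of that idea (names are illustrative, not the poster's actual class; here run() is kept alive with a work object instead of being called periodically): an object that owns the io_service, keeps it busy, and runs it on its own thread so it outlives create():
#include <boost/asio.hpp>
#include <boost/thread.hpp>
class IoRunner
{
public:
    IoRunner() : m_work(m_ios), m_thread([this] { m_ios.run(); }) {}
    ~IoRunner()
    {
        m_ios.stop();    // make run() return
        m_thread.join(); // wait for the worker thread
    }
    boost::asio::io_service& service() { return m_ios; }
private:
    boost::asio::io_service m_ios;
    boost::asio::io_service::work m_work; // keeps run() from returning while idle
    boost::thread m_thread;               // calls m_ios.run() until stop()
};
MyClass could then hold an IoRunner (or a shared_ptr to one) and construct its socket and timers from runner.service(), so handlers keep firing after create() returns.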

boost asio deadline_timer async_wait(N seconds) twice within N seconds cause operation canceled

What I want is: when the message queue receives an int N, the handler function should be called N seconds later. Below is my code.
It runs OK when the interval between two consecutive messages is larger than N, but one of the handlers prints "Operation canceled" when the interval between two received messages is smaller than N, which is not what I want.
I'd appreciate any help.
#include <boost/asio.hpp>
#include <zmq.h>
#include <boost/thread.hpp>
#include <iostream>
boost::asio::io_service io_service;
void* context = zmq_ctx_new();
void* sock_pull = zmq_socket(context, ZMQ_PULL);
void handler(const boost::system::error_code &ec) {
std::cout << "hello, world" << "\t" << ec.message() << std::endl;
}
void run() {
io_service.run();
}
void thread_listener() {
int nRecv;
boost::asio::deadline_timer timer(io_service, boost::posix_time::seconds(0));
while( true ) {
zmq_recv(sock_pull, &nRecv, sizeof(nRecv), 0);
std::cout << nRecv << std::endl;
timer.expires_from_now(boost::posix_time::seconds(nRecv));
timer.async_wait(handler);
}
}
int main(int argc, char* argv[]) {
boost::asio::io_service::work work(io_service);
zmq_bind(sock_pull, "tcp://*:60000");
boost::thread tThread(thread_listener);
boost::thread tThreadRun(run);
tThread.join();
tThreadRun.join();
return 0;
}
When you call
timer.expires_from_now(boost::posix_time::seconds(nRecv));
this, as the documentation states, cancels any pending asynchronous wait on that timer.
If you want to have overlapping requests in flight at a given time, one timer is clearly not enough. Luckily there is a well-known pattern around bound shared pointers in Asio that you can use to mimic a "session" per response.
Say you define a session to contain its own private timer:
struct session : boost::enable_shared_from_this<session> {
session(boost::asio::io_service& svc, int N) :
timer(svc, boost::posix_time::seconds(N))
{
// Note: shared_from_this is not allowed from ctor
}
void start() {
// it's critical that the completion handler is bound to a shared
// pointer so the handler keeps the session alive:
timer.async_wait(boost::bind(&session::handler, shared_from_this(), boost::asio::placeholders::error));
}
private:
void handler(const boost::system::error_code &ec) {
std::cout << "hello, world" << "\t" << ec.message() << std::endl;
}
boost::asio::deadline_timer timer;
};
Now, it's trivial to replace the code that used the hardcoded timer instance:
timer.expires_from_now(boost::posix_time::seconds(nRecv));
timer.async_wait(handler);
with the session start:
boost::make_shared<session>(io_service, nRecv)->start();
A fully working example (with suitably stubbed ZMQ stuff): Live On Coliru
#include <boost/asio.hpp>
#include <boost/thread.hpp>
#include <boost/enable_shared_from_this.hpp>
#include <boost/make_shared.hpp>
#include <iostream>
boost::asio::io_service io_service;
/////////////////////////////////////////////////////////////////////////
// I love stubbing out stuff I don't want to install just to help others
enum { ZMQ_PULL };
static void* zmq_ctx_new() { return nullptr; }
static void* zmq_socket(void*,int) { return nullptr; }
static void zmq_bind(void*,char const*) {}
static void zmq_recv(void*,int*data,size_t,int)
{
boost::this_thread::sleep_for(boost::chrono::milliseconds(rand()%1000));
*data = 2;
}
// End of stubs :)
/////////////////////////////////////////////////////////////////////////
void* context = zmq_ctx_new();
void* sock_pull = zmq_socket(context, ZMQ_PULL);
struct session : boost::enable_shared_from_this<session> {
session(boost::asio::io_service& svc, int N) :
timer(svc, boost::posix_time::seconds(N))
{
// Note: shared_from_this is not allowed from ctor
}
void start() {
// it's critical that the completion handler is bound to a shared
// pointer so the handler keeps the session alive:
timer.async_wait(boost::bind(&session::handler, shared_from_this(), boost::asio::placeholders::error));
}
~session() {
std::cout << "bye (session end)\n";
}
private:
void handler(const boost::system::error_code &ec) {
std::cout << "hello, world" << "\t" << ec.message() << std::endl;
}
boost::asio::deadline_timer timer;
};
void run() {
io_service.run();
}
void thread_listener() {
int nRecv = 0;
for(int n=0; n<4; ++n) {
zmq_recv(sock_pull, &nRecv, sizeof(nRecv), 0);
std::cout << nRecv << std::endl;
boost::make_shared<session>(io_service, nRecv)->start();
}
}
int main() {
auto work = boost::make_shared<boost::asio::io_service::work>(io_service);
zmq_bind(sock_pull, "tcp://*:60000");
boost::thread tThread(thread_listener);
boost::thread tThreadRun(run);
tThread.join();
work.reset();
tThreadRun.join();
}

Where do the mysterious 200 connections come from?

Hey guys, I'm a newbie to async programming; this is probably a stupid question, but it has driven me crazy!!
Here's the code (it's just modified a bit from boost.asio's sample):
server.cpp:
class tcp_server
{
public:
tcp_server(boost::asio::io_service& io_service)
: acceptor_(io_service, tcp::endpoint(tcp::v4(), 10000)),limit(0)
{
start_accept();
}
private:
void start_accept()
{
while(1)
{
if(limit <= 10)
{
std::cout << limit << std::endl;
break;
}
}
tcp::socket* socket = new tcp::socket(acceptor_.io_service());
acceptor_.async_accept(*socket,
boost::bind(&tcp_server::handle_accept, this, boost::asio::placeholders::error));
}
void handle_accept(const boost::system::error_code& error)
{
if (!error)
{
++limit ;
start_accept();
}
}
tcp::acceptor acceptor_;
int limit;
};
int main()
{
try
{
boost::asio::io_service io_service;
tcp_server server(io_service);
io_service.run();
}
catch (std::exception& e)
{
std::cerr << e.what() << std::endl;
}
return 0;
}
client.cpp:
int main(int argc, char* argv[])
{
int i = 0;
while(1)
{
try
{
boost::asio::io_service io_service;
tcp::resolver resolver(io_service);
tcp::resolver::query query("127.0.0.1", "10000");
tcp::resolver::iterator endpoint_iterator =resolver.resolve(query);
tcp::endpoint endpoint = *endpoint_iterator;
tcp::socket socket(io_service);
socket.close();
socket.connect(endpoint);
std::cout << i++ << std::endl;
}
catch (std::exception& e)
{
std::cerr << e.what() << std::endl;
}
}
return 0;
}
I just want to limit the server to accepting 10 clients.
However, the client prints the error information only after printing an "amazing" 210 (never more, never less) consecutive numbers.
What happened??
I've changed server.cpp a bit: first, I reconfigured acceptor_ in the constructor; then I removed the while loop and added acceptor_.close();
#include <boost/asio/io_service.hpp>
#include <boost/asio.hpp>
#include <boost/bind.hpp>
using namespace boost::asio;
using namespace boost::asio::ip;
class tcp_server
{
public:
tcp_server(boost::asio::io_service& io_service)
: acceptor_(io_service),limit(0)
{
tcp::endpoint endpoint(tcp::v4(), 10000);
acceptor_.open(endpoint.protocol());
acceptor_.bind(endpoint);
acceptor_.listen(1); //accept 1 connection at a time
start_accept();
}
private:
void start_accept()
{
tcp::socket* socket = new tcp::socket(acceptor_.io_service());
acceptor_.async_accept(*socket,
boost::bind(
&tcp_server::handle_accept,
this,
socket,
boost::asio::placeholders::error));
}
void handle_accept(tcp::socket* s, const boost::system::error_code& error)
{
if (!error)
{
++limit;
if (limit < 9)
{
start_accept();
}
else
{
acceptor_.close();
}
}
}
tcp::acceptor acceptor_;
int limit;
};
int main()
{
try
{
boost::asio::io_service io_service;
tcp_server server(io_service);
io_service.run();
}
catch (std::exception& e)
{
std::cerr << e.what() << std::endl;
}
return 0;
}
I suppose the default acceptor can queue around 200 connection events at a time (the listen backlog). You open a socket and close it from the client side in an infinite loop; as a result you open and close a connection 200-odd times, but at any moment it is still 1 connection, 1 socket.
Capping it to 1 by calling listen(1) forces the acceptor to fire an event for each connection. You increase the count, then the client closes the connection. This way you correctly count each connection event.
Last note: async I/O uses one thread to process connection events, retrieved data, etc. Thus, using mutexes is not necessary.
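To make that backlog explicit, a small sketch (the default backlog and the exact count the client sees are platform dependent, so the 200/210 figures are plausible rather than guaranteed):
#include <boost/asio.hpp>
int main()
{
    using boost::asio::ip::tcp;
    boost::asio::io_service io_service;
    // The convenience constructor opens, binds and listens with the
    // implementation's default (fairly large) backlog:
    tcp::acceptor with_default(io_service, tcp::endpoint(tcp::v4(), 10000));
    // Doing the steps by hand lets you pick the backlog yourself,
    // exactly as the modified server above does:
    tcp::endpoint ep(tcp::v4(), 10001);
    tcp::acceptor capped(io_service);
    capped.open(ep.protocol());
    capped.bind(ep);
    capped.listen(1); // the OS queues at most roughly one un-accepted connection
}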