I am using a blocking queue implementation I found on this website, which looked pretty solid.
The blocking queue uses boost::mutex.
It sometimes throws an exception:
terminate called after throwing an instance of 'boost::exception_detail::clone_impl<boost::exception_detail::error_info_injector<boost::system::system_error> >'
what(): Bad file descriptor
Here's the blocking queue code:
#include <boost/thread/mutex.hpp>
#include <boost/thread/thread.hpp>
#include <boost/thread/condition_variable.hpp>
#include <exception>
#include <list>
#include <stdio.h>
struct BlockingQueueTerminate
: std::exception
{};
namespace tools {
template<class T>
class BlockingQueue
{
private:
boost::mutex mtx_;
boost::condition_variable cnd_;
std::list<T> q_;
unsigned blocked_;
bool stop_;
public:
BlockingQueue()
: blocked_()
, stop_()
{}
~BlockingQueue()
{
this->stop(true);
}
void stop(bool wait)
{
// tell threads blocked on BlockingQueue::pull() to leave
boost::mutex::scoped_lock lock(mtx_);
stop_ = true;
cnd_.notify_all();
if(wait) // wait till all threads blocked on the queue leave BlockingQueue::pull()
while(blocked_)
cnd_.wait(lock);
}
void put(T t)
{
boost::mutex::scoped_lock lock(mtx_); // The exception is thrown here !
q_.push_back(t);
cnd_.notify_one();
}
T pull()
{
boost::mutex::scoped_lock lock(mtx_);
++blocked_;
while(!stop_ && q_.empty())
cnd_.wait(lock);
--blocked_;
if(stop_) {
cnd_.notify_all(); // tell stop() this thread has left
throw BlockingQueueTerminate();
}
T front = q_.front();
q_.pop_front();
return front;
}
};
}
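To illustrate how the queue is meant to be driven (a minimal sketch I wrote for this question, not my real code - the real context is in EDIT 1 below): workers pull() until stop() makes pull() throw BlockingQueueTerminate.
#include <iostream>
int main()
{
    tools::BlockingQueue<int> q;
    boost::thread worker([&q] {
        try {
            for (;;)
                std::cout << q.pull() << '\n'; // blocks until put() or stop()
        } catch (BlockingQueueTerminate&) {
            // stop() was called; leave the worker loop
        }
    });
    q.put(1);
    q.put(2);
    q.stop(true); // wake the worker and wait until it has left pull()
    worker.join();
}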
Can anyone spot what's going wrong here? I have spent all day trying to figure it out, in vain; I think I need an outside pair of eyes.
Look for the comment '// The exception is thrown here !' to see exactly where the problem occurs.
EDIT 1:
The context: I'm using this blocking queue to build an async MySQL wrapper.
Here's my MySQL.hh:
#ifndef MYSQL_HH_
# define MYSQL_HH_
# include <boost/asio.hpp>
# include <boost/thread.hpp>
# include <boost/function.hpp>
# include <mysql++/mysql++.h>
# include <queue>
# include "async_executor.hh"
# include "BlockingQueue.hh"
class t_mysql_event {
public:
t_mysql_event(std::string query, boost::function<void(mysqlpp::StoreQueryResult)> cb) :
m_query(query), m_store_cb(cb), m_store_bool(true) {}
t_mysql_event(std::string query, boost::function<void()> cb) :
m_query(query), m_exec_cb(cb), m_store_bool(false) {}
bool is_store_query() {
return m_store_bool;
}
std::string toString() {
return m_query;
}
std::string m_query;
boost::function<void(mysqlpp::StoreQueryResult)> m_store_cb;
boost::function<void()> m_exec_cb;
private:
bool m_store_bool;
};
namespace pools {
class MySQL {
public:
~MySQL() {}
static MySQL* create_instance(boost::asio::io_service& io);
static MySQL* get_instance();
void exec(std::string query, boost::function<void()> cb);
void store(std::string query, boost::function<void(mysqlpp::StoreQueryResult)> cb);
private:
MySQL(boost::asio::io_service& io) : executor(io, 100), parent_io(io), m_strand(io)
{
for (int i=0; i < 100; ++i) {
boost::thread(boost::bind(&MySQL::retreive, this));
}
}
void async_exec(std::string query, boost::function<void()> cb, mysqlpp::Connection& conn);
void async_store(std::string query, boost::function<void(mysqlpp::StoreQueryResult)> cb, mysqlpp::Connection& conn);
void retreive();
private:
tools::async_executor executor;
boost::asio::io_service& parent_io;
boost::asio::strand m_strand;
tools::BlockingQueue<t_mysql_event*> m_events;
std::queue<mysqlpp::Connection*> m_stack;
};
}
#endif //MYSQL_HH_
Here's MySQL.cc:
#include "MySQL.hh"
static pools::MySQL* _instance = 0;
namespace pools {
MySQL* MySQL::create_instance(boost::asio::io_service& io) {
if (!_instance)
_instance = new MySQL(io);
return _instance;
}
MySQL* MySQL::get_instance() {
if (!_instance) {
exit(1);
}
return _instance;
}
void MySQL::exec(std::string query, boost::function<void()> cb) {
m_events.put(new t_mysql_event(query, cb));
}
void MySQL::store(std::string query, boost::function<void(mysqlpp::StoreQueryResult)> cb) {
m_events.put(new t_mysql_event(query, cb));
}
void MySQL::retreive() {
mysqlpp::Connection conn("***", "***", "***", "***");
for(;;) {
t_mysql_event *event = m_events.pull();
if (event->is_store_query())
async_store(event->m_query, event->m_store_cb, conn);
else
async_exec(event->m_query, event->m_exec_cb, conn);
delete event;
}
}
void MySQL::async_exec(std::string query, boost::function<void()> cb, mysqlpp::Connection& conn) {
mysqlpp::Query db_q = conn.query(query.c_str());
db_q.exec();
parent_io.post(cb);
}
void MySQL::async_store(std::string query, boost::function<void(mysqlpp::StoreQueryResult)> cb, mysqlpp::Connection& conn) {
mysqlpp::Query db_q = conn.query(query.c_str());
mysqlpp::StoreQueryResult res = db_q.store();
parent_io.post(boost::bind(cb, res));
}
}
Afterwards:
class MyClass {
public:
MyClass() : _mysql(pools::MySQL::get_instance()) {}
void startQueries();
private:
void Query1() {
std::stringstream query("");
query << "INSERT INTO Table1 ***";
_mysql->exec(query.str(),
boost::bind(&MyClass::Query2, this));
}
void Query2() {
std::stringstream query("");
query << "INSERT INTO Table2 ***";
_mysql->exec(query.str(),
boost::bind(&MyClass::Query3, this));
}
void Query3() {
std::stringstream query("");
query << "INSERT INTO Table3 ***";
_mysql->exec(query.str(),
boost::bind(&MyClass::done, this));
}
void done() {}
pools::MySQL *_mysql;
};
Hopefully that answers some of the requests for more information...
Funny thing:
If I replace every _mysql with pools::MySQL::get_instance(), it does not seem to crash.
But I suspect there is a far more fundamental error underneath...
This exception can be thrown if the queue has already been destroyed while you are still calling its put() method. Check this by putting a breakpoint (or a print statement) in the queue's destructor.
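For example, a trace like this in the class above would confirm it (a sketch, using the fprintf already available via stdio.h):
~BlockingQueue()
{
    fprintf(stderr, "BlockingQueue %p is being destroyed\n", static_cast<void*>(this));
    this->stop(true);
}
If the trace appears before the crash, whatever owns the queue (here, the MySQL singleton) dies while worker threads are still calling put()/pull() on it.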
Related
In the following code, I create a toy class that has one thread writing to a queue while another thread reads from that queue and prints it to stdout. Now, in order to shut the system down cleanly, I set up a handler for SIGINT. I expect the signal handler to set the std::atomic<bool> variable stopFlag, which will lead threadB to push a poison pill (sentinel) onto the queue; on encountering it, threadA will halt.
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <csignal>
#include <functional>
#include <iostream>
#include <memory>
#include <mutex>
#include <queue>
#include <string>
#include <thread>
class TestClass
{
public:
TestClass();
~TestClass();
void shutDown();
TestClass(const TestClass&) = delete;
TestClass& operator=(const TestClass&) = delete;
private:
void init();
void postResults();
std::string getResult();
void processResults();
std::atomic<bool> stopFlag;
std::mutex outQueueMutex;
std::condition_variable outQueueConditionVariable;
std::queue<std::string> outQueue;
std::unique_ptr<std::thread> threadA;
std::unique_ptr<std::thread> threadB;
};
void TestClass::init()
{
threadA = std::make_unique<std::thread>(&TestClass::processResults, std::ref(*this));
threadB = std::make_unique<std::thread>(&TestClass::postResults, std::ref(*this));
}
TestClass::TestClass():
stopFlag(false)
{
init();
}
TestClass::~TestClass()
{
threadB->join();
}
void TestClass::postResults()
{
while(true)
{
std::this_thread::sleep_for(std::chrono::milliseconds(2000));
std::string name = "ABCDEF";
{
std::unique_lock<std::mutex> lock(outQueueMutex);
outQueue.push(name);
outQueueConditionVariable.notify_one();
}
if(stopFlag)
{
/*For shutting down output thread*/
auto poisonPill = std::string();
{
std::unique_lock<std::mutex> lock(outQueueMutex);
outQueue.push(poisonPill);
outQueueConditionVariable.notify_one();
}
threadA->join();
break;
}
}
}
void TestClass::shutDown()
{
stopFlag = true;
}
std::string TestClass::getResult()
{
std::string result;
{
std::unique_lock<std::mutex> lock(outQueueMutex);
while(outQueue.empty())
{
outQueueConditionVariable.wait(lock);
}
result= outQueue.front();
outQueue.pop();
}
return result;
}
void TestClass::processResults()
{
while(true)
{
const auto result = getResult();
if(result.empty())
{
break;
}
std::cout << result << std::endl;
}
}
static void sigIntHandler(std::shared_ptr<TestClass> t, int)
{
t->shutDown();
}
static std::function<void(int)> handler;
int main()
{
auto testClass = std::make_shared<TestClass>();
handler = std::bind(sigIntHandler, testClass, std::placeholders::_1);
std::signal(SIGINT, [](int n){ handler(n);});
return 0;
}
I compiled this with gcc 5.2, using the -std=c++14 flag. On hitting Ctrl-C on my CentOS 7 machine, I get the following error:
terminate called after throwing an instance of 'std::system_error'
what(): Invalid argument
Aborted (core dumped)
Please help me understand what is going on.
What happens is that your main function exits immediately, destroying the global handler object and then testClass. The main thread then gets blocked in TestClass::~TestClass. The signal handler ends up accessing already-destroyed objects, which leads to undefined behaviour.
The root cause is undefined object ownership due to shared pointers - you do not know what ends up destroying your objects, or when.
A more general approach is to use a dedicated thread to handle all signals and block signals in all other threads. That signal-handling thread can then call any function upon receiving a signal.
You also do not need the smart pointers and function wrappers here at all.
Example:
#include <chrono>
#include <condition_variable>
#include <cstdlib>
#include <iostream>
#include <mutex>
#include <queue>
#include <signal.h>
#include <string>
#include <thread>
class TestClass
{
public:
TestClass();
~TestClass();
void shutDown();
TestClass(const TestClass&) = delete;
TestClass& operator=(const TestClass&) = delete;
private:
void postResults();
std::string getResult();
void processResults();
std::mutex outQueueMutex;
std::condition_variable outQueueConditionVariable;
std::queue<std::string> outQueue;
bool stop = false;
std::thread threadA;
std::thread threadB;
};
TestClass::TestClass()
: threadA(std::thread(&TestClass::processResults, this))
, threadB(std::thread(&TestClass::postResults, this))
{}
TestClass::~TestClass() {
threadA.join();
threadB.join();
}
void TestClass::postResults() {
while(true) {
std::this_thread::sleep_for(std::chrono::milliseconds(2000));
std::string name = "ABCDEF";
{
std::unique_lock<std::mutex> lock(outQueueMutex);
if(stop)
return;
outQueue.push(name);
outQueueConditionVariable.notify_one();
}
}
}
void TestClass::shutDown() {
std::unique_lock<std::mutex> lock(outQueueMutex);
stop = true;
outQueueConditionVariable.notify_one();
}
std::string TestClass::getResult() {
std::string result;
{
std::unique_lock<std::mutex> lock(outQueueMutex);
while(!stop && outQueue.empty())
outQueueConditionVariable.wait(lock);
if(stop)
return result;
result= outQueue.front();
outQueue.pop();
}
return result;
}
void TestClass::processResults()
{
while(true) {
const auto result = getResult();
if(result.empty())
break;
std::cout << result << std::endl;
}
}
int main() {
// Block signals in all threads.
sigset_t sigset;
sigfillset(&sigset);
::pthread_sigmask(SIG_BLOCK, &sigset, nullptr);
TestClass testClass;
std::thread signal_thread([&testClass]() {
// Signals stay blocked; sigwaitinfo() consumes them synchronously here.
sigset_t sigset;
sigfillset(&sigset);
int signo = ::sigwaitinfo(&sigset, nullptr);
if(-1 == signo)
std::abort();
std::cout << "Received signal " << signo << '\n';
testClass.shutDown();
});
signal_thread.join();
}
On your platform, the original signal handler is invoked when a real SIGINT arrives. The set of functions that may be called from inside a signal handler is quite limited, and calling anything else leads to undefined behavior.
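If you must stick with std::signal, the classic portable pattern is a handler that does nothing but set a volatile std::sig_atomic_t flag, which a normal thread then polls (a minimal sketch, independent of the code above):
#include <chrono>
#include <csignal>
#include <iostream>
#include <thread>
namespace {
volatile std::sig_atomic_t g_stop = 0; // the only thing the handler touches
}
extern "C" void on_sigint(int) { g_stop = 1; }
int main()
{
    std::signal(SIGINT, on_sigint);
    while (!g_stop) {
        // normal work goes here; the flag is polled at convenient points
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
    }
    std::cout << "stopping cleanly\n";
}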
I am using an online C++11 compiler, found here: cpp.sh (C++ Shell).
In my current project, I would like to have a watchdog class, to be able to somehow check the status of a thread or an FSM (for example).
After some work (I'm not a C++11 guru), I finally got the code below, which compiles OK.
I also did some basic/trivial tests, but it seems the test program doesn't want to exit:
it says "Program running" and the only way to (force) exit is to hit the "Stop" button... :(
So, my question: what am I doing wrong?
Any ideas or suggestions you can provide are highly appreciated.
Here is the full code, including my test app:
Watchdog (as MCVE):
#include <thread>
#include <functional>
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <iostream>
using namespace std::chrono;
class Watchdog
{
public:
Watchdog();
~Watchdog();
void Start(unsigned int milliseconds, std::function<void()> callback = 0);
void Stop();
void Pet();
private:
unsigned int m_interval;
std::atomic<bool> m_running;
std::thread m_thread;
std::function<void()> m_callback;
std::mutex m_mutex;
steady_clock::time_point m_lastPetTime;
std::condition_variable m_stopCondition;
void Loop();
};
Watchdog::Watchdog()
{
m_running = false;
}
Watchdog::~Watchdog()
{
Stop();
}
void Watchdog::Start(unsigned int milliseconds, std::function<void()> callback)
{
std::unique_lock<std::mutex> locker(m_mutex);
if(m_running == false)
{
m_lastPetTime = steady_clock::now();
m_interval = milliseconds;
m_callback = callback;
m_running = true;
m_thread = std::thread(&Watchdog::Loop, this);
}
}
void Watchdog::Stop()
{
std::unique_lock<std::mutex> locker(m_mutex);
if(m_running == true)
{
m_running = false;
m_stopCondition.notify_all();
m_thread.join();
}
}
void Watchdog::Pet()
{
std::unique_lock<std::mutex> locker(m_mutex);
m_lastPetTime = steady_clock::now();
m_stopCondition.notify_all();
}
void Watchdog::Loop()
{
std::unique_lock<std::mutex> locker(m_mutex);
while(m_running == true)
{
if(m_stopCondition.wait_for(locker, milliseconds(m_interval)) == std::cv_status::timeout)
{
if(m_callback != nullptr)
m_callback();
}
}
}
int main(int argc, char *argv[])
{
Watchdog wdog;
wdog.Start(3000, [] { std::cout << " WDOG TRIGGERED!!! "; });
for(auto i = 0; i < 10; i++)
{
std::cout << "[+]";
wdog.Pet();
std::this_thread::sleep_for(std::chrono::milliseconds(500));
}
}
You have a deadlock here:
void Watchdog::Stop()
{
std::unique_lock<std::mutex> locker(m_mutex);
if(m_running == true)
{
m_running = false;
m_stopCondition.notify_all();
m_thread.join();
^ ~~~~~~~~~~~~~~
m_mutex is locked; m_thread cannot continue execution
}
}
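One way to fix it (a sketch): release the mutex before joining, so Loop() can wake up, observe m_running == false, and finish.
void Watchdog::Stop()
{
    {
        std::unique_lock<std::mutex> locker(m_mutex);
        if(!m_running)
            return;
        m_running = false;
        m_stopCondition.notify_all();
    } // m_mutex is released here, so Loop() can reacquire it and exit
    m_thread.join();
}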
One additional suggestion: use plain conditions (if(m_running)); don't compare against true or false.
I need to write a dynamic library which should export three functions:
bool init_sender(const char* ip_addr, int port);
void cleanup_sender();
void send_command(const char* cmd, int len);
init_sender should connect to server synchronously and return true / false according to whether it was success or not.
cleanup_sender should wait for all commands to complete and then return.
send_command should send the specified command to the server asynchronously and return as fast as possible.
So I wrote the following code:
boost::asio::io_service g_io_service;
std::unique_ptr<boost::asio::io_service::work> g_work;
boost::asio::ip::tcp::socket g_sock(g_io_service);
boost::thread g_io_service_th;
void io_service_processor()
{
g_io_service.run();
}
bool __stdcall init_sender(const char* ip_addr, int port)
{
try
{
g_work = std::make_unique<boost::asio::io_service::work>(g_io_service);
boost::asio::ip::tcp::resolver resolver(g_io_service);
boost::asio::connect(g_sock, resolver.resolve({ ip_addr, std::to_string(port) }));
g_io_service_th = boost::thread(io_service_processor);
return true;
}
catch (const std::exception& ex)
{
return false;
}
}
void __stdcall cleanup_sender()
{
g_work.reset();
if (g_io_service_th.joinable())
{
g_io_service_th.join();
}
}
void async_write_cb(
const boost::system::error_code& error,
std::size_t bytes_transferred)
{
// TODO: implement
}
void __stdcall send_command(const char* cmd, int len)
{
boost::asio::async_write(g_sock, boost::asio::buffer(cmd, len), async_write_cb);
}
As far as I understand from the Boost.Asio documentation, all the commands posted by my async_write calls will be executed from one single thread (the one that calls run -- g_io_service_th in my case). Am I right? If so, it doesn't seem fully asynchronous to me. What could I do to change this behavior and send several commands at the same time from several threads? Should I create a boost::thread_group like this
for (int i = 0; i < pool_size; ++i)
{
_thread_group.create_thread(boost::bind(&boost::asio::io_service::run, &_io_service));
}
or is there any other way?
You're asking a big question, and there's a lot to learn. Probably the most important thing to understand is how to use a work object.
edit: reference to async_write restriction:
http://www.boost.org/doc/libs/1_59_0/doc/html/boost_asio/reference/async_write/overload1.html
quoting from the documentation:
This operation is implemented in terms of zero or more calls to the stream's async_write_some function, and is known as a composed operation. The program must ensure that the stream performs no other write operations (such as async_write, the stream's async_write_some function, or any other composed operations that perform writes) until this operation completes.
Your asio thread code should look something like this:
#include <iostream>
#include <vector>
#include <boost/asio.hpp>
#include <thread>
#include <functional>
struct service_loop
{
using io_service = boost::asio::io_service;
io_service& get_io_service() {
return _io_service;
}
service_loop(size_t threads = 1)
: _strand(_io_service)
, _work(_io_service)
, _socket(_io_service)
{
for(size_t i = 0 ; i < threads ; ++i)
add_thread();
}
~service_loop() {
stop();
}
// adding buffered sequential writes...
void write(const char* data, size_t length)
{
_strand.dispatch([this, v = std::vector<char>(data, data + length)] {
_write_buffer.insert(std::end(_write_buffer), v.begin(), v.end());
check_write();
});
}
private:
std::vector<char> _write_buffer;
bool _writing = false; // must start out false, or check_write() may never send
void check_write()
{
if (!_writing and !_write_buffer.empty()) {
auto pv = std::make_shared<std::vector<char>>(std::move(_write_buffer));
_writing = true;
_write_buffer.clear();
boost::asio::async_write(_socket,
boost::asio::buffer(*pv),
[this, pv] (const boost::system::error_code& ec, size_t written) {
_strand.dispatch(std::bind(&service_loop::handle_write,
this,
ec,
written));
});
}
}
void handle_write(const boost::system::error_code& ec, size_t written)
{
_writing = false;
if (ec) {
// handle error somehow
}
else {
check_write();
}
}
private:
io_service _io_service;
io_service::strand _strand;
io_service::work _work;
std::vector<std::thread> _threads;
boost::asio::ip::tcp::socket _socket;
void add_thread()
{
_threads.emplace_back(std::bind(&service_loop::run_thread, this));
}
void stop()
{
_io_service.stop();
for(auto& t : _threads) {
if(t.joinable()) t.join();
}
}
void run_thread()
{
while(!_io_service.stopped())
{
try {
_io_service.run();
}
catch(const std::exception& e) {
// report exceptions here
}
}
}
};
using namespace std;
auto main() -> int
{
service_loop sl;
sl.write("hello", 5);
sl.write(" world", 6);
std::this_thread::sleep_for(std::chrono::seconds(10));
return 0;
}
I am working on a network application using ASIO and have referred to the Chat-Server/Client example.
I have asked a similar question Here.
To explain better, I am adding more code here:
My CServer Class
class CServer {
private:
mutable tcp::acceptor acceptor_; // only in the listener
asio::io_service& io_;
CSerSessionsManager mng_;
std::string ip_;
std::string port_;
public:
CServer(asio::io_service& io_service, const std::string IP, const std::string port) : io_(io_service), acceptor_(io_service)
, ip_(IP), port_(port)
{
DEBUG_MSG("Listener Created");
}
~CServer()
{
DEBUG_MSG("Listener Destroyed");
acceptor_.close();
}
void initProtocol()
{
DEBUG_MSG(" Protocol Initiated");
std::array<unsigned char, 4> ip;
std::string delimiter = ".";
//Parse the IP String
size_t pos = 0;
auto i = 0;
std::string token;
while ((pos = ip_.find(delimiter)) != std::string::npos) {
token = ip_.substr(0, pos);
ip[i] = std::stoi(token);//what if stoi fails
i++;
ip_.erase(0, pos + delimiter.length());
}
ip[i] = std::stoi(ip_);
asio::ip::address_v4 address(ip);
tcp::endpoint ep(address, std::stoi(port_));
static std::mutex m;
std::unique_lock<std::mutex> lck(m, std::defer_lock);
//Critical Section start
lck.lock();
acceptor_ = tcp::acceptor(io_, ep);//Creating IOService
lck.unlock();
//Critical Section End
listen();
}
void listen()
{
DEBUG_MSG("!==============================================================!");
////Critical Section
static std::mutex m;
std::lock_guard<std::mutex> lock(m);
sessionPtr newSession = std::make_shared<CSerSession>(io_, mng_);
try
{
acceptor_.async_accept(newSession->socket(), std::bind(&CServer::handle_accept, /*shared_from_this()*/ this, newSession,
std::placeholders::_1));
///*asio::error_code ec;
//pSocket_->shutdown(asio::ip::tcp::socket::shutdown_send, ec);*/
}
catch (const std::bad_weak_ptr& e)
{
DEBUG_MSG(e.what());
throw e;
}
DEBUG_MSG("Listen Activated");
}
void handle_accept(sessionPtr newSession, const asio::error_code& error)
{
if (!acceptor_.is_open())
{
return;
}
if (!error)
{
DEBUG_MSG("Incoming Session accepted");
//Do I need a Lock here?
//Critical Section
static std::mutex m;
std::lock_guard<std::mutex> lock(m);
newSession->startSession();
listen();
}
else
{
DEBUG_MSG("Listen_Error");
// //throw ASIOError(Listen_Error);
DEBUG_MSG(error.message());
return;
}
}
};
My CSerSessionsManager Class
class CSerSessionsManager{
private:
std::set<sessionPtr> sessions_; //Active Sessions : Online Info
public:
CSerSessionsManager();
~CSerSessionsManager();
void addSession(sessionPtr session);
void dropSession(sessionPtr session);
};
CSerSessionsManager::CSerSessionsManager()
{
DEBUG_MSG("Construction");
}
CSerSessionsManager::~CSerSessionsManager()
{
DEBUG_MSG("Destruction");
}
void CSerSessionsManager::addSession(sessionPtr session)
{
DEBUG_MSG("Incoming Session Entry saved");
//Critical Section
static std::mutex m;
std::lock_guard<std::mutex> lock(m);
sessions_.insert(session);
}
void CSerSessionsManager::dropSession(sessionPtr session)
{
//Properly handle Existing connections first shutdown sockets
DEBUG_MSG("Session dropped");
//Critical Section
static std::mutex m;
std::lock_guard<std::mutex> lock(m);
std::set<sessionPtr>::iterator it;
for (it = sessions_.begin(); it != sessions_.end(); ++it)
{
if ((*it) == session)
{
sessions_.erase(session);
return;
}
}
//throw ASIOError(Session_Not_Found);
}
And my CSerSession Class
class CSerSession : public std::enable_shared_from_this < CSerSession > {
private:
mutable tcp::socket socket_; // client connection
CSerSessionsManager& manager_;
std::string ip_;
std::string port_;
CBuffer msg_;
public:
CSerSession(asio::io_service& io_service, CSerSessionsManager& mng) :
manager_(mng), socket_(io_service)
{
DEBUG_MSG("Server Session Created");
}
~CSerSession()
{
DEBUG_MSG("Server Session Destroyed");
}
void startSession()
{
DEBUG_MSG("Server Session Started");
//Critical Section
static std::mutex m;
std::lock_guard<std::mutex> lock(m);
manager_.addSession(shared_from_this());//Multiple threads should not try adding section
read(msg_);
}
void handle_read(const asio::error_code& error /*error*/, size_t bytes_transferred /*bytes_transferred*/)
{
if (!error)
{
DEBUG_MSG("Read");
//Critical Section
static std::mutex m;
std::lock_guard<std::mutex> lock(m);
read(msg_);
}
else
{
DEBUG_MSG("Read Error Detected : " << error.message());
//Check If shared_from_this() is valid or not
try
{
//Check if session was already dropped e.g. server object destroying
//i.e. if session object exists
DEBUG_MSG("Dropping Session");
//if (error == asio::error::operation_aborted)
manager_.dropSession(shared_from_this());
}
catch (const std::bad_weak_ptr& e)
{
DEBUG_MSG(e.what());
throw e;
}
return;
}
}
void read(CBuffer & buff)
{
DEBUG_MSG("Read");
asio::async_read(socket_, asio::buffer(const_cast<char *> (buff.getReceived()), buff.buffsize),
std::bind(&CSerSession::handle_read, shared_from_this(),
std::placeholders::_1, std::placeholders::_2));
}
tcp::socket& socket()
{
//Critical Section
static std::mutex m;
std::lock_guard<std::mutex> lock(m);
return socket_;
}
};
I create the CServer object in main as below:
void main()
{
try
{
asio::io_service io_service;
//CServer server(io_service, "Default", "127.0.0.1", "8000");
auto sPtr = std::make_shared<CServer>(io_service, "127.0.0.1", "8000");
sPtr->initProtocol();
//server.initProtocol();
asio::thread t(boost::bind(&asio::io_service::run, &io_service));
}
catch (...)
{
}
system("Pause");
}
The output log I get is as below:
CSerSessionsManager::CSerSessionsManager : 183 : Construction
CServer::CServer : 239 : Listener Created
CServer::initProtocol : 250 : Protocol Initiated
CServer::listen : 288 : !==============================================================!
CSerSession::CSerSession : 108 : Server Session Created
CServer::listen : 309 : Listen Activated
CServer::~CServer : 244 : Listener Destroyed
CSerSessionsManager::~CSerSessionsManager : 188 : Destruction
CSerSession::~CSerSession : 113 : Server Session Destroyed
When the CServer object is destroyed, the associated CSerSession object is destroyed as well, and while returning from ~CSerSession() it throws the exception boost::exception_detail::clone_impl<boost::exception_detail::error_info_injector<std::system_error> > at memory location 0x0277F19C,
at the lines of code below:
#ifndef BOOST_EXCEPTION_DISABLE
throw enable_current_exception(enable_error_info(e));
#else
throw e;
#endif
}
I tried to debug this a lot, and also tried the signal mechanism as discussed in the HTTP Server example, but I am stuck here and not able to proceed further.
The complete code can be checked here:
MyCode
How do I resolve it?
From a fixed version of the linked code (Live On Coliru), I get:
CSerSessionsManager : 184 : Construction
CServer : 240 : Listener Created
initProtocol : 251 : Protocol Initiated
~CServer : 245 : Listener Destroyed
~CSerSessionsManager : 189 : Destruction
NOTE: this was because I already had something listening on port 8000 (yay for error reporting!)
Did the initialization order of the fields fix it? Or is there something not running at all on my system (because of a race condition on my faster machine)?
Looks like the latter, because on Coliru I got
CSerSessionsManager : 184 : Construction
CServer : 240 : Listener Created
initProtocol : 251 : Protocol Initiated
listen : 289 : !===================================!
CSerSession : 109 : Server Session Created
listen : 310 : Listen Activated
~CServer : 245 : Listener Destroyed
~CSerSessionsManager : 189 : Destruction
~CSerSession : 114 : Server Session Destroyed
So, let's have a closer look:
why are you parsing the IP string by hand? That's what address_v4 is for (and ip::tcp::resolver):
DEBUG_MSG(" Protocol Initiated");
asio::ip::address_v4 address = asio::ip::address_v4::from_string(ip_);
tcp::endpoint ep(address, std::stoi(port_));
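Or let a resolver do the work (a sketch; this also copes with host names, not just dotted quads):
tcp::resolver resolver(io_);
tcp::endpoint ep = *resolver.resolve({ ip_, port_ });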
a function-local static mutex is rarely useful. Did you mean to synchronize access to shared resources? Then the mutex needs to be shared (e.g. a member) too
why are you using defer-lock? Use scopes:
{
//Critical Section start
std::lock_guard<std::mutex> lck(mutex_);
acceptor_ = tcp::acceptor(io_, ep);//Creating IOService
//Critical Section End
}
the main thread just exits, never joining the io thread. At least join it, or make it shut down properly before terminating the program:
t.join();
Hungarian naming is really useless here: sPtr doesn't tell me anything. server (or, if you insist, server_ptr) is what you want.
you have out-of-bounds write here:
received_[str.size()] = '\0';
you wanted
received_[len] = '\0';
your empty() doesn't need to loop:
bool empty() const
{
return !received_[0];
}
why are you looping to find an element in an ordered set?
std::set<sessionPtr>::iterator it;
for (it = sessions_.begin(); it != sessions_.end(); ++it)
{
if ((*it) == session)
{
sessions_.erase(session);
return;
}
}
should be
sessions_.erase(session);
addSession/dropSession are internally locking; you don't need to put access to them in a critical section
throw e is an antipattern; a plain throw; re-throws the current exception.
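For example, the catch block in listen() becomes (a sketch, assuming you keep the try/catch at all):
catch (const std::bad_weak_ptr& e)
{
    DEBUG_MSG(e.what());
    throw; // re-throws the original exception: no copy, no slicing
}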
you have redundant tracing almost everywhere (this is what debuggers are for). E.g. DEBUG_MSG("Read")
Locking here is bogus:
tcp::socket& socket()
{
// Critical Section
std::lock_guard<std::mutex> lock(mutex_);
return socket_;
}
The returned reference will not be protected anyway, and the socket is only initialized once.
all the thread locking seems redundant since there is only one service thread
CBuffer msg is a bogus parameter to read(), as the same buffer is always passed. That may be perfectly fine (it's in the same session), so just use the member directly.
this
acceptor_ = tcp::acceptor(io_, ep);
should be
acceptor_.bind(ep);
and not in a critical section (the server is only created once). Hence the initProtocol function can be:
void initProtocol()
{
acceptor_.bind(tcp::endpoint(asio::ip::address_v4::from_string(ip_), std::stoi(port_)));
listen();
}
in listen you're catching bad_weak_ptr, which can't even occur there
here:
//Do I need a Lock here?
//Critical Section
std::lock_guard<std::mutex> lock(mutex_);
newSession->startSession();
you don't need the lock: newSession was bound from a local variable. It's impossible for it to be shared, unless you copied the completion handler (you didn't).
Here's a more fixed up version:
Live On Coliru
#include <iostream>
#include <boost/asio.hpp>
#include <memory>
#include <deque>
#include <set>
#include <iomanip>
#include <mutex>
#include <boost/bind.hpp>
#include <boost/thread.hpp>
#define DEBUG ON
#ifdef DEBUG
#define DEBUG_MSG(str) do {std::cout << std::setw(75) << std::left << __FUNCTION__ \
<< std::setw(3) << std::left << ":" << std::setw(5) << std::left << __LINE__ \
<< std::setw(5) << std::left << ":"\
<< std::left << str \
<< std::endl;} while( false )
#else
#define DEBUG_MSG(str) do { } while ( false )
#endif
namespace asio = boost::asio;
using asio::ip::tcp;
class CSerSession;
using sessionPtr = std::shared_ptr<CSerSession>;
class CSerSessionsManager {
private:
mutable std::mutex mutex_;
std::set<sessionPtr> sessions_; // Active Sessions : Online Info
public:
CSerSessionsManager();
~CSerSessionsManager();
void addSession(sessionPtr session);
void dropSession(sessionPtr session);
};
class CBuffer {
public:
enum { buffsize = 32 };
private:
char received_[buffsize];
public:
CBuffer() : received_{} {}
CBuffer(const std::string str)
{
// Truncate if Overflow
auto len = str.size();
if (len >= buffsize) {
len = buffsize - 1;
}
std::copy(str.begin(), str.begin() + len, received_);
received_[len] = '\0';
}
bool empty() const
{
return !received_[0];
}
const std::string getString() const { return std::string(received_); }
const char* getReceived() const { return received_; }
};
class CSerSession : public std::enable_shared_from_this<CSerSession> {
private:
mutable std::mutex mutex_;
mutable tcp::socket socket_; // client connection
CSerSessionsManager& manager_;
std::string ip_;
std::string port_;
CBuffer msg_;
public:
CSerSession(asio::io_service& io_service, CSerSessionsManager& mng) : socket_(io_service), manager_(mng)
{
DEBUG_MSG("Server Session Created");
}
~CSerSession() { DEBUG_MSG("Server Session Destroyed"); }
void startSession()
{
DEBUG_MSG("Server Session Started");
manager_.addSession(shared_from_this()); // Multiple threads should not try adding section
read();
}
tcp::socket& socket() { return socket_; }
private:
void handle_read(const boost::system::error_code& error /*error*/, size_t /*bytes_transferred*/)
{
if (!error) {
read();
} else {
DEBUG_MSG("Read Error Detected : " << error.message());
manager_.dropSession(shared_from_this()); // might throw
}
}
void read()
{
std::lock_guard<std::mutex> lock(mutex_);
DEBUG_MSG("Read");
asio::async_read(socket_, asio::buffer(const_cast<char*>(msg_.getReceived()), msg_.buffsize),
std::bind(&CSerSession::handle_read, shared_from_this(), std::placeholders::_1, std::placeholders::_2));
}
};
CSerSessionsManager::CSerSessionsManager()
{
DEBUG_MSG("Construction");
}
CSerSessionsManager::~CSerSessionsManager()
{
DEBUG_MSG("Destruction");
}
void CSerSessionsManager::addSession(sessionPtr session)
{
std::lock_guard<std::mutex> lock(mutex_);
DEBUG_MSG("Incoming Session Entry saved");
sessions_.insert(session);
}
void CSerSessionsManager::dropSession(sessionPtr session)
{
std::lock_guard<std::mutex> lock(mutex_);
DEBUG_MSG("Session dropped");
sessions_.erase(session);
}
class CServer {
private:
mutable std::mutex mutex_;
asio::io_service& io_;
mutable tcp::acceptor acceptor_; // only in the listener
CSerSessionsManager mng_;
public:
CServer(asio::io_service& io_service, const std::string& IP, int port)
: io_(io_service), acceptor_(io_, tcp::endpoint(asio::ip::address::from_string(IP), port))
{
DEBUG_MSG("Listener Created");
}
~CServer()
{
DEBUG_MSG("Listener Destroyed");
acceptor_.close(); // likely to be redundant
}
void initProtocol()
{
listen();
}
private:
void listen()
{
DEBUG_MSG("!==============================================================!");
sessionPtr newSession = std::make_shared<CSerSession>(io_, mng_);
std::lock_guard<std::mutex> lock(mutex_);
acceptor_.async_accept(newSession->socket(), std::bind(&CServer::handle_accept, this, newSession,
std::placeholders::_1));
}
void handle_accept(sessionPtr newSession, const boost::system::error_code& error)
{
if (error || !acceptor_.is_open()) {
DEBUG_MSG("Listen_Error");
DEBUG_MSG(error.message());
return;
}
DEBUG_MSG("Incoming Session accepted");
newSession->startSession();
listen();
}
};
int main()
{
try
{
asio::io_service io_service;
auto server = std::make_shared<CServer>(io_service, "127.0.0.1", 8973);
server->initProtocol();
boost::thread t(boost::bind(&asio::io_service::run, &io_service));
boost::this_thread::sleep_for(boost::chrono::seconds(3));
t.join();
}
catch (...)
{
}
}
Prints (for a single connection):
CSerSessionsManager : 123 : Construction
CServer : 156 : Listener Created
listen : 173 : !==============================================================!
CSerSession : 86 : Server Session Created
handle_accept : 190 : Incoming Session accepted
startSession : 93 : Server Session Started
addSession : 134 : Incoming Session Entry saved
read : 114 : Read
listen : 173 : !==============================================================!
CSerSession : 86 : Server Session Created
handle_read : 106 : Read Error Detected : End of file
dropSession : 141 : Session dropped
~CSerSession : 89 : Server Session Destroyed
I'm a complete beginner with boost::asio, so please help me.
I need to write a single-threaded TCP server. The server should accept client connections and continuously read input data from the client sockets. Periodically, the server should send data to the clients. So I have a bit of a problem - all the examples describe the case where we always have the loop
async_receive()
on_receive() -> async_write()
on_write() -> goto 1 :)
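In code, that canonical chain looks roughly like this (just a sketch - session, sock_ and buf_ are placeholder names):
void session::do_read()
{
    sock_.async_receive(boost::asio::buffer(buf_),
        boost::bind(&session::on_receive, this, _1, _2));
}
void session::on_receive(const boost::system::error_code& err, size_t bytes)
{
    if (!err) // on_receive -> async_write
        boost::asio::async_write(sock_, boost::asio::buffer(buf_, bytes),
            boost::bind(&session::on_write, this, _1, _2));
}
void session::on_write(const boost::system::error_code& err, size_t)
{
    if (!err)
        do_read(); // on_write -> goto 1 :)
}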
So my decision was to use a timer to check for data to be sent to the socket.
I wrote a test server and got very strange behavior: it works fine if clients connect, do something, and disconnect one after another with some time delta. But if all clients disconnect simultaneously, I end up in a
situation where the timer handler tries to use members of an already DESTROYED object (locking the critical section).
I can't explain why! Please help!
This video shows how it's reproduced: http://www.youtube.com/watch?v=NMWkD7rqf7Y&feature=youtu.be
Thank you!
#include <boost/none.hpp>
#include <boost/bind.hpp>
#include <boost/asio.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/enable_shared_from_this.hpp>
#include <iostream>
using namespace boost::asio;
using namespace boost::posix_time;
class CIncommingConnection ;
typedef boost::shared_ptr<CIncommingConnection> CIncommingConnectionPtr;
struct IIncomingServer
{
virtual void OnData(CIncommingConnectionPtr pConn, const char *pData, size_t bytes) = 0;
virtual void OnConnected(CIncommingConnectionPtr pConn) = 0;
virtual void OnDisconnected(const boost::system::error_code& err, CIncommingConnectionPtr pConn) = 0;
};
class CAutoLock
{
public:
CAutoLock(CRITICAL_SECTION &cs) :
m_cs(cs)
{
::EnterCriticalSection(&m_cs);
}
~CAutoLock()
{
::LeaveCriticalSection(&m_cs);
}
private:
CRITICAL_SECTION &m_cs;
};
class CIncommingConnection : public boost::enable_shared_from_this<CIncommingConnection>
,boost::noncopyable
{
public:
CIncommingConnection(const std::string sPeerName, boost::asio::io_service &service, IIncomingServer *pServer) :
m_service(service)
,sock_(service)
,m_sPeerName(sPeerName)
,m_pServer(pServer)
,m_timer(service)
{
::InitializeCriticalSection(&m_cs);
std::cout << "CIncommingConnection()" << std::endl ;
}
~CIncommingConnection()
{
std::cout << "CIncommingConnection()~" << std::endl ;
::DeleteCriticalSection(&m_cs);
}
ip::tcp::socket & sock()
{
return sock_;
}
void start()
{
m_pServer->OnConnected(shared_from_this());
do_read();
wait_for_outgoingdata();
}
private:
void stop()
{
sock_.close();
m_timer.cancel();
}
void do_read()
{
sock_.async_receive(buffer(read_buffer_), boost::bind(&CIncommingConnection::handler_read, this, _1, _2) );
}
void do_error(const boost::system::error_code& error)
{
CIncommingConnectionPtr pConn = shared_from_this();
stop() ;
m_pServer->OnDisconnected(error, pConn);
}
void handler_read(const boost::system::error_code& error, std::size_t bytes)
{
if (error)
{
do_error(error);
return ;
}
CIncommingConnectionPtr pConn = shared_from_this() ;
m_pServer->OnData(pConn, read_buffer_, bytes);
do_read();
}
void wait_for_outgoingdata()
{
m_timer.expires_from_now( boost::posix_time::millisec( 100 ) );
m_timer.async_wait( boost::bind( &CIncommingConnection::on_output_queue_timer, this, _1 ) );
}
void on_output_queue_timer(const boost::system::error_code& error)
{
if (error == boost::asio::error::operation_aborted)
{
return ;
}
CAutoLock oLock(m_cs);
if (!m_sOutBuf.empty())
sock_.async_send(buffer(m_sOutBuf), boost::bind(&CIncommingConnection::handler_write, this, _1, _2) );
else
wait_for_outgoingdata();
}
void handler_write(const boost::system::error_code& error, std::size_t bytes)
{
if (error)
return ;
if (bytes)
{
m_sOutBuf = m_sOutBuf.substr(bytes, m_sOutBuf.length()-bytes);
}
wait_for_outgoingdata();
}
private:
ip::tcp::socket sock_;
enum { max_msg = 1024 };
char read_buffer_[max_msg];
char write_buffer_[max_msg];
boost::asio::io_service &m_service ;
std::string m_sPeerName ;
std::string m_sOutBuf;
CRITICAL_SECTION m_cs ;
IIncomingServer *m_pServer;
boost::asio::deadline_timer m_timer;
};
class CIncomingServer : public boost::enable_shared_from_this<CIncomingServer>
, public IIncomingServer
, boost::noncopyable
{
public:
CIncomingServer(boost::asio::io_service &service,
unsigned int port,
bool bAllowManyConnections,
const std::string sPeerName) :
m_acceptor (service, ip::tcp::endpoint(ip::tcp::v4(), port), false)
,m_sPeerName(sPeerName)
,m_port(port)
,m_service(service)
,m_timer(service)
,m_bAllowManyConnections(bAllowManyConnections)
{
}
~CIncomingServer()
{
}
void run()
{
CIncommingConnectionPtr pConn (new CIncommingConnection(m_sPeerName, m_service, this));
m_clients.push_back( pConn );
m_acceptor.async_accept(pConn->sock(), boost::bind(&CIncomingServer::handle_accept, this, _1));
m_timer.expires_from_now( boost::posix_time::millisec( 500 ) );
m_timer.async_wait( boost::bind( &CIncomingServer::on_timer, this ) );
}
private:
void handle_accept(const boost::system::error_code & err)
{
m_clients.back()->start();
CIncommingConnectionPtr pConnNew (new CIncommingConnection(m_sPeerName, m_service, this));
m_clients.push_back( pConnNew );
m_acceptor.async_accept(pConnNew->sock(), boost::bind(&CIncomingServer::handle_accept, this, _1));
}
//IIncomingServer
virtual void OnData(CIncommingConnectionPtr pConn, const char *pData, size_t bytes)
{
std::cout << "Data received" << std::endl ;
}
virtual void OnConnected(CIncommingConnectionPtr pConn)
{
std::cout << "Client connected" << std::endl ;
}
virtual void OnDisconnected(const boost::system::error_code& err, CIncommingConnectionPtr pConn)
{
std::cout << "Client disconnected" << std::endl ;
auto it = std::find(m_clients.begin(), m_clients.end(), pConn) ;
if (it != m_clients.end())
{
m_clients.erase(it);
}
}
void on_timer()
{
//if (NeedTerminate())
//{
// m_service.stop();
// return ;
//}
m_timer.expires_from_now( boost::posix_time::millisec( 500 ) );
m_timer.async_wait( boost::bind( &CIncomingServer::on_timer, this ) );
}
private:
ip::tcp::acceptor m_acceptor ;
std::vector<CIncommingConnectionPtr> m_clients;
std::string m_sPeerName ;
unsigned int m_port ;
boost::asio::io_service &m_service ;
boost::asio::deadline_timer m_timer;
bool m_bAllowManyConnections;
};
int _tmain(int argc, _TCHAR* argv[])
{
boost::asio::io_service service ;
boost::shared_ptr<CIncomingServer> pServer;
try
{
pServer.reset( new CIncomingServer(service, 8000, false, "BS Server"));
pServer->run();
}
catch (const boost::system::system_error &err)
{
std::cout << "Error : " << err.what() << std::endl ;
return 0 ;
}
service.run();
return 0 ;
}
Long story short: you should bind the completion handlers to the shared_ptr returned from shared_from_this(), not to plain this (the so-called shared_from_this idiom). This way you ensure correct, automatic management of your connection objects' lifespan.
Technically, the following happens now: do_error causes two actions to take place:
1. timer cancellation (an asynchronous operation);
2. removal of the CIncommingConnectionPtr from the container (a synchronous operation).
At point (2) the connection gets destroyed, as there are no other shared_ptrs holding it. Then the timer completion handler runs... Crash!
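Applied to the code above, the change is mechanical - bind shared_from_this() wherever plain this is bound now, for example (a sketch of one of the affected functions):
void wait_for_outgoingdata()
{
    m_timer.expires_from_now(boost::posix_time::millisec(100));
    // The bound shared_ptr keeps the connection alive until this
    // handler has run, even if the server drops its own reference.
    m_timer.async_wait(boost::bind(&CIncommingConnection::on_output_queue_timer,
                                   shared_from_this(), _1));
}
Do the same for the async_receive in do_read() and the async_send in on_output_queue_timer().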