I simply get packets from the network and enqueue them in one thread, then consume (dequeue) these packets in another thread.
So I decided to use the Boost library to make a shared queue based on
https://www.quantnet.com/cplusplus-multithreading-boost/
#include <queue>
#include <boost/thread.hpp>
template <typename T>
class SynchronisedQueue
{
private:
std::queue<T> m_queue; // Use STL queue to store data
boost::mutex m_mutex; // The mutex to synchronise on
boost::condition_variable m_cond;// The condition to wait for
public:
// Add data to the queue and notify others
void Enqueue(const T& data)
{
// Acquire lock on the queue
boost::unique_lock<boost::mutex> lock(m_mutex);
// Add the data to the queue
m_queue.push(data);
// Notify others that data is ready
m_cond.notify_one();
} // Lock is automatically released here
// Get data from the queue. Wait for data if not available
T Dequeue()
{
// Acquire lock on the queue
boost::unique_lock<boost::mutex> lock(m_mutex);
// When there is no data, wait till someone fills it.
// Lock is automatically released in the wait and obtained
// again after the wait
while (m_queue.size()==0) m_cond.wait(lock);
// Retrieve the data from the queue
T result=m_queue.front(); m_queue.pop();
return result;
} // Lock is automatically released here
};
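For reference, this is a simplified sketch of how I use it (Packet, ReceivePacketFromNetwork() and HandlePacket() are placeholders for my real packet code):
SynchronisedQueue<Packet> packetQueue;
void NetworkThread()
{
    while (true)
    {
        Packet p = ReceivePacketFromNetwork(); // placeholder for the real receive call
        packetQueue.Enqueue(p);
    }
}
void ConsumerThread()
{
    while (true)
    {
        Packet p = packetQueue.Dequeue(); // blocks while the queue is empty
        HandlePacket(p);                  // placeholder for the real processing
    }
}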
The problem is that while no data is arriving, the Dequeue() method blocks my consumer thread, and when I want to terminate the consumer thread I sometimes cannot end or stop it.
What is the suggested way to end the blocking in Dequeue(), so that I can safely terminate the thread that consumes packets?
Any ideas or suggestions?
PS: The site https://www.quantnet.com/cplusplus-multithreading-boost/ uses "boost::this_thread::interruption_point();" to stop the consumer thread... Because of my legacy code structure this is not possible for me...
Based on the answers, I updated the shared queue like this:
#include <queue>
#include <boost/thread.hpp>
template <typename T>
class SynchronisedQueue
{
public:
SynchronisedQueue()
{
RequestToEnd = false;
EnqueueData = true;
}
void Enqueue(const T& data)
{
boost::unique_lock<boost::mutex> lock(m_mutex);
if(EnqueueData)
{
m_queue.push(data);
m_cond.notify_one();
}
}
bool TryDequeue(T& result)
{
boost::unique_lock<boost::mutex> lock(m_mutex);
while (m_queue.empty() && (! RequestToEnd))
{
m_cond.wait(lock);
}
if( RequestToEnd )
{
DoEndActions();
return false;
}
result= m_queue.front(); m_queue.pop();
return true;
}
void StopQueue()
{
RequestToEnd = true;
Enqueue(NULL);
}
int Size()
{
boost::unique_lock<boost::mutex> lock(m_mutex);
return m_queue.size();
}
private:
void DoEndActions()
{
EnqueueData = false;
while (!m_queue.empty())
{
m_queue.pop();
}
}
std::queue<T> m_queue; // Use STL queue to store data
boost::mutex m_mutex; // The mutex to synchronise on
boost::condition_variable m_cond; // The condition to wait for
bool RequestToEnd;
bool EnqueueData;
};
And here is my test driver:
#include <iostream>
#include <string>
#include "SynchronisedQueue.h"
using namespace std;
SynchronisedQueue<int> MyQueue;
void InsertToQueue()
{
int i= 0;
while(true)
{
MyQueue.Enqueue(++i);
}
}
void ConsumeFromQueue()
{
while(true)
{
int number;
cout << "Now try to dequeue" << endl;
bool success = MyQueue.TryDequeue(number);
if(success)
{
cout << "value is " << number << endl;
}
else
{
cout << " queue is stopped" << endl;
break;
}
}
cout << "Que size is : " << MyQueue.Size() << endl;
}
int main()
{
cout << "Test Started" << endl;
boost::thread startInsertIntoQueue = boost::thread(InsertToQueue);
boost::thread consumeFromQueue = boost::thread(ConsumeFromQueue);
boost::this_thread::sleep(boost::posix_time::seconds(5)); //After 5 seconds
MyQueue.StopQueue();
int endMain;
cin >> endMain;
return 0;
}
For now it seems to work... Based on new suggestions, I changed the stop method to:
void StopQueue()
{
boost::unique_lock<boost::mutex> lock(m_mutex);
RequestToEnd = true;
m_cond.notify_one();
}
Two easy solutions to let the thread end:
send an end message on the queue;
add another condition to the condition-variable wait to command it to end:
while (m_queue.empty() && (! RequestToEnd)) m_cond.wait(lock);
if (RequestToEnd) { doEndActions(); }
else { T result=m_queue.front(); m_queue.pop(); return result; }
First, do you really need to terminate the thread? If not, don't.
If you do have to, then just queue it a suicide pill. I usually send a NULL cast to T. The thread checks the dequeued value and, if it is NULL, cleans up, returns and so dies.
Also, you may need to purge the queue first by removing and delete()ing all the items.
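For example, if the queue holds pointers, the pill can look like this (just a sketch; Packet and Process() stand in for your own types):
void ConsumerLoop(SynchronisedQueue<Packet*>& queue)
{
    while (true)
    {
        Packet* p = queue.Dequeue();
        if (p == NULL)   // the suicide pill: clean up and die
        {
            break;
        }
        Process(p);      // placeholder for the real work
        delete p;
    }
}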
Another option that should be considered is not to block indefinitely in threads. In other words, add a timeout to your blocking calls, like so:
bool TryDequeue(T& result, boost::chrono::milliseconds timeout)
{
boost::unique_lock<boost::mutex> lock(m_mutex);
boost::chrono::system_clock::time_point timeLimit =
boost::chrono::system_clock::now() + timeout;
while (m_queue.empty())
{
if (m_cond.wait_until(lock, timeLimit) ==
boost::cv_status::timeout)
{
return false;
}
}
result = m_queue.front(); m_queue.pop();
return true;
}
Then, in your thread, just have a variable to indicate whether the thread is still running (I took the liberty of turning your consumer into a class):
class Consumer
{
public:
static boost::shared_ptr<Consumer> createConsumer()
{
boost::shared_ptr<Consumer> ret(new Consumer());
ret->_consumeFromQueue = boost::thread(&Consumer::ConsumeFromQueue, ret.get());
return ret;
}
// the destructor must be public so shared_ptr's deleter can destroy the object
~Consumer()
{
_threadRunning = false;
_consumeFromQueue.join();
}
protected:
Consumer()
: _threadRunning(true)
{
}
void ConsumeFromQueue()
{
while(_threadRunning == true)
{
int number;
cout << "Now try to dequeue" << endl;
// use the timed version (100 ms is an arbitrary poll interval)
bool success = MyQueue.TryDequeue(number, boost::chrono::milliseconds(100));
if(success)
{
cout << "value is " << number << endl;
}
// on a timeout, simply loop around and check _threadRunning again
}
cout << "Que size is : " << MyQueue.Size() << endl;
}
bool _threadRunning;
boost::thread _consumeFromQueue;
};
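Hypothetical usage of the factory (a sketch, not tested against your code): keep the consumer alive while packets arrive, then let the destructor stop the loop and join the thread.
int main()
{
    boost::shared_ptr<Consumer> consumer = Consumer::createConsumer();
    boost::this_thread::sleep(boost::posix_time::seconds(5));
    consumer.reset(); // ~Consumer() sets _threadRunning = false and joins
    return 0;
}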
There is no need to hack your queue class just so it can be used in a thread; give it normal interfaces with timeouts, and then use it in the proper way for your use case.
I give more details on why this is a good pattern to follow for threads here:
http://blog.chrisd.info/how-to-run-threads/
Related
I'm trying to solve the dining philosophers problem using the Chandy-Misra algorithm. More explanation here: https://en.wikipedia.org/wiki/Dining_philosophers_problem
I'm using one mutex to lock the modified variables and another one, with a condition variable, to notify when the fork is free to use.
I can't see why all my philosophers are eating at the same time: they are not waiting for the other forks at all. It seems like I'm using the mutexes wrongly.
Philosopher thread:
void philosopher::dine() {
while(!is_initialized); // here the thread waits until all other philosophers are initialized
while(!is_stopped) {
eat();
think(); // here just sleeps for a few seconds
}
}
Eat method:
void philosopher::eat() {
left_fork.request(index);
right_fork.request(index);
std::lock(right_fork.get_mutex(), left_fork.get_mutex());
std::lock_guard<std::mutex> l1( right_fork.get_mutex(), std::adopt_lock );
std::lock_guard<std::mutex> l2( left_fork.get_mutex(), std::adopt_lock );
int num = distribution(mt);
std::cout << "Philsopher " << index << " eats for " << num
<< "seconds." << std::endl;
sleep(num);
right_fork.free();
left_fork.free();
}
How fork class looks:
enum fork_state {
CLEAN, DIRTY
};
class fork_t {
int index;
int owner_id;
fork_state state; // used below; missing from the class as originally posted
mutable std::mutex condition_m;
std::mutex owner_m;
std::condition_variable condition;
public:
fork_t(int _index,int _owner_id);
fork_t(const fork_t &f);
void request(int phil_req);
void free();
std::mutex &get_mutex() { return owner_m; }
fork_t& operator=(fork_t const &f);
};
void fork_t::request(int phil_req) {
while (owner_id != phil_req ) {
std::unique_lock<std::mutex> l(condition_m);
if(state == DIRTY) {
std::lock_guard<std::mutex> lock(owner_m);
state = CLEAN;
owner_id = phil_req;
} else {
while(state == CLEAN) {
std::cout<<"Philosopher " << phil_req << " is waiting for"<< index <<std::endl;
condition.wait(l);
}
}
}
}
void fork_t::free() {
state = DIRTY;
condition.notify_one();
}
At the start all forks are given to philosophers with lower id.
I would be grateful for any tips.
This code is taken from the following link.
I really cannot make sense of this error; I am seeing it for the first time.
Entire Code
#include <boost/thread/thread.hpp>
#include <boost/lockfree/queue.hpp>
#include <iostream>
#include <queue>
#include <boost/atomic.hpp>
template<typename QueueContents, template<typename QueueContents> class QueueType>
class WorkerThread
{
public:
WorkerThread(QueueType<QueueContents> &Queue, int NoOfIt, int Delay)
: m_Queue(Queue) // queue shared with consumer
, m_Delay(Delay) // delay for simulating heavy work
, m_NoOfIt(NoOfIt) // number of work items to produce
{}
void Start() // start work
{
m_Thread = boost::thread(&WorkerThread::Work, this);
}
void Stop() // interrupt work
{
m_Thread.interrupt();
}
virtual void Work() = 0;
void WaitUntilFinished()
{
m_Thread.join();
}
protected:
int m_NoOfIt;
int m_Delay;
QueueType<QueueContents> &m_Queue;
private:
boost::thread m_Thread;
};
template<typename QueueContents, template<typename QueueContents> class QueueType>
class Producer : public WorkerThread<QueueContents, QueueType>
{
public:
Producer(QueueType<QueueContents> &Queue, int NoOfIt, int Delay)
: WorkerThread<QueueContents, QueueType>(Queue, NoOfIt, Delay)
{}
void Work()
{
for (QueueContents i=0; i < this->m_NoOfIt; ++i)
{
// send numbers 0 to 999 into the queue
boost::this_thread::sleep(boost::posix_time::milliseconds(this->m_Delay));
std::ostringstream msg;
msg << "[PRODUCER] Produced value " << i << std::endl;
std::cout << msg.str();
this->m_Queue.Push(i);
}
}
};
// New BSD License
class ResetableEvent
{
bool m_EventStatus;
bool m_AutoResetEvent;
boost::condition_variable m_Signal;
boost::mutex m_Mutex;
public:
explicit ResetableEvent(bool _auto_reset = false)
: m_EventStatus(false)
, m_AutoResetEvent(_auto_reset)
{}
void wait() // wait for event
{
boost::unique_lock<boost::mutex> lock(m_Mutex);
if (m_EventStatus)
{
if (m_AutoResetEvent)
m_EventStatus = false;
return;
}
do
{
m_Signal.wait(lock);
} while(!m_EventStatus);
if (m_AutoResetEvent)
m_EventStatus = false;
}
void set() // this notifies threads waiting for this event
// and makes sure that other threads about to wait
// can immediately proceed
{
boost::lock_guard<boost::mutex> lock(m_Mutex);
m_EventStatus = true;
m_Signal.notify_one();
}
void reset() // reset event: threads who will wait for this
// event will be blocked
{
boost::lock_guard<boost::mutex> lock(m_Mutex);
m_EventStatus = false;
}
};
// Queue class that can be used in multithreading context
template <typename T>
class BoundedThreadSafeQueueSignalWorkloadDone
{
private:
std::queue<T> m_queue; // Use STL queue to store data
boost::mutex m_mutex; // The mutex to synchronise on
boost::condition_variable m_QueueHasData; // The condition to wait for if queue is empty
boost::condition_variable m_QueueHasRoom; // The condition to wait for if queue is full
ResetableEvent m_ProcessingDone; // The conditon to wait for if queue is empty
// and workload is fully processed
unsigned int m_Size; // max queue size
public:
BoundedThreadSafeQueueSignalWorkloadDone(unsigned int Size)
: m_Size(Size)
{
m_ProcessingDone.set();
}
bool Empty()
{
// Acquire lock on the queue
boost::unique_lock<boost::mutex> lock(m_mutex);
return m_queue.empty();
}
bool Full()
{
// Acquire lock on the queue
boost::unique_lock<boost::mutex> lock(m_mutex);
return m_queue.size() >= m_Size;
}
// Push new data on the queue and notify other threads
// waiting for data in this queue
bool TryPush(const T &data)
{
// Acquire lock on the queue
boost::unique_lock<boost::mutex> lock(m_mutex);
// Indicate that workload processing is busy
// Fail if queue full
if (m_queue.size() >= m_Size) return false;
// Indicate that workload is being processed
m_ProcessingDone.reset();
// Add the data to the queue
m_queue.push(data);
// Notify others that data is ready
m_QueueHasData.notify_one();
return true;
} // Lock is automatically released here
// Try to push data in queue
// Wait until room in queue
void Push(const T &data)
{
// Acquire lock on the queue
boost::unique_lock<boost::mutex> lock(m_mutex);
// While queue is full, wait
// warning: do not replace m_queue.size() == m_Size with Full():
// it will deadlock due to trying to acquire the same m_mutex
// Push has already acquired
while (m_queue.size() >= m_Size) m_QueueHasRoom.wait(lock);
// Indicate that workload is being processed
m_ProcessingDone.reset();
// Now push the data
m_queue.push(data);
// And warn threads that are waiting for data
m_QueueHasData.notify_one();
}
// Get data from the queue.
// Return false if no data available
bool TryPop(T &result)
{
// Acquire lock on the queue
boost::unique_lock<boost::mutex> lock(m_mutex);
// When there is no data, return false
if (m_queue.size() == 0) return false;
// Otherwise return the data
// Retrieve the data from the queue
result=m_queue.front(); m_queue.pop();
// Warn threads who are waiting to push data
m_QueueHasRoom.notify_one();
return true;
// Lock is automatically released here
}
// Get data from the queue.
// Wait for data if not available
void WaitAndPop(T &result)
{
// Acquire lock on the queue
boost::unique_lock<boost::mutex> lock(m_mutex);
// When there is no data, wait till someone fills it.
// Lock is automatically released in the wait and obtained
// again after the wait
while (m_queue.size() == 0) m_QueueHasData.wait(lock);
// Retrieve the data from the queue
result=m_queue.front(); m_queue.pop();
// Warn threads who are waiting to push data
m_QueueHasRoom.notify_one();
} // Lock is automatically released here
void ProcessingFinished()
{
// Acquire lock on the queue
boost::unique_lock<boost::mutex> lock(m_mutex);
// If no new work scheduled,
// we can signal the queue as completely idle
if (m_queue.empty())
m_ProcessingDone.set();
else
std::cout << "[QUEUE] ProcessingFinished but queue not empty. ProcessingDone flag remains low." << std::endl;
}
void WaitUntilCompletelyIdle()
{
std::cout << "[QUEUE] Wait until idle" << std::endl;
m_ProcessingDone.wait();
std::cout << "[QUEUE] Consumer is completely idle" << std::endl;
}
};
template<typename QueueContents, template<typename QueueContents> class QueueType>
class Consumer : public WorkerThread<QueueContents, QueueType>
{
public:
Consumer(QueueType<QueueContents> &Queue, int NoOfIt, int Delay)
: WorkerThread<QueueContents, QueueType>(Queue, NoOfIt, Delay)
{}
void Work()
{
for (QueueContents i=0; i< this->m_NoOfIt; ++i)
{
// read data from the queue,
// waiting until data available
int num = -1;
this->m_Queue.WaitAndPop(num);
std::ostringstream msg;
msg << "[CONSUMER] Consumed value " << num << std::endl;
std::cout << msg.str();
boost::this_thread::sleep(boost::posix_time::milliseconds(this->m_Delay)); // work hard
this->m_Queue.ProcessingFinished(); // tell the queue we're done working hard
std::ostringstream msg2;
msg2 << "[CONSUMER] Consumption of value " << num << " completely handled." << std::endl;
std::cout << msg2.str();
}
}
};
int main()
{
std::cout << "[MAIN] About to construct queue" << std::endl;
BoundedThreadSafeQueueSignalWorkloadDone<int> Q(3);
std::cout << "[MAIN] About to construct producer" << std::endl;
Producer<int, BoundedThreadSafeQueueSignalWorkloadDone> P(Q, 10, 0);
std::cout << "[MAIN] About to construct consumer" << std::endl;
Consumer<int, BoundedThreadSafeQueueSignalWorkloadDone> C(Q, 10, 100);
std::cout << "[MAIN] About to start producer" << std::endl;
P.Start();
std::cout << "[MAIN] About to start consumer" << std::endl;
C.Start();
for (unsigned int i=0; i<20;++i)
{
C.WaitUntilCompletelyIdle();
boost::this_thread::sleep(boost::posix_time::milliseconds(2));
}
std::cout << "[MAIN] Queue should be empty after all threads finished: " << Q.Empty() << std::endl;
std::cout << "[MAIN] Waiting for producer to finish" << std::endl;
P.WaitUntilFinished();
std::cout << "[MAIN] Waiting for consumer to finish" << std::endl;
C.WaitUntilFinished();
return 0;
}
Error
main.cpp:6:43: error: declaration of template parameter ‘QueueContents’ shadows template parameter
 template<typename QueueContents, template<typename QueueContents> class QueueType>
The code is wrong: it violates [temp.local]/6, which disallows reusing a template parameter name for any other purpose.
A template-parameter shall not be redeclared within its scope (including nested scopes).
The author probably used GCC 5, which did not implement this rule, but GCC 6 and later do.
Live demo
As a quick fix, just remove the template-template parameter's own argument name. It is not used anyway.
template<typename QueueContents, template<typename> class QueueType>
class WorkerThread
. . .
My little consumer-producer problem had me stumped for some time. I didn't want an implementation where one producer pushes data round-robin to the consumers, filling up their respective queues.
I wanted one producer and x consumers, but the producer waits to produce new data until a consumer is free again. In my example there are 3 consumers, so the producer creates at most 3 data objects at any given time. Since I don't like polling, the consumers are supposed to notify the producer when they are done. It sounds simple, but the solution I found doesn't please me. First, the code.
#include "stdafx.h"
#include <mutex>
#include <condition_variable>
#include <thread>
#include <chrono>
#include <iostream>
#include <future>
#include <map>
#include <atomic>
std::atomic_int totalconsumed;
class producer {
using runningmap_t = std::map<int, std::pair<std::future<void>, bool>>;
// Secure the map of futures.
std::mutex mutex_;
runningmap_t running_;
// Used for finished notification
std::mutex waitermutex_;
std::condition_variable waiter_;
// The magic number to limit the producer.
std::atomic<int> count_;
bool can_run();
void clean();
// Fake a source, e.g. filesystem scan.
int fakeiter;
int next();
bool has_next() const;
public:
producer() : fakeiter(50) {}
void run();
void notify(int value);
void wait();
};
class consumer {
producer& producer_;
public:
consumer(producer& producer) : producer_(producer) {}
void run(int value) {
std::this_thread::sleep_for(std::chrono::milliseconds(42));
std::cout << "Consumed " << value << " on (" << std::this_thread::get_id() << ")" << std::endl;
totalconsumed++;
producer_.notify(value);
}
};
// Only if less than three threads are active, another gets to run.
bool producer::can_run() { return count_.load() < 3; }
// Verify if there's something to consume
bool producer::has_next() const { return 0 != fakeiter; }
// Produce the next value for consumption.
int producer::next() { return --fakeiter; }
// Remove the futures that have reported to be finished.
void producer::clean()
{
for (auto it = running_.begin(); it != running_.end(); ) {
if (it->second.second) {
it = running_.erase(it);
}
else {
++it;
}
}
}
// Runs the producer. Creates a new consumer for every produced value. Max 3 at a time.
void producer::run()
{
while (has_next()) {
if (can_run()) {
auto c = next();
count_++;
auto future = std::async(&consumer::run, consumer(*this), c);
std::unique_lock<std::mutex> lock(mutex_);
running_[c] = std::make_pair(std::move(future), false);
clean();
}
else {
std::unique_lock<std::mutex> lock(waitermutex_);
waiter_.wait(lock);
}
}
}
// Consumers diligently tell the producer that they are finished.
void producer::notify(int value)
{
count_--;
mutex_.lock();
running_[value].second = true;
mutex_.unlock();
std::unique_lock<std::mutex> waiterlock(waitermutex_);
waiter_.notify_all();
}
// Wait for all consumers to finish.
void producer::wait()
{
while (!running_.empty()) {
mutex_.lock();
clean();
mutex_.unlock();
std::this_thread::sleep_for(std::chrono::milliseconds(10));
}
}
// Looks like the application entry point.
int main()
{
producer p;
std::thread pthread(&producer::run, &p);
pthread.join();
p.wait();
std::cout << std::endl << std::endl << "Total consumed " << totalconsumed.load() << std::endl;
return 0;
}
The part I don't like is the map of values to futures, called running_. I need to keep each future around until the consumer is actually done. I can't remove the future from the map in the notify method, or else I'll kill the thread that is currently calling notify.
Am I missing something that could simplify this construct?
// includes needed by the code below (std::optional requires C++17)
#include <array>
#include <condition_variable>
#include <cstddef>
#include <functional>
#include <mutex>
#include <optional>
template<class T>
struct slotted_data {
std::size_t I;
T t;
};
template<class T>
using sink = std::function<void(T)>;
template<class T, std::size_t N>
struct async_slots {
bool produce( slotted_data<T> data ) {
if (terminate || data.I>=N) return false;
{
auto l = lock();
if (slots[data.I]) return false;
slots[data.I] = std::move(data.t);
}
cv.notify_one();
return true;
}
// rare use of non-lambda cv.wait in the wild!
bool consume(sink<slotted_data<T>> f) {
auto l = lock();
while(!terminate) {
for (auto& slot:slots) {
if (slot) {
auto r = std::move(*slot);
slot = std::nullopt;
f({std::size_t(&slot-slots.data()), std::move(r)}); // invoke in lock
return true;
}
}
cv.wait(l);
}
return false;
}
// easier and safer version:
std::optional<slotted_data<T>> consume() {
std::optional<slotted_data<T>> r;
bool worked = consume([&](auto&& data) { r = std::move(data); });
if (!worked) return {};
return r;
}
void finish() {
{
auto l = lock();
terminate = true;
}
cv.notify_all();
}
private:
auto lock() { return std::unique_lock<std::mutex>(m); }
std::mutex m;
std::condition_variable cv;
std::array< std::optional<T>, N > slots;
bool terminate = false;
};
async_slots provides a fixed number of slots and an awaitable consume. If you try to produce two things in the same slot, the producer function returns false and ignores you.
consume invokes the sink of the data inside the mutex in a continuation passing style. This permits atomic consumption.
We want to invert producer and consumer:
template<class T, std::size_t N>
struct slotted_consumer {
bool consume( std::size_t I, sink<T> sink ) {
std::optional<T> data;
std::condition_variable cv;
std::mutex m;
bool worked = slots.produce(
{
I,
[&](auto&& t){
{
std::unique_lock<std::mutex> l(m);
data.emplace(std::move(t));
}
cv.notify_one();
}
}
);
if (!worked) return false;
std::unique_lock<std::mutex> l(m);
cv.wait(l, [&]()->bool{
return (bool)data;
});
sink( std::move(*data) );
return true;
}
bool produce( T t ) {
return slots.consume(
[&](auto&& f) {
f.t( std::move(t) );
}
);
}
void finish() {
slots.finish();
}
private:
async_slots< sink<T>, N > slots;
};
We have to take some care to execute the sink in a context where we are not holding the mutex of async_slots, which is why consume above is so strange.
Live example.
You share a slotted_consumer< int, 3 > slots. The producing thread repeatedly calls slots.produce(42);. It blocks until a new consumer lines up.
Consumer #2 calls slots.consume( 2, [&](int x){ /* code to consume x */ } ), and #1 and #0 pass their slot numbers as well.
All 3 consumers can be waiting for the next production. The above system defaults to feeding #0 first if it is waiting for more work; we could make it "fair" at a cost of keeping a bit more state.
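Put together, usage might look like this (a rough sketch with thread counts of my own choosing; each consumer asks for a fixed number of items so the demo terminates cleanly):
#include <iostream>
#include <thread>
#include <vector>
int main() {
    slotted_consumer<int, 3> slots;
    std::vector<std::thread> consumers;
    for (std::size_t id = 0; id < 3; ++id) {
        consumers.emplace_back([&slots, id] {
            for (int n = 0; n < 3; ++n) {        // each consumer takes 3 items
                slots.consume(id, [id](int x) {
                    std::cout << "consumer " << id << " got " << x << "\n";
                });
            }
        });
    }
    std::thread producer([&slots] {
        for (int i = 0; i < 9; ++i)              // 3 consumers * 3 items
            slots.produce(i);                    // blocks until a consumer is waiting
    });
    producer.join();
    for (auto& t : consumers) t.join();
    return 0;
}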
I am testing how to push objects that wait on condition_variables into a queue. I want to execute the threads as I wish, because they will be in critical sections later. Nothing is printed from the threads; what could be wrong?
#include <chrono>
#include <condition_variable>
#include <functional>
#include <future>
#include <iostream>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>
using namespace std;
mutex print_mu;
void print(function<void()> func)
{
lock_guard<mutex> lock(print_mu);
func();
}
unsigned int generate_id()
{
static unsigned int id = 1;
return id++;
}
class foo
{
unsigned int id_;
mutex mu_;
condition_variable cv_;
bool signal_;
bool& kill_;
public:
foo(bool kill)
:kill_(kill)
, signal_(false)
, id_(generate_id())
{
run();
}
void set()
{
signal_ = true;
}
void run()
{
async(launch::async, [=]()
{
unique_lock<mutex> lock(mu_);
cv_.wait(lock, [&]() { return signal_ || kill_ ; });
if (kill_)
{
print([=](){ cout << " Thread " << id_ << " killed!" << endl; });
return;
}
print([=](){ cout << " Hello from thread " << id_ << endl; });
});
}
};
int main()
{
queue<shared_ptr<foo>> foos;
bool kill = false;
for (int i = 1; i <= 10; i++)
{
shared_ptr<foo> p = make_shared<foo>(kill);
foos.push(p);
}
this_thread::sleep_for(chrono::seconds(2));
auto p1 = foos.front();
p1->set();
foos.pop();
auto p2 = foos.front();
p2->set();
foos.pop();
this_thread::sleep_for(chrono::seconds(2));
kill = true; // terminate all waiting threads unconditionally
this_thread::sleep_for(chrono::seconds(2));
print([=](){ cout << " Main thread exits" << endl; });
return 0;
}
When a thread calls std::condition_variable::wait, it will block until another thread calls notify_one or notify_all on the same condition_variable. Since you never call notify_* on any of your condition_variables they will block forever.
Your foo::run method will also block forever, since std::future's destructor will block waiting for the result of a std::async call if it's the last std::future referencing that result. Thus your code deadlocks: your main thread is blocked waiting for your async future to finish, and your async future is blocked waiting for your main thread to signal cv_.
(Also foo::kill_ is a dangling reference. Well, it would become one if run ever returned anyway.)
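One way out, as a rough sketch of my own (not a drop-in fix for your class), is to keep the future as a member so the constructor can return, and to actually notify the condition variable from set():
#include <condition_variable>
#include <future>
#include <iostream>
#include <mutex>
class foo2 {
    std::mutex mu_;
    std::condition_variable cv_;
    bool signal_ = false;
    std::future<void> task_;
public:
    void start() {
        task_ = std::async(std::launch::async, [this] {
            std::unique_lock<std::mutex> lock(mu_);
            cv_.wait(lock, [this] { return signal_; });
            std::cout << "Hello from thread" << std::endl;
        });
    }
    void set() {
        {
            std::lock_guard<std::mutex> lock(mu_);
            signal_ = true;
        }
        cv_.notify_one(); // the notify that was missing
    }
    ~foo2() {
        if (task_.valid()) {
            set();       // make sure the waiter can finish
            task_.get(); // then block until it does
        }
    }
};
int main() {
    foo2 f;
    f.start();
    f.set();
    return 0;
}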
I found a good implementation of a Boost-based thread pool which is an improvement over this and this. It is very easy to understand and test. It looks like this:
#include <boost/thread/thread.hpp>
#include <boost/asio.hpp>
#include <iostream> // for std::cout in the tester
// the actual thread pool
struct ThreadPool {
ThreadPool(std::size_t);
template<class F>
void enqueue(F f);
~ThreadPool();
// the io_service we are wrapping
boost::asio::io_service io_service;
// don't let io_service stop
boost::shared_ptr<boost::asio::io_service::work> work;
//the threads
boost::thread_group threads;
};
// the constructor just launches some amount of workers
ThreadPool::ThreadPool(size_t nThreads)
:io_service()
,work(new boost::asio::io_service::work(io_service))
{
for ( std::size_t i = 0; i < nThreads; ++i ) {
threads.create_thread(boost::bind(&boost::asio::io_service::run, &io_service));
}
}
// add new work item to the pool
template<class F>
void ThreadPool::enqueue(F f) {
io_service.post(f);
}
// the destructor joins all threads
ThreadPool::~ThreadPool() {
work.reset();
io_service.run();
}
//tester:
void f(int i)
{
std::cout << "hello " << i << std::endl;
boost::this_thread::sleep(boost::posix_time::milliseconds(300));
std::cout << "world " << i << std::endl;
}
//it can be tested via:
int main() {
// create a thread pool of 4 worker threads
ThreadPool pool(4);
// queue a bunch of "work items"
for( int i = 0; i < 8; ++i ) {
std::cout << "task " << i << " created" << std::endl;
pool.enqueue(boost::bind(&f,i));
}
}
g++ ThreadPool-4.cpp -lboost_system -lboost_thread
Now the question:
I need to know how I can modify the implementation so that I can use this thread pool batch by batch: only when the first set of my work has been fully completed by the thread pool do I supply the second set, and so on. I tried to play with .run() and .reset() (found in the destructor) between the batch jobs, but no luck:
//adding methods to the tread pool :
//reset the asio work and thread
void ThreadPool::reset(size_t nThreads){
work.reset(new boost::asio::io_service::work(io_service));
for ( std::size_t i = 0; i < nThreads; ++i ) {
threads.create_thread(boost::bind(&boost::asio::io_service::run, &io_service));
}
std::cout << "group size : " << threads.size() << std::endl;
}
//join, and even , interrupt
void ThreadPool::joinAll(){
threads.join_all();
threads.interrupt_all();
}
//tester
int main() {
// create a thread pool of 4 worker threads
ThreadPool pool(4);
// queue a bunch of "work items"
for( int i = 0; i < 20; ++i ) {
std::cout << "task " << i << " created" << std::endl;
pool.enqueue(boost::bind(&f,i));
}
//here i play with the asio work , io_service and and the thread group
pool.work.reset();
pool.io_service.run();
std::cout << "after run" << std::endl;
pool.joinAll();
std::cout << "after join all" << std::endl;
pool.reset(4);
std::cout << "new thread group size: " << pool.threads.size() << std::endl;///btw: new threa group size is 8. I expected 4!
// second batch... never completes
for( int i = 20; i < 30; ++i ) {
pool.enqueue(boost::bind(&f,i));
}
}
The second batch doesn't complete. I would appreciate your help in fixing this.
Thank you.
UPDATE - Solution:
Based on a solution by Nik, I developed a solution using a condition variable. Just add the following code to the original class:
// note: this also needs new members in the class: boost::mutex mutex_;,
// boost::condition_variable cond; and int nTasks; (initialised to 0),
// plus declarations of wrapper() and wait().
// add new work item to the pool
template<class F>
void ThreadPool::enqueue(F f) {
{
boost::unique_lock<boost::mutex> lock(mutex_);
nTasks ++;
}
//forwarding the job to wrapper()
void (ThreadPool::*ff)(boost::tuple<F>) = &ThreadPool::wrapper<F>;
io_service.post(boost::bind(ff, this, boost::make_tuple(f))); //using a tuple seems to be the only practical way. it is mentioned in boost examples.
}
//run + notify
template<class F>
void ThreadPool::wrapper(boost::tuple<F> f) {
boost::get<0>(f)();//this is the task (function and its argument) that has to be executed by a thread
{
boost::unique_lock<boost::mutex> lock(mutex_);
nTasks --;
cond.notify_one();
}
}
void ThreadPool::wait(){
boost::unique_lock<boost::mutex> lock(mutex_);
while(nTasks){
cond.wait(lock);
}
}
Now you may call the wait() method between batches of work.
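For example, the batch driver from the tester above becomes something like this (sketch):
int main() {
    ThreadPool pool(4);
    for (int i = 0; i < 20; ++i)          // first batch
        pool.enqueue(boost::bind(&f, i));
    pool.wait();                          // block until the first batch is done
    for (int i = 20; i < 30; ++i)         // second batch
        pool.enqueue(boost::bind(&f, i));
    pool.wait();                          // also wait before the pool is destroyed
    return 0;
}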
One problem, however:
Even after the last batch I have to call pool.wait(), because the thread pool goes out of scope after that and its destructor is invoked. During destruction some of the jobs are still finishing, and that is when .notify() gets called. As ThreadPool::mutex_ has already been invalidated by then, exceptions occur while locking. Your suggestions would be appreciated.
A condition variable could be used to achieve the desired result.
Implement a function responsible for enqueueing the tasks and waiting on a condition variable.
The condition variable is notified when all tasks assigned to the pool are complete.
Every thread checks whether the jobs are complete or not. Once all the jobs are complete, the condition variable is notified.
//An example of what you could try; this is just a hint of what could be explored.
void jobScheduler()
{
int jobs = numberOfJobs; //this could vary and can be made shared memory
// queue a bunch of "work items"
for( int i = 0; i < jobs; ++i )
{
std::cout << "task " << i << " created" << std::endl;
pool.enqueue(boost::bind(&f,i));
}
//wait on a condition variable
boost::mutex::scoped_lock lock(the_mutex);
conditionVariable.wait(lock); //Have this variable notified from any thread which realizes that all jobs are complete.
}
Solution 2
I have a new working solution, with some assumptions about the syntax of the functions being called back, but that could be changed as per requirements.
Continuing along the lines above, I use a condition variable for managing my tasks, but with a difference:
Create a queue of jobs.
A manager waits for new jobs in the queue.
Once a job is received, a notification is sent to the waiting manager.
The worker maintains a handle to the manager. When all the assigned tasks are complete, the manager is informed.
On getting the call to end, the manager stops waiting for new jobs in the queue and exits.
#include <iostream>
#include <queue>
#include <boost/thread/thread.hpp>
#include <boost/asio.hpp>
#include <boost/tuple/tuple.hpp>
#include <boost/tuple/tuple_io.hpp>
#include <boost/function.hpp>
///JOB Queue hold all jobs required to be executed
template<typename Job>
class JobQueue
{
private:
std::queue<Job> _queue;
mutable boost::mutex _mutex;
boost::condition_variable _conditionVariable;
public:
void push(Job const& job)
{
boost::mutex::scoped_lock lock(_mutex);
_queue.push(job);
lock.unlock();
_conditionVariable.notify_one();
}
bool empty() const
{
boost::mutex::scoped_lock lock(_mutex);
return _queue.empty();
}
bool tryPop(Job& poppedValue)
{
boost::mutex::scoped_lock lock(_mutex);
if(_queue.empty())
{
return false;
}
poppedValue = _queue.front();
_queue.pop();
return true;
}
void waitAndPop(Job& poppedValue)
{
boost::mutex::scoped_lock lock(_mutex);
while(_queue.empty())
{
_conditionVariable.wait(lock);
}
poppedValue = _queue.front();
_queue.pop();
}
};
///Thread pool for posting jobs to io service
class ThreadPool
{
public :
ThreadPool( int noOfThreads = 1) ;
~ThreadPool() ;
template< class func >
void post( func f ) ;
boost::asio::io_service &getIoService() ;
private :
boost::asio::io_service _ioService;
boost::asio::io_service::work _work ;
boost::thread_group _threads;
};
inline ThreadPool::ThreadPool( int noOfThreads )
: _work( _ioService )
{
for(int i = 0; i < noOfThreads ; ++i) // 4
_threads.create_thread(boost::bind(&boost::asio::io_service::run, &_ioService));
}
inline ThreadPool::~ThreadPool()
{
_ioService.stop() ;
_threads.join_all() ;
}
inline boost::asio::io_service &ThreadPool::getIoService()
{
return _ioService ;
}
template< class func >
void ThreadPool::post( func f )
{
_ioService.post( f ) ;
}
template<typename T>
class Manager;
///Worker doing some work.
template<typename T>
class Worker{
T _data;
int _taskList;
boost::mutex _mutex;
Manager<T>* _hndl;
public:
Worker(T data, int task, Manager<T>* hndle):
_data(data),
_taskList(task),
_hndl(hndle)
{
}
bool job()
{
boost::mutex::scoped_lock lock(_mutex);
std::cout<<"...Men at work..."<<++_data<<std::endl;
--_taskList;
if(taskDone())
_hndl->end();
return true;
}
bool taskDone()
{
std::cout<<"Tasks "<<_taskList<<std::endl<<std::endl;
if(_taskList == 0)
{
std::cout<<"Tasks done "<<std::endl;
return true;
}
else return false;
}
};
///Job handler waits for new jobs and
///executes them as and when a new job is received, using the thread pool.
///Once all jobs are done the handler exits.
template<typename T>
class Manager{
public:
typedef boost::function< bool (Worker<T>*)> Func;
Manager(int threadCount):
_threadCount(threadCount),
_isWorkCompleted(false)
{
_pool = new ThreadPool(_threadCount);
boost::thread jobRunner(&Manager::execute, this);
}
void add(Func f, Worker<T>* instance)
{
Job job(instance, f);
_jobQueue.push(job);
}
void end()
{
boost::mutex::scoped_lock lock(_mutex);
_isWorkCompleted = true;
//send a dummy job
add( NULL, NULL);
}
void workComplete()
{
std::cout<<"Job well done."<<std::endl;
}
bool isWorkDone()
{
boost::mutex::scoped_lock lock(_mutex);
if(_isWorkCompleted)
return true;
return false;
}
void execute()
{
Job job;
while(!isWorkDone())
{
_jobQueue.waitAndPop(job);
Func f = boost::get<1>(job);
Worker<T>* ptr = boost::get<0>(job);
if(f)
{
_pool->post(boost::bind(f, ptr));
}
else
break;
}
std::cout<<"Complete"<<std::endl;
}
private:
ThreadPool *_pool;
int _threadCount;
typedef boost::tuple<Worker<T>*, Func > Job;
JobQueue<Job> _jobQueue;
bool _isWorkCompleted;
boost::mutex _mutex;
};
typedef boost::function< bool (Worker<int>*)> IntFunc;
typedef boost::function< bool (Worker<char>*)> CharFunc;
int main()
{
boost::asio::io_service ioService;
Manager<int> jobHndl(2);
Worker<int> wrk1(0,4, &jobHndl);
IntFunc f= &Worker<int>::job;
jobHndl.add(f, &wrk1);
jobHndl.add(f, &wrk1);
jobHndl.add(f, &wrk1);
jobHndl.add(f, &wrk1);
Manager<char> jobHndl2(2);
Worker<char> wrk2(0,'a', &jobHndl2);
CharFunc f2= &Worker<char>::job;
jobHndl2.add(f2, &wrk2);
jobHndl2.add(f2, &wrk2);
jobHndl2.add(f2, &wrk2);
jobHndl2.add(f2, &wrk2);
ioService.run();
while(1){}
return 0;
}
The third solution is the best (easiest, IMHO); it is the one from the author of Asio.
You have to understand that you will stay blocked on the threads.join_all() statement while there is still a thread alive. Then you can call it again with other work to do.
Maybe an alternative is to use a task queue ("a task queue that uses a thread pool to complete tasks in parallel"): you fill the queue with your work, and it ensures that no more than x tasks are working in parallel.
The sample is easy to understand.
Maybe you need to add this member function to the TaskQueue class in order to solve your "pool.wait()" issue:
void WaitForEmpty(){
while( NumPendingTasks() || threads_.size() ){
boost::wait_for_any(futures_.begin(), futures_.end());
}
}
Enjoy!