Here is my ThreadPool implementation. I tried it from a simple main() and cannot stop it correctly: the destructor is called before the threads start, and the whole program ends in a deadlock (on t.join()) because the condition variable is notified before the threads reach the wait call.
Any ideas how to fix it? Or is there a better way to implement it?
ThreadPool.cpp
#include <atomic>
#include <condition_variable>
#include <functional>
#include <future>
#include <iostream>
#include <memory>
#include <mutex>
#include <thread>
#include <vector>
namespace Concurrency {
template <typename RetType>
class StandardThreadPool : public ThreadPool<RetType> {
private:
typedef std::function<RetType()> taskType;
ThreadSafeQueue<std::packaged_task<RetType()>> queue;
std::mutex queueMutex;
std::condition_variable queueCondition;
std::vector<std::thread> poolThreads;
std::atomic<bool> stopThreadsFlag{false};
void threadWork() {
std::cout << "thread:" << std::this_thread::get_id() << " started\n";
std::unique_lock<std::mutex> lock(queueMutex);
while (true) {
queueCondition.wait(lock);
if (stopThreadsFlag.load())
break;
auto task = queue.Pop();
if (task)
(*task)();
}
std::cout << "thread:" << std::this_thread::get_id() << " finished\n";
}
void initThreadPool() {
poolThreads.resize(StandardThreadPool<RetType>::maxThreads);
}
void startThreads() {
for (int i = 0; i < StandardThreadPool<RetType>::maxThreads; i++) {
poolThreads[i] =
std::thread(&StandardThreadPool<RetType>::threadWork, this);
}
}
void terminateThreads() {
stopThreadsFlag.store(true);
queueCondition.notify_all();
for (auto &t : poolThreads) {
t.join();
}
}
public:
StandardThreadPool(int maxThreads) : ThreadPool<RetType>(maxThreads) {
initThreadPool();
startThreads();
}
virtual std::future<RetType> Push(taskType &&task) override {
std::packaged_task<RetType()> pt = std::packaged_task<RetType()>(task);
auto future = pt.get_future();
queue.Push(std::move(pt));
queueCondition.notify_one();
return future;
}
~StandardThreadPool() {
std::cout << "destructor called\n";
terminateThreads(); }
};
} // namespace Concurrency
namespace Concurrency {
template <typename T> class ThreadSafeQueue {
private:
struct node {
std::shared_ptr<T> data;
std::unique_ptr<node> next;
};
std::mutex headMutex;
std::mutex tailMutex;
std::unique_ptr<node> head;
node *tail;
node *getTail() {
std::lock_guard<std::mutex> lock(tailMutex);
return tail;
}
std::unique_ptr<node> popHead() {
std::lock_guard<std::mutex> lock(headMutex);
if (head.get() == getTail())
return nullptr;
std::unique_ptr<node> oldHead(std::move(head));
head = std::move(oldHead->next);
return oldHead;
}
public:
ThreadSafeQueue() : head(new node), tail(head.get()) {}
std::shared_ptr<T> Pop() {
auto oldHead = popHead();
return oldHead ? oldHead->data : nullptr;
}
void Push(const T &newValue) {
auto newData = std::make_shared<T>(newValue); // copy; the rvalue overload below handles moves
std::unique_ptr<node> pNew(new node);
auto newTail = pNew.get();
std::lock_guard<std::mutex> lock(tailMutex);
tail->data = newData;
tail->next = std::move(pNew);
tail = newTail;
}
void Push(T &&newValue) {
auto newData = std::make_shared<T>(std::move(newValue));
std::unique_ptr<node> pNew(new node);
auto newTail = pNew.get();
std::lock_guard<std::mutex> lock(tailMutex);
tail->data = newData;
tail->next = std::move(pNew);
tail = newTail;
}
ThreadSafeQueue(const ThreadSafeQueue &) = delete;
ThreadSafeQueue &operator=(const ThreadSafeQueue &) = delete;
};
} // namespace Concurrency
#include <functional>
#include <future>
namespace Concurrency {
template <typename RetType> class ThreadPool {
public:
int maxThreads;
public:
typedef std::function<RetType()> taskType;
ThreadPool(int maxThreads):maxThreads(maxThreads){}
virtual std::future<RetType> Push(taskType &&newTask) = 0;
ThreadPool(const ThreadPool &) = delete;
ThreadPool(const ThreadPool &&) = delete;
};
} // namespace Concurrency
main.cpp
int main() {
Concurrency::StandardThreadPool<int> th(1);
auto fun = []() {
std::cout << "function running\n";
return 2;
};
th.Push(fun);
return EXIT_SUCCESS;
}
First, a correct threadsafe queue.
#include <atomic>
#include <condition_variable>
#include <deque>
#include <mutex>
#include <optional>
#include <utility>

template<class T>
struct threadsafe_queue {
[[nodiscard]] std::optional<T> pop() {
auto l = lock();
cv.wait(l, [&]{ return is_aborted() || !data.empty(); });
if (is_aborted())
return {};
auto r = std::move(data.front());
data.pop_front();
cv.notify_all(); // for wait_until_empty
return r; // might need std::move here, depending on compiler version
}
bool push(T t) {
auto l = lock();
if (is_aborted()) return false;
data.push_back(std::move(t));
cv.notify_one();
return true;
}
void set_abort_flag() {
auto l = lock(); // still need this
aborted = true;
data.clear();
cv.notify_all();
}
[[nodiscard]] bool is_aborted() const { return aborted; }
void wait_until_empty() {
auto l = lock();
cv.wait(l, [&]{ return data.empty(); });
}
private:
std::unique_lock<std::mutex> lock() {
return std::unique_lock<std::mutex>(m);
}
std::condition_variable cv;
std::mutex m;
std::atomic<bool> aborted{false};
std::deque<T> data;
};
This handles abort and the like internally.
Our threadpool then becomes:
#include <cstddef>
#include <future>
#include <thread>
#include <type_traits>
#include <utility>
#include <vector>

struct threadpool {
explicit threadpool(std::size_t count)
{
for (std::size_t i = 0; i < count; ++i) {
threads.emplace_back([&]{
// abort handled by empty pop:
while( auto f = queue.pop() ) {
(*f)();
}
});
}
}
void set_abort_flag() {
queue.set_abort_flag();
}
[[nodiscard]] bool is_aborted() const {
return queue.is_aborted();
}
~threadpool() {
queue.wait_until_empty();
queue.set_abort_flag(); // get threads to leave the queue
for (std::thread& t:threads)
t.join();
}
template<class F,
class R=typename std::result_of<F()>::type
>
std::future<R> push_task( F f ) {
std::packaged_task<R()> task( std::move(f) );
auto ret = task.get_future();
if (queue.push( std::packaged_task<void()>(std::move(task)) )) // wait, this works? Yes it does.
return ret;
else
return {}; // cannot push, already aborted
}
private:
// yes, void. This is evil but it works
threadsafe_queue<std::packaged_task<void()>> queue;
std::vector<std::thread> threads;
};
In C++11 you can swap the std::optional for a std::unique_ptr, at the cost of a bit more runtime overhead.
The trick here is that a std::packaged_task<void()> can store a std::packaged_task<R()>. And we don't need the return value in the queue. So one thread pool can handle any number of different return values in tasks -- it doesn't care.
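To see the trick in isolation, here is a minimal standalone sketch (not part of the pool above): the erased std::packaged_task<void()> runs the inner task, and the future obtained before erasure still delivers the int.

#include <future>
#include <iostream>

int main() {
    std::packaged_task<int()> inner([]{ return 42; });
    std::future<int> fut = inner.get_future(); // grab the future *before* erasing

    // packaged_task can hold move-only callables, so this type-erases the task;
    // the int return value is simply discarded at the void() layer.
    std::packaged_task<void()> erased(std::move(inner));
    erased(); // runs the inner task, fulfilling fut

    std::cout << fut.get() << "\n"; // prints 42
    return 0;
}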
I only join the threads on thread_pool destruction. I could do it after an abort as well.
Destroying a thread_pool waits until all tasks are complete. Note that aborting a thread_pool may not abort tasks in progress. One thing that you probably want to add is the option of passing an abort API/flag to the tasks, so they can abort early if asked.
Getting this industrial scale is hard, because ideally all blocking in a task would also pay attention to the abort possibility.
Live example.
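To illustrate the abort-flag idea mentioned above, a hypothetical sketch (assuming the threadpool above is in scope): the task captures the pool and polls is_aborted() between chunks of work.

#include <iostream>

int main() {
    threadpool pool(2);
    auto fut = pool.push_task([&pool]() -> int {
        for (int chunk = 0; chunk < 1000; ++chunk) {
            if (pool.is_aborted()) return -1; // bail out early if asked
            // ... one chunk of real work would go here ...
        }
        return 0;
    });
    std::cout << "task returned " << fut.get() << "\n";
    return 0;
}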
You could add a 2nd cv to notify after pops, which only wait_until_empty waits on. That might save you some spurious wakeups.
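Sketched under the same assumptions as the queue above (C++17; the name threadsafe_queue2 is mine): only pop(), set_abort_flag(), wait_until_empty(), and one new member change, and pushes then no longer wake a thread blocked in wait_until_empty.

#include <atomic>
#include <condition_variable>
#include <deque>
#include <mutex>
#include <optional>
#include <utility>

template<class T>
struct threadsafe_queue2 {
    [[nodiscard]] std::optional<T> pop() {
        auto l = lock();
        cv.wait(l, [&]{ return is_aborted() || !data.empty(); });
        if (is_aborted())
            return {};
        auto r = std::move(data.front());
        data.pop_front();
        if (data.empty())
            empty_cv.notify_all(); // only wait_until_empty listens here
        return r;
    }
    bool push(T t) {
        auto l = lock();
        if (is_aborted()) return false;
        data.push_back(std::move(t));
        cv.notify_one();
        return true;
    }
    void set_abort_flag() {
        auto l = lock();
        aborted = true;
        data.clear();
        cv.notify_all();
        empty_cv.notify_all(); // the queue just became empty
    }
    [[nodiscard]] bool is_aborted() const { return aborted; }
    void wait_until_empty() {
        auto l = lock();
        empty_cv.wait(l, [&]{ return data.empty(); });
    }
private:
    std::unique_lock<std::mutex> lock() {
        return std::unique_lock<std::mutex>(m);
    }
    std::condition_variable cv;       // "data available / aborted"
    std::condition_variable empty_cv; // "queue became empty"
    std::mutex m;
    std::atomic<bool> aborted{false};
    std::deque<T> data;
};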
I have asked a simpler version of this question before and got the correct answer: Thread pools not working with large number of tasks
Now I am trying to run tasks from an object of a class in parallel using a thread pool. My task is simple and only prints a number for that instance of the class. I am expecting the numbers 0 through 9 to be printed, but instead some numbers get printed more than once and some are not printed at all. Can anyone see what I am doing wrong when creating tasks in my loop?
#include "iostream"
#include "ThreadPool.h"
#include <chrono>
#include <thread>
using namespace std;
using namespace dynamicThreadPool;
class test {
int x;
public:
test(int x_in) : x(x_in) {}
void task()
{
cout << x << endl;
}
};
int main(void)
{
thread_pool pool;
for (int i = 0; i < 10; i++)
{
test* myTest = new test(i);
std::function<void()> myFunction = [&] {myTest->task(); };
pool.submit(myFunction);
}
while (!pool.isQueueEmpty())
{
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
cout << "waiting for tasks to complete" << endl;
}
return 0;
}
And here is my thread pool; I got this definition from the book "C++ Concurrency in Action":
#pragma once
#include <atomic>
#include <condition_variable>
#include <functional>
#include <future>
#include <list>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>
template<typename T>
class threadsafe_queue
{
private:
mutable std::mutex mut;
std::queue<T> data_queue;
std::condition_variable data_cond;
public:
threadsafe_queue() {}
void push(T new_value)
{
std::lock_guard<std::mutex> lk(mut);
data_queue.push(std::move(new_value));
data_cond.notify_one();
}
void wait_and_pop(T& value)
{
std::unique_lock<std::mutex> lk(mut);
data_cond.wait(lk, [this] {return !data_queue.empty(); });
value = std::move(data_queue.front());
data_queue.pop();
}
bool try_pop(T& value)
{
std::lock_guard<std::mutex> lk(mut);
if (data_queue.empty())
return false;
value = std::move(data_queue.front());
data_queue.pop();
return true;
}
bool empty() const
{
std::lock_guard<std::mutex> lk(mut);
return data_queue.empty();
}
};
class join_threads
{
std::vector<std::thread>& threads;
public:
explicit join_threads(std::vector<std::thread>& threads_) : threads(threads_) {}
~join_threads()
{
for (unsigned long i = 0; i < threads.size(); i++)
{
if (threads[i].joinable())
{
threads[i].join();
}
}
}
};
class thread_pool
{
std::atomic_bool done;
threadsafe_queue<std::function<void()> > work_queue;
std::vector<std::thread> threads;
join_threads joiner;
void worker_thread()
{
while (!done)
{
std::function<void()> task;
if (work_queue.try_pop(task))
{
task();
}
else
{
std::this_thread::yield();
}
}
}
public:
thread_pool() : done(false), joiner(threads)
{
unsigned const thread_count = std::thread::hardware_concurrency();
try
{
for (unsigned i = 0; i < thread_count; i++)
{
threads.push_back(std::thread(&thread_pool::worker_thread, this));
}
}
catch (...)
{
done = true;
throw;
}
}
~thread_pool()
{
done = true;
}
template<typename FunctionType>
void submit(FunctionType f)
{
work_queue.push(std::function<void()>(f));
}
bool isQueueEmpty()
{
return work_queue.empty();
}
};
There's too much code to analyse all of it, but you take a pointer by reference here:
{
test* myTest = new test(i);
std::function<void()> myFunction = [&] {myTest->task(); };
pool.submit(myFunction);
} // pointer goes out of scope
After that pointer has gone out of scope you will have undefined behavior if you later do myTest->task();.
To solve that immediate problem, copy the pointer and delete the object afterwards to not leak memory:
{
test* myTest = new test(i);
std::function<void()> myFunction = [=] {myTest->task(); delete myTest; };
pool.submit(myFunction);
}
I suspect this could be solved without using new at all, but I'll leave that up to you.
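For what it's worth, a minimal sketch of that no-new approach: capture the test object by value so each closure owns its own copy (mutable, since task() is not const), and new/delete disappear entirely.

{
    test myTest(i);
    std::function<void()> myFunction = [myTest]() mutable { myTest.task(); };
    pool.submit(myFunction);
}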
I am trying to create a data structure, ExpiringDeque. It should be somewhat similar to std::deque. Let's say I need only push_back(), size() and pop_front(). The data structure needs to automatically expire up to N first elements every T seconds.
This data structure needs to manage its own queue and expiration thread internally.
How do I write it in a thread safe way? This is an example that I came up with, does this seem reasonable? What am I missing?
#include <algorithm>
#include <atomic>
#include <cassert>
#include <deque>
#include <mutex>
#include <thread>
#include <unistd.h>
#include <iostream>
template <typename T>
class ExpiringDeque {
public:
ExpiringDeque(int n, int t) : numElements_(n), interval_(t), running_(true), items_({}) {
expiringThread_ = std::thread{[&] () {
using namespace std::chrono_literals;
int waitCounter = 0;
while (true) {
if (!running_) {
return;
}
std::this_thread::sleep_for(1s);
if (waitCounter++ < interval_) {
continue;
}
std::lock_guard<std::mutex> guard(mutex_);
waitCounter = 0;
int numToErase = std::min(numElements_, static_cast<int>(items_.size()));
std::cout << "Erasing " << numToErase << " elements\n";
items_.erase(items_.begin(), items_.begin() + numToErase);
}
}};
}
~ExpiringDeque() {
running_ = false;
expiringThread_.join();
}
T pop_front() {
if (items_.size() == 0) {
throw std::out_of_range("Empty deque");
}
std::lock_guard<std::mutex> guard(mutex_);
T item = items_.front();
items_.pop_front();
return item;
}
int size() {
std::lock_guard<std::mutex> guard(mutex_);
return items_.size();
}
void push_back(T item) {
std::lock_guard<std::mutex> guard(mutex_);
items_.push_back(item);
}
private:
int numElements_;
int interval_;
std::atomic<bool> running_;
std::thread expiringThread_;
std::mutex mutex_;
std::deque<T> items_;
};
int main() {
ExpiringDeque<int> ed(10, 3);
ed.push_back(1);
ed.push_back(2);
ed.push_back(3);
assert(ed.size() == 3);
assert(ed.pop_front() == 1);
assert(ed.size() == 2);
// wait for expiration
sleep(5);
assert(ed.size() == 0);
ed.push_back(10);
assert(ed.size() == 1);
assert(ed.pop_front() == 10);
return 0;
}
You can avoid the unnecessary wait in the destructor of ExpiringDeque by using a condition variable. I would use std::condition_variable::wait_for with a predicate that checks the running_ flag; that way you wait either for the timeout or for a notification, whichever comes first, and the waitCounter and continue go away entirely.
Another thing you should do is lock the mutex before checking the size of your deque in pop_front(); otherwise it's not thread safe.
Here's an updated version of your code:
template <typename T>
class ExpiringDeque {
public:
ExpiringDeque(int n, int t) : numElements_(n), interval_(t), running_(true), items_({}), cv_() {
expiringThread_ = std::thread{ [&]() {
using namespace std::chrono_literals;
while (true) {
//Wait for timeout or notification
std::unique_lock<std::mutex> lk(mutex_);
cv_.wait_for(lk, interval_ * 1s, [&] { return !running_; });
if (!running_)
return;
//Mutex is locked already - no need to lock again
int numToErase = std::min(numElements_, static_cast<int>(items_.size()));
std::cout << "Erasing " << numToErase << " elements\n";
items_.erase(items_.begin(), items_.begin() + numToErase);
}
} };
}
~ExpiringDeque() {
//Set flag and notify worker thread
{
std::lock_guard<std::mutex> lk(mutex_);
running_ = false;
}
cv_.notify_one();
expiringThread_.join();
}
T pop_front() {
std::lock_guard<std::mutex> guard(mutex_);
if (items_.size() == 0) {
throw std::out_of_range("Empty deque");
}
T item = items_.front();
items_.pop_front();
return item;
}
...
private:
int numElements_;
int interval_;
bool running_;
std::thread expiringThread_;
std::mutex mutex_;
std::deque<T> items_;
std::condition_variable cv_;
};
You can also make the running_ flag a plain bool: it is now only read and written while the mutex is held, and std::condition_variable::wait_for checks the predicate atomically with respect to notification and timeout.
I am trying to make a thread safe queue in C++17 based on condition variables.
How do I correctly interrupt the WaitAndPop() method in the queue's destructor?
The problem is that user classes wait for the WaitAndPop() call to return before they destruct, so their member queue never destructs, so the return never happens, and I have a deadlock.
Here is a simplified example that illustrates the problem:
#include <condition_variable>
#include <future>
#include <iostream>
#include <mutex>
#include <queue>
#include <thread>
using namespace std;
using namespace chrono_literals;
class ThreadsafeQueue {
private:
condition_variable cv_;
bool cancel_;
mutex mut_;
queue<int> queue_;
public:
ThreadsafeQueue() : cancel_(false){};
~ThreadsafeQueue() {
// although this would stop the cv, it never runs.
cancel_ = true;
cv_.notify_all();
scoped_lock<mutex> lk(mut_);
}
void Push(int x) {
{
scoped_lock<mutex> lk(mut_);
queue_.push(x);
}
cv_.notify_all();
}
// returns true if successful
bool WaitAndPop(int &out) {
unique_lock<mutex> lk(mut_);
cv_.wait(lk, [this]() { return cancel_ || ! queue_.empty(); });
if (cancel_) return false;
out = queue_.front();
queue_.pop();
return true;
}
};
class MyClass {
private:
future<void> fill_fut_;
future<void> serve_fut_;
ThreadsafeQueue queue_;
bool running_;
public:
MyClass() : running_(true) {
fill_fut_ = async(launch::async, &MyClass::FillThread, this);
serve_fut_ = async(launch::async, &MyClass::ServeThread, this);
};
~MyClass() {
running_ = false;
fill_fut_.get();
serve_fut_.get(); // this prevents the threadsafe queue from destructing,
// which
// prevents the serve thread from stopping.
}
void FillThread() {
while (running_) {
queue_.Push(rand() & 100);
this_thread::sleep_for(200ms);
}
}
void ServeThread() {
while (running_) {
int x;
bool ok = queue_.WaitAndPop(x); // this never returns because the queue
// never destructs
if (ok)
cout << "popped: " << x << endl; // prints five times
else
cout << "pop failed"; // does not reach here
}
}
};
int main() {
MyClass obj;
this_thread::sleep_for(1s);
return 0;
}
My little consumer-producer problem had me stumped for some time. I didn't want an implementation where one producer pushes some data round-robin to the consumers, filling up their queues of data respectively.
I wanted one producer and x consumers, where the producer holds off producing new data until a consumer is free again. In my example there are 3 consumers, so the producer creates a maximum of 3 objects of data at any given time. Since I don't like polling, the consumers are supposed to notify the producer when they are done. Sounds simple, but the solution I found doesn't please me. First the code.
#include "stdafx.h"
#include <mutex>
#include <iostream>
#include <future>
#include <map>
#include <atomic>
std::atomic_int totalconsumed;
class producer {
using runningmap_t = std::map<int, std::pair<std::future<void>, bool>>;
// Secure the map of futures.
std::mutex mutex_;
runningmap_t running_;
// Used for finished notification
std::mutex waitermutex_;
std::condition_variable waiter_;
// The magic number to limit the producer.
std::atomic<int> count_;
bool can_run();
void clean();
// Fake a source, e.g. filesystem scan.
int fakeiter;
int next();
bool has_next() const;
public:
producer() : fakeiter(50) {}
void run();
void notify(int value);
void wait();
};
class consumer {
producer& producer_;
public:
consumer(producer& producer) : producer_(producer) {}
void run(int value) {
std::this_thread::sleep_for(std::chrono::milliseconds(42));
std::cout << "Consumed " << value << " on (" << std::this_thread::get_id() << ")" << std::endl;
totalconsumed++;
producer_.notify(value);
}
};
// Only if less than three threads are active, another gets to run.
bool producer::can_run() { return count_.load() < 3; }
// Verify if there's something to consume
bool producer::has_next() const { return 0 != fakeiter; }
// Produce the next value for consumption.
int producer::next() { return --fakeiter; }
// Remove the futures that have reported to be finished.
void producer::clean()
{
for (auto it = running_.begin(); it != running_.end(); ) {
if (it->second.second) {
it = running_.erase(it);
}
else {
++it;
}
}
}
// Runs the producer. Creates a new consumer for every produced value. Max 3 at a time.
void producer::run()
{
while (has_next()) {
if (can_run()) {
auto c = next();
count_++;
auto future = std::async(&consumer::run, consumer(*this), c);
std::unique_lock<std::mutex> lock(mutex_);
running_[c] = std::make_pair(std::move(future), false);
clean();
}
else {
std::unique_lock<std::mutex> lock(waitermutex_);
waiter_.wait(lock);
}
}
}
// Consumers diligently tell the producer that they are finished.
void producer::notify(int value)
{
count_--;
mutex_.lock();
running_[value].second = true;
mutex_.unlock();
std::unique_lock<std::mutex> waiterlock(waitermutex_);
waiter_.notify_all();
}
// Wait for all consumers to finish.
void producer::wait()
{
while (!running_.empty()) {
mutex_.lock();
clean();
mutex_.unlock();
std::this_thread::sleep_for(std::chrono::milliseconds(10));
}
}
// Looks like the application entry point.
int main()
{
producer p;
std::thread pthread(&producer::run, &p);
pthread.join();
p.wait();
std::cout << std::endl << std::endl << "Total consumed " << totalconsumed.load() << std::endl;
return 0;
}
The part I don't like is the list of values mapped to the futures, called running_. I need to keep the future around until the consumer is actually done. I can't remove the future from the map in the notify method or else I'll kill the thread that is currently calling notify.
Am I missing something that could simplify this construct?
#include <array>
#include <condition_variable>
#include <cstddef>
#include <functional>
#include <mutex>
#include <optional>
#include <utility>

template<class T>
struct slotted_data {
std::size_t I;
T t;
};
template<class T>
using sink = std::function<void(T)>;
template<class T, std::size_t N>
struct async_slots {
bool produce( slotted_data<T> data ) {
if (data.I >= N) return false;
{
auto l = lock();
if (terminate) return false; // terminate is a plain bool, so read it under the lock
if (slots[data.I]) return false;
slots[data.I] = std::move(data.t);
}
cv.notify_one();
return true;
}
// rare use of non-lambda cv.wait in the wild!
bool consume(sink<slotted_data<T>> f) {
auto l = lock();
while(!terminate) {
for (auto& slot:slots) {
if (slot) {
auto r = std::move(*slot);
slot = std::nullopt;
f({std::size_t(&slot-slots.data()), std::move(r)}); // invoke in lock
return true;
}
}
cv.wait(l);
}
return false;
}
// easier and safer version:
std::optional<slotted_data<T>> consume() {
std::optional<slotted_data<T>> r;
bool worked = consume([&](auto&& data) { r = std::move(data); });
if (!worked) return {};
return r;
}
void finish() {
{
auto l = lock();
terminate = true;
}
cv.notify_all();
}
private:
auto lock() { return std::unique_lock<std::mutex>(m); }
std::mutex m;
std::condition_variable cv;
std::array< std::optional<T>, N > slots;
bool terminate = false;
};
async_slots provides a fixed number of slots and an awaitable consume. If you try to produce two things in the same slot, the producer function returns false and ignores you.
consume invokes the sink of the data inside the mutex in a continuation passing style. This permits atomic consumption.
We want to invert producer and consumer:
template<class T, std::size_t N>
struct slotted_consumer {
bool consume( std::size_t I, sink<T> sink ) {
std::optional<T> data;
std::condition_variable cv;
std::mutex m;
bool worked = slots.produce(
{
I,
[&](auto&& t){
{
std::unique_lock<std::mutex> l(m);
data.emplace(std::move(t));
}
cv.notify_one();
}
}
);
if (!worked) return false;
std::unique_lock<std::mutex> l(m);
cv.wait(l, [&]()->bool{
return (bool)data;
});
sink( std::move(*data) );
return true;
}
bool produce( T t ) {
return slots.consume(
[&](auto&& f) {
f.t( std::move(t) );
}
);
}
void finish() {
slots.finish();
}
private:
async_slots< sink<T>, N > slots;
};
We have to take some care to execute sink in a context where we are not holding the mutex of async_slots, which is why consume above is so strange.
Live example.
You share a slotted_consumer< int, 3 > slots. The producing thread repeatedly calls slots.produce(42);. It blocks until a new consumer lines up.
Consumer #2 calls slots.consume( 2, [&](int x){ /* code to consume x */ } ), and #1 and #0 pass their slot numbers as well.
All 3 consumers can be waiting for the next production. The above system defaults to feeding #0 first if it is waiting for more work; we could make it "fair" at a cost of keeping a bit more state.
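For illustration, a hypothetical usage sketch along those lines, assuming the slotted_consumer above is in scope: nine values, with each consumer asking for exactly three, so the example terminates without needing finish().

#include <cstddef>
#include <iostream>
#include <thread>
#include <vector>

int main() {
    slotted_consumer<int, 3> slots;

    // One producer: each produce blocks until some consumer lines up.
    std::thread producer([&]{
        for (int i = 0; i < 9; ++i)
            slots.produce(i);
    });

    // Three consumers, each owning one slot and taking exactly three values.
    std::vector<std::thread> consumers;
    for (std::size_t id = 0; id < 3; ++id)
        consumers.emplace_back([&, id]{
            for (int n = 0; n < 3; ++n)
                slots.consume(id, [&](int x){
                    std::cout << "slot " << id << " consumed " << x << "\n";
                });
        });

    producer.join();
    for (auto& c : consumers) c.join();
    return 0;
}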
I'm in the process of porting some Java code over to C++, and one particular section makes use of a BlockingQueue to pass messages from many producers to a single consumer.
If you are not familiar with what a Java BlockingQueue is, it is just a queue that has a hard capacity, which exposes thread safe methods to put() and take() from the queue. put() blocks if the queue is full, and take() blocks if the queue is empty. Also, timeout-sensitive versions of these methods are supplied.
Timeouts are relevant to my use-case, so a recommendation that supplies those is ideal. If not, I can code up some myself.
I've googled around and quickly browsed the Boost libraries and I'm not finding anything like this. Maybe I'm blind here...but does anyone know of a good recommendation?
Thanks!
It isn't fixed size and it doesn't support timeouts, but here is a simple implementation of a queue I posted recently using C++11 constructs:
#include <mutex>
#include <condition_variable>
#include <deque>
template <typename T>
class queue
{
private:
std::mutex d_mutex;
std::condition_variable d_condition;
std::deque<T> d_queue;
public:
void push(T const& value) {
{
std::unique_lock<std::mutex> lock(this->d_mutex);
d_queue.push_front(value);
}
this->d_condition.notify_one();
}
T pop() {
std::unique_lock<std::mutex> lock(this->d_mutex);
this->d_condition.wait(lock, [=]{ return !this->d_queue.empty(); });
T rc(std::move(this->d_queue.back()));
this->d_queue.pop_back();
return rc;
}
};
It should be trivial to extend and use a timed wait for popping. The main reason I haven't done it is that I'm not happy with the interface choices I have thought of so far.
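For example, a timed pop could be sketched as an extra member of the queue above; this is just one possible interface choice, using C++17's std::optional (which postdates the original answer) and additionally requiring <chrono> and <optional>.

// Returns std::nullopt if nothing arrived within the timeout.
template <typename Rep, typename Period>
std::optional<T> pop_for(std::chrono::duration<Rep, Period> timeout) {
    std::unique_lock<std::mutex> lock(this->d_mutex);
    if (!this->d_condition.wait_for(lock, timeout,
                                    [this]{ return !this->d_queue.empty(); }))
        return std::nullopt; // timed out while still empty
    T rc(std::move(this->d_queue.back()));
    this->d_queue.pop_back();
    return rc;
}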
Here's an example of a blocking queue with a shutdown-request feature:
#include <condition_variable>
#include <mutex>
#include <queue>
#include <utility>

template <typename T> class BlockingQueue {
std::condition_variable _cvCanPop;
std::mutex _sync;
std::queue<T> _qu;
bool _bShutdown = false;
public:
void Push(const T& item)
{
{
std::unique_lock<std::mutex> lock(_sync);
_qu.push(item);
}
_cvCanPop.notify_one();
}
void RequestShutdown() {
{
std::unique_lock<std::mutex> lock(_sync);
_bShutdown = true;
}
_cvCanPop.notify_all();
}
bool Pop(T &item) {
std::unique_lock<std::mutex> lock(_sync);
// Wait until an item is available or shutdown has been requested.
_cvCanPop.wait(lock, [this] { return _bShutdown || !_qu.empty(); });
if (_qu.empty())
return false; // shut down and fully drained
item = std::move(_qu.front());
_qu.pop();
return true;
}
};
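A small usage sketch (assuming the BlockingQueue above): workers drain the backlog until Pop() returns false, which only happens after RequestShutdown() once the queue is empty.

#include <iostream>
#include <thread>
#include <vector>

int main() {
    BlockingQueue<int> q;
    std::vector<std::thread> workers;
    for (int i = 0; i < 2; ++i)
        workers.emplace_back([&]{
            int item;
            while (q.Pop(item)) // false => shut down and drained
                std::cout << "got " << item << "\n";
        });
    for (int i = 0; i < 10; ++i)
        q.Push(i);
    q.RequestShutdown(); // workers still finish the backlog first
    for (auto& w : workers) w.join();
    return 0;
}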
You should first write a semaphore class:
#ifndef SEMAPHORE_H
#define SEMAPHORE_H
#include <mutex>
#include <condition_variable>
class semaphore {
public:
    semaphore(int count = 0)
        : count(count),
          m(),
          cv()
    {
    }
    void await() {
        std::unique_lock<std::mutex> lk(m);
        // Wait with a predicate so spurious wakeups cannot slip through.
        cv.wait(lk, [this] { return count > 0; });
        --count;
    }
    void post() {
        {
            std::unique_lock<std::mutex> lk(m);
            ++count;
        }
        // One unit was released, so waking one waiter is enough.
        cv.notify_one();
    }
private:
    int count;
    std::mutex m;
    std::condition_variable cv;
};
#endif // SEMAPHORE_H
Now the blocked_queue can use the semaphore to do the blocking (it still needs a mutex of its own, since the semaphores only count):
#ifndef BLOCKED_QUEUE_H
#define BLOCKED_QUEUE_H
#include <list>
#include <mutex>
#include "semaphore.h"
template <typename T>
class blocked_queue {
public:
    blocked_queue(int count)
        : s_products(),
          s_free_space(count),
          li()
    {
    }
    void put(const T &t) {
        s_free_space.await(); // blocks while the queue is full
        {
            // The semaphores only count; the list itself needs a lock.
            std::lock_guard<std::mutex> lk(m);
            li.push_back(t);
        }
        s_products.post();
    }
    T take() {
        s_products.await(); // blocks while the queue is empty
        std::unique_lock<std::mutex> lk(m);
        T res = li.front();
        li.pop_front();
        lk.unlock();
        s_free_space.post();
        return res;
    }
private:
    semaphore s_products;
    semaphore s_free_space;
    std::mutex m;
    std::list<T> li;
};
#endif // BLOCKED_QUEUE_H
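A brief usage sketch, under the assumption that the two headers above are saved as semaphore.h and blocked_queue.h (filenames inferred from the include guards): one producer and two consumers over a queue of capacity 4.

#include <iostream>
#include <thread>
#include "blocked_queue.h" // assumed filename, matching the guard above

int main() {
    blocked_queue<int> q(4); // put() blocks once 4 items are waiting

    std::thread producer([&] {
        for (int i = 0; i < 10; ++i)
            q.put(i);
    });
    std::thread c1([&] { for (int n = 0; n < 5; ++n) std::cout << q.take() << "\n"; });
    std::thread c2([&] { for (int n = 0; n < 5; ++n) std::cout << q.take() << "\n"; });

    producer.join();
    c1.join();
    c2.join();
    return 0;
}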
OK, I'm a bit late to the party, but I think this is a better fit for Java's BlockingQueue. Here I too use one mutex and two conditions to look after "not full" and "not empty". IMO a BlockingQueue makes more sense with limited capacity, which I didn't see in the other answers. I include a simple test scenario too:
#include <iostream>
#include <algorithm>
#include <cstddef>
#include <cstdlib>
#include <queue>
#include <vector>
#include <mutex>
#include <thread>
#include <condition_variable>
template<typename T>
class blocking_queue {
private:
size_t _capacity;
std::queue<T> _queue;
mutable std::mutex _mutex; // mutable so the const members size() and empty() can lock it
std::condition_variable _not_full;
std::condition_variable _not_empty;
public:
inline blocking_queue(size_t capacity) : _capacity(capacity) {
// empty
}
inline size_t size() const {
std::unique_lock<std::mutex> lock(_mutex);
return _queue.size();
}
inline bool empty() const {
std::unique_lock<std::mutex> lock(_mutex);
return _queue.empty();
}
inline void push(const T& elem) {
{
std::unique_lock<std::mutex> lock(_mutex);
// wait while the queue is full
while (_queue.size() >= _capacity) {
_not_full.wait(lock);
}
std::cout << "pushing element " << elem << std::endl;
_queue.push(elem);
}
_not_empty.notify_all();
}
inline void pop() {
{
std::unique_lock<std::mutex> lock(_mutex);
// wait while the queue is empty
while (_queue.size() == 0) {
_not_empty.wait(lock);
}
std::cout << "popping element " << _queue.front() << std::endl;
_queue.pop();
}
_not_full.notify_one();
}
inline const T& front() {
std::unique_lock<std::mutex> lock(_mutex);
// wait while the queue is empty
while (_queue.size() == 0) {
_not_empty.wait(lock);
}
return _queue.front();
}
};
int main() {
blocking_queue<int> queue(5);
// create producers
std::vector<std::thread> producers;
for (int i = 0; i < 10; i++) {
producers.push_back(std::thread([&queue, i]() {
queue.push(i);
// produces too fast
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}));
}
// create consumers
std::vector<std::thread> consumers;
for (int i = 0; i < 10; i++) {
consumers.push_back(std::thread([&queue, i]() {
queue.pop();
// consumes too slowly
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
}));
}
std::for_each(producers.begin(), producers.end(), [](std::thread &thread) {
thread.join();
});
std::for_each(consumers.begin(), consumers.end(), [](std::thread &thread) {
thread.join();
});
return EXIT_SUCCESS;
}