I have implemented a thread pool in C++, in which I create Nthread workers to which I assign some jobs from a queue I keep pushing task to. When The queue is empty and/or when I say so, the threads stop working. Everything runs on WSL (Ubuntu 20.04 Focal).
If I open Windows task manager when I launch the program, the number of threads actually working is more than the ones allocated. For example, if I run the program with 4 threads on a 12 cores machine, I can see at least 6 cores with above-average activity; if I use 10 threads, all 12 cores go to 100%. Is this behavior somehow expected or am I doing something wrong? I would expect to see one more thread than the ones allocated because I spawn Nthread threads from the main one (which by the way should stay quiet waiting for the others to finish...), but I cannot explain what I see.
I want to stress that I create all the Nthread threads before all operations, then I populate and process the queue and finally I destroy the threads, i.e. as far as I can see I do not create/destroy threads continouosly during the calculations.
EDIT
I forgot to mention I operate under C++11.
Here is the relevant C++ code.
In main.cc
ThreadPool *pool = new ThreadPool(fNThreads);
std::vector<std::function<void(int)>> *caller =
new std::vector<std::function<void(int)>>;
for (size_t iter = 0; iter < nIter; ++iter)
{
pool->ResetQueue();
for (size_t j = 0; nmax < 2; ++j)
{
caller->push_back(
[=](int iThr){function(iter, j, iThr);});
pool->PushTask((*caller)[j]);
}
pool->WaitForCompletion(1.e-4);
caller->clear();
}
delete caller;
delete pool;
SynchronizedQueue.hh
#ifndef SYNCQUEUE_H
#define SYNCQUEUE_H
#include <list>
#include <mutex>
#include <condition_variable>
template<typename T>
class SynchronizedQueue
{
public:
SynchronizedQueue();
~SynchronizedQueue();
void Put(T const & data);
void Put(T const && data);
T Get();
size_t Size();
SynchronizedQueue(SynchronizedQueue const &) = delete;
SynchronizedQueue & operator=(SynchronizedQueue const &) = delete;
SynchronizedQueue(SynchronizedQueue&&) = delete;
SynchronizedQueue & operator=(SynchronizedQueue&&) = delete;
private:
std::list<T> queue;
std::mutex mut;
std::condition_variable condvar;
};
template<typename T>
SynchronizedQueue<T>::SynchronizedQueue()
{}
template<typename T>
SynchronizedQueue<T>::~SynchronizedQueue()
{}
template<typename T>
void SynchronizedQueue<T>::Put(T const & data)
{
std::unique_lock<std::mutex> lck(mut);
queue.push_back(data);
condvar.notify_one();
}
template<typename T>
T SynchronizedQueue<T>::Get()
{
std::unique_lock<std::mutex> lck(mut);
while (queue.empty())
{
condvar.wait(lck);
}
T result = queue.front();
queue.pop_front();
return result;
}
template<typename T>
size_t SynchronizedQueue<T>::Size()
{
std::unique_lock<std::mutex> lck(mut);
return queue.size();
}
#endif
ThreadPool.hh
#ifndef THREADPOOL_H
#define THREADPOOL_H
#include "SynchronizedQueue.hh"
#include <atomic>
#include <functional>
#include <mutex>
#include <thread>
#include <vector>
class ThreadPool
{
public:
ThreadPool(unsigned int nThreads = 1);
virtual ~ThreadPool();
void PushTask(std::function<void(int)> func);
void WaitForCompletion();
void WaitForCompletion(int sec);
void ResetQueue();
void JoinThreads();
void Delay(int sec);
size_t GetWorkQueueLength();
private:
void WorkerThread(int i);
std::atomic<bool> done;
unsigned int threadCount;
SynchronizedQueue<std::function<void(int)>> workQueue;
std::vector<std::thread> threads;
};
#endif
ThreadPool.cc
#include "ThreadPool.hh"
#include "SynchronizedQueue.hh"
#include <chrono>
//#include <iostream>
void doNothing(int i)
{}
ThreadPool::ThreadPool(unsigned int nThreads)
: done(false)
{
if (nThreads <= 0)
{
threadCount = std::thread::hardware_concurrency();
}
else
{
threadCount = nThreads;
}
for (unsigned int i = 0; i < threadCount; ++i)
{
threads.push_back(std::thread(&ThreadPool::WorkerThread, this, i));
}
}
ThreadPool::~ThreadPool()
{
WaitForCompletion();
JoinThreads();
}
void ThreadPool::WaitForCompletion(int sec)
{
if (!done)
{
while (GetWorkQueueLength())
{
std::this_thread::sleep_for(std::chrono::seconds(sec));
}
done = true;
for (unsigned int i = 0; i < threadCount; ++i)
{
PushTask(&doNothing);
}
}
}
void ThreadPool::WaitForCompletion()
{
if (!done)
{
while (GetWorkQueueLength())
{}
done = true;
for (unsigned int i = 0; i < threadCount; ++i)
{
PushTask(&doNothing);
}
}
}
void ThreadPool::JoinThreads()
{
for (auto& th : threads)
{
if (th.joinable())
{
th.join();
}
}
}
void ThreadPool::Delay(int sec)
{
std::this_thread::sleep_for(std::chrono::seconds(sec));
}
void ThreadPool::PushTask(std::function<void(int)> func)
{
workQueue.Put(func);
}
void ThreadPool::ResetQueue()
{
done = false;
}
void ThreadPool::WorkerThread(int i)
{
while (!done)
{
workQueue.Get()(i);
}
}
size_t ThreadPool::GetWorkQueueLength()
{
return workQueue.Size();
}
Related
I want to calculate number of even numbers among all pairwise sums till 100000. And I want to do it using threadpools. Previously I did it in a static way, i.e., I allocated work to all the threads in the beginning itself. I was able to achieve linear speedup in that case. But the bottleneck is that the threads which started early, finished early (because there were less pairs to compute). So instead of that I want to allocate work to the threads dynamically, i.e., I will initially assign some work to the threads and as soon as they complete the work, they come back to take more work from the queue. Below is my threadpool code,
main.cpp :
#include <iostream>
#include <random>
#include<chrono>
#include<iomanip>
#include<future>
#include<vector>
#include "../include/ThreadPool.h"
std::random_device rd;
std::mt19937 mt(rd());
std::uniform_int_distribution<int> dist(-10, 10);
auto rnd = std::bind(dist, mt);
int thread_work;
long long pairwise(const int start) {
long long sum = 0;
long long counter = 0;
for(int i = start+1; i <= start+thread_work; i++)
{
for(int j = i-1; j >= 0; j--)
{
sum = i + j;
if(sum%2 == 0)
counter++;
}
}
//std::cout<<counter<<std::endl;
return counter;
}
int main(int argc, char *argv[])
{
// Create pool with x threads
int x;
std::cout<<"Enter num of threads : ";
std::cin>>x;
std::cout<<"Enter thread_work : ";
std::cin>>thread_work;
ThreadPool pool(x);
// Initialize pool
pool.init();
int N = 100000;
long long res = 0;
auto start = std::chrono::high_resolution_clock::now();
for(int i = 0; i < N; i = i + thread_work)
{
std::future<long long int> fut = pool.submit(pairwise,i);
res += fut.get();
}
std::cout<<"total is "<<res<<std::endl;
pool.shutdown();
auto end = std::chrono::high_resolution_clock::now();
double time_taken = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
time_taken *= 1e-9;
std::cout << "Time taken by program is : " << std::fixed << time_taken << std::setprecision(9)<<" secs"<<std::endl;
return 0;
}
my SafeQueue.h :
#pragma once
#include <mutex>
#include <queue>
// Thread safe implementation of a Queue using an std::queue
template <typename T>
class SafeQueue {
private:
std::queue<T> m_queue;
std::mutex m_mutex;
public:
SafeQueue() {
}
SafeQueue(SafeQueue& other) {
//TODO:
}
~SafeQueue() {
}
bool empty() {
std::unique_lock<std::mutex> lock(m_mutex);
return m_queue.empty();
}
int size() {
std::unique_lock<std::mutex> lock(m_mutex);
return m_queue.size();
}
void enqueue(T& t) {
std::unique_lock<std::mutex> lock(m_mutex);
m_queue.push(t);
}
bool dequeue(T& t) {
std::unique_lock<std::mutex> lock(m_mutex);
if (m_queue.empty()) {
return false;
}
t = std::move(m_queue.front());
m_queue.pop();
return true;
}
};
and my ThreadPool.h :
#pragma once
#include <functional>
#include <future>
#include <mutex>
#include <queue>
#include <thread>
#include <utility>
#include <vector>
#include "SafeQueue.h"
class ThreadPool {
private:
class ThreadWorker {
private:
int m_id;
ThreadPool * m_pool;
public:
ThreadWorker(ThreadPool * pool, const int id)
: m_pool(pool), m_id(id) {
}
void operator()() {
std::function<void()> func;
bool dequeued;
while (!m_pool->m_shutdown) {
{
std::unique_lock<std::mutex> lock(m_pool->m_conditional_mutex);
if (m_pool->m_queue.empty()) {
m_pool->m_conditional_lock.wait(lock);
}
dequeued = m_pool->m_queue.dequeue(func);
}
if (dequeued) {
func();
}
}
}
};
bool m_shutdown;
SafeQueue<std::function<void()>> m_queue;
std::vector<std::thread> m_threads;
std::mutex m_conditional_mutex;
std::condition_variable m_conditional_lock;
public:
ThreadPool(const int n_threads)
: m_threads(std::vector<std::thread>(n_threads)), m_shutdown(false) {
}
ThreadPool(const ThreadPool &) = delete;
ThreadPool(ThreadPool &&) = delete;
ThreadPool & operator=(const ThreadPool &) = delete;
ThreadPool & operator=(ThreadPool &&) = delete;
// Inits thread pool
void init() {
for (int i = 0; i < m_threads.size(); ++i) {
m_threads[i] = std::thread(ThreadWorker(this, i));
}
}
// Waits until threads finish their current task and shutdowns the pool
void shutdown() {
m_shutdown = true;
m_conditional_lock.notify_all();
for (int i = 0; i < m_threads.size(); ++i) {
if(m_threads[i].joinable()) {
m_threads[i].join();
}
}
}
// Submit a function to be executed asynchronously by the pool
template<typename F, typename...Args>
auto submit(F&& f, Args&&... args) -> std::future<decltype(f(args...))> {
// Create a function with bounded parameters ready to execute
std::function<decltype(f(args...))()> func = std::bind(std::forward<F>(f), std::forward<Args>(args)...);
// Encapsulate it into a shared ptr in order to be able to copy construct / assign
auto task_ptr = std::make_shared<std::packaged_task<decltype(f(args...))()>>(func);
// Wrap packaged task into void function
std::function<void()> wrapper_func = [task_ptr]() {
(*task_ptr)();
};
// Enqueue generic wrapper function
m_queue.enqueue(wrapper_func);
// Wake up one thread if its waiting
m_conditional_lock.notify_one();
// Return future from promise
return task_ptr->get_future();
}
};
I have asked a simpler version of this question before and got the correct answer: Thread pools not working with large number of tasks
Now I am trying to run tasks from an object of a class in parallel using a thread pool. My task is simple and only prints a number for that instance of class. I am expecting numbers 0->9 get printed but instead I get some numbers get printed more than once and some numbers not printed at all. Can anyone see what I am doing wrong with creating tasks in my loop?
#include "iostream"
#include "ThreadPool.h"
#include <chrono>
#include <thread>
using namespace std;
using namespace dynamicThreadPool;
class test {
int x;
public:
test(int x_in) : x(x_in) {}
void task()
{
cout << x << endl;
}
};
int main(void)
{
thread_pool pool;
for (int i = 0; i < 10; i++)
{
test* myTest = new test(i);
std::function<void()> myFunction = [&] {myTest->task(); };
pool.submit(myFunction);
}
while (!pool.isQueueEmpty())
{
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
cout << "waiting for tasks to complete" << endl;
}
return 0;
}
And here is my thread pool, I got this definition from "C++ Concurrency in Action" book:
#pragma once
#include <queue>
#include <future>
#include <list>
#include <functional>
#include <memory>
template<typename T>
class threadsafe_queue
{
private:
mutable std::mutex mut;
std::queue<T> data_queue;
std::condition_variable data_cond;
public:
threadsafe_queue() {}
void push(T new_value)
{
std::lock_guard<std::mutex> lk(mut);
data_queue.push(std::move(new_value));
data_cond.notify_one();
}
void wait_and_pop(T& value)
{
std::unique_lock<std::mutex> lk(mut);
data_cond.wait(lk, [this] {return !data_queue.empty(); });
value = std::move(data_queue.front());
data_queue.pop();
}
bool try_pop(T& value)
{
std::lock_guard<std::mutex> lk(mut);
if (data_queue.empty())
return false;
value = std::move(data_queue.front());
data_queue.pop();
return true;
}
bool empty() const
{
std::lock_guard<std::mutex> lk(mut);
return data_queue.empty();
}
};
class join_threads
{
std::vector<std::thread>& threads;
public:
explicit join_threads(std::vector<std::thread>& threads_) : threads(threads_) {}
~join_threads()
{
for (unsigned long i = 0; i < threads.size(); i++)
{
if (threads[i].joinable())
{
threads[i].join();
}
}
}
};
class thread_pool
{
std::atomic_bool done;
threadsafe_queue<std::function<void()> > work_queue;
std::vector<std::thread> threads;
join_threads joiner;
void worker_thread()
{
while (!done)
{
std::function<void()> task;
if (work_queue.try_pop(task))
{
task();
}
else
{
std::this_thread::yield();
}
}
}
public:
thread_pool() : done(false), joiner(threads)
{
unsigned const thread_count = std::thread::hardware_concurrency();
try
{
for (unsigned i = 0; i < thread_count; i++)
{
threads.push_back(std::thread(&thread_pool::worker_thread, this));
}
}
catch (...)
{
done = true;
throw;
}
}
~thread_pool()
{
done = true;
}
template<typename FunctionType>
void submit(FunctionType f)
{
work_queue.push(std::function<void()>(f));
}
bool isQueueEmpty()
{
return work_queue.empty();
}
};
There's too much code to analyse all of it but you take a pointer by reference here:
{
test* myTest = new test(i);
std::function<void()> myFunction = [&] {myTest->task(); };
pool.submit(myFunction);
} // pointer goes out of scope
After that pointer has gone out of scope you will have undefined behavior if you later do myTest->task();.
To solve that immediate problem, copy the pointer and delete the object afterwards to not leak memory:
{
test* myTest = new test(i);
std::function<void()> myFunction = [=] {myTest->task(); delete myTest; };
pool.submit(myFunction);
}
I suspect this could be solved without using new at all, but I'll leave that up to you.
I was implementing a multi-threading database using a thread pool. Everything was fine and the functions can execute all the codes before return 0.
However, the function does not end after the return 0 in main(). I used _Exit(0) to force exit, which is not very nasty. I ran it with valgrind and there is no memory leak until the end of function.
As you might see in the picture, all the threads are detached, so I shouldn't be running into trouble with threads. But what could go run such that the program can't stop?
auto rwfunc = [](int &id,struct rwinfo &_rwinfo){
Qtable.vec_qpvec[_rwinfo.tableid][id].iswriter?
Writer(id,_rwinfo):Reader(id,_rwinfo);};
//my lambda function to insert my function into the thread
this_thread::yield();
if (COPYFLAG){
for (unsigned int i = 0; i < Qtable.tablenum; ++i) {
for (int j = 0; j < info_vec[i].vecsize; ++j) {
pool.push(rwfunc,j,info_vec[i]);
}
}
}
//pushing function into the pool
Minimal reproducible example Definition and Thread pool
#include <getopt.h>
#include <fstream>
#include <iostream>
#include <string>
#include <mutex>
#include <thread>
#include <condition_variable>
#include <sstream>
#include <iostream>
#include <semaphore.h>
#include <queue>
#include <functional>
//#pragma once
#include<thread>
#include<vector>
#include<queue>
#include<mutex>
#include<condition_variable>
#include<functional>
#include<future>
//////////////////////////
#define MAX_THREADS std::thread::hardware_concurrency() - 1;
bool EXITFLAG = false;
bool COPYFLAG = false;
//portable way to null the copy and assignment operators
#define NULL_COPY_AND_ASSIGN(T) \
T(const T& other) {(void)other;} \
void operator=(const T& other) { (void)other; }
/* ThreadPool class
It is a singleton. To prevent spawning
tons of threads, I made it a singleton */
class ThreadPool{
public:
//getInstance to allow the second constructor to be called
static ThreadPool& getInstance(int numThreads){
static ThreadPool instance(numThreads);
return instance;
}
void waitfinish(){
for (int i = 0; i < numThreads; ++i) {
}
Pool.clear();
}
//add any arg # function to queue
template <typename Func, typename... Args >
inline auto push(Func&& f, Args&&... args){
//get return type of the function
typedef decltype(f(args...)) retType;
//package the task
std::packaged_task<retType()> task(std::move(std::bind(f, args...)));
// lock jobqueue mutex, add job to the job queue
std::unique_lock<std::mutex> lock(JobMutex);
//get the future from the task before the task is moved into the jobqueue
std::future<retType> future = task.get_future();
//place the job into the queue
JobQueue.emplace( std::make_shared<AnyJob<retType> > (std::move(task)) );
//notify a thread that there is a new job
thread.notify_one();
//return the future for the function so the user can get the return value
return future;
}
inline int getThreadCount(){
return numThreads;
}
private:
//used polymorphism to store any type of function in the job queue
class Job {
private:
std::packaged_task<void()> func;
public:
virtual ~Job() {}
virtual void execute() = 0;
};
template <typename RetType>
class AnyJob : public Job {
private:
std::packaged_task<RetType()> func;
public:
AnyJob(std::packaged_task<RetType()> func) : func(std::move(func)) {}
void execute() {
func();
}
};
// end member classes
//member variables
int numThreads; // number of threads in the pool
std::vector<std::thread> Pool; //the actual thread pool
std::queue<std::shared_ptr<Job>> JobQueue;
std::condition_variable thread;// used to notify threads about available jobs
std::mutex JobMutex; // used to push/pop jobs to/from the queue
//end member variables
/* infinite loop function */
inline void threadManager() {
while (!EXITFLAG) {
std::unique_lock<std::mutex> lock(JobMutex);
thread.wait(lock, [this] {return !JobQueue.empty(); });
//strange bug where it will continue even if the job queue is empty
if (JobQueue.size() < 1)
continue;
(*JobQueue.front()).execute();
JobQueue.pop();
}
std::cerr<<"thread end!"<<std::endl;
}
/* Constructors */
ThreadPool(); //prevent default constructor from being called
//real constructor that is used
inline explicit ThreadPool(int _numThreads) : numThreads(_numThreads) {
int tmp = MAX_THREADS;
if(numThreads > tmp){
numThreads = tmp;
}
Pool.reserve(numThreads);
std::cerr<<"Thread pool core num: "<<numThreads<<std::endl;
for(int i = 0; i != numThreads; ++i){
Pool.emplace_back(std::thread(&ThreadPool::threadManager, this));
Pool.back().detach();
}
}
/* end constructors */
NULL_COPY_AND_ASSIGN(ThreadPool);
}; /* end ThreadPool Class */
using namespace std;
int COUNTER = 0;
mutex id_mtx;
struct rwinfo{
sem_t &FINISHED;
rwinfo(sem_t &finished):
FINISHED(finished)
{}
};
void work_todo(int &id,struct rwinfo &_rwinfo){
id_mtx.lock();
cout<<"Job "<<id<<" is done."<<endl;
COUNTER++;
cerr<<"COUNTER is now : "<<COUNTER<<endl;
if (COUNTER==10){
sem_post(&_rwinfo.FINISHED);
}
std::this_thread::sleep_for(500ms);
id_mtx.unlock();
}
ThreadPool& pool = ThreadPool::getInstance(8);
int main(int argc, char *argv[]) {
std::ios_base::sync_with_stdio(false);
sem_t FINISHED;
sem_init(&FINISHED,1,0);
auto mylambdafunc = [](int &i,struct rwinfo &_rwinfo){work_todo(i,_rwinfo);};
auto A = rwinfo(FINISHED);
for (int i = 0; i < 10; ++i) {
pool.push(mylambdafunc,i,A);
}
cerr<<"Start waiting"<<endl;
sem_wait(&FINISHED);
cerr<<"wake up"<<endl;
EXITFLAG = true;
cerr<<"Leaving"<<endl;
return 0;
}
Main
int main(int argc, char *argv[]) {
std::ios_base::sync_with_stdio(false);
sem_t FINISHED;
sem_init(&FINISHED,1,0);
auto mylambdafunc = [](int &i,struct rwinfo &_rwinfo){work_todo(i,_rwinfo);};
auto A = rwinfo(FINISHED);
for (int i = 0; i < 10; ++i) {
pool.push(mylambdafunc,i,A);
}
cerr<<"Start waiting"<<endl;
sem_wait(&FINISHED);
cerr<<"wake up"<<endl;
EXITFLAG = true;
cerr<<"Leaving"<<endl;
return 0;
}
This example can sometimes stuck and sometimes return correctly. I guess it is because it is a lot less jobs to do and jobs are a lot faster.
Also,another question. I was writing a mutithreading database. Some queries will change the data table and some will not. I treated it as the classic reader writer problem and queued every query as a job into the job queue of the thread pool.
The strange thing is that, The program runs actually fastest when there is only 1 thread in the thread pool. When it has 8 threads, it works a lot slower. I'm sure that the thread number in the thread pool is at most std::thread::hardware_concurrency() -1
What could be the possible reason?
I'm trying to implement a blocking queue with limited size, for one provider and multiple consumers. it is working well when the consumer is sleep()ing for 1 second, but hangs when there is no sleep.
what am I doing wrong?
here is my code:
#include <iostream>
#include <stdlib.h>
#include <unistd.h>
#include <thread>
#include <queue>
#include <mutex>
#include <condition_variable>
using namespace std;
template <class T> class BlockingQueue: public queue<T> {
public:
BlockingQueue() {
queue<T>();
}
BlockingQueue(int size) {
maxSize = size;
queue<T>();
}
void push(T item) {
unique_lock<std::mutex> wlck(writerMutex);
while(Full())
isFull.wait(wlck);
queue<T>::push(item);
if(notEmpty())
isEmpty.notify_one();
}
bool notEmpty() {
return !queue<T>::empty();
}
bool Full(){
return queue<T>::size() >= maxSize;
}
T pop() {
unique_lock<std::mutex> lck(readerMutex);
popMutex.lock();
while(queue<T>::empty()) {
isEmpty.wait(lck);
}
T value = queue<T>::front();
queue<T>::pop();
if(!Full())
isFull.notify_all();
popMutex.unlock();
return value;
}
private:
int maxSize;
std::mutex readerMutex;
std::mutex popMutex;
std::mutex writerMutex;
condition_variable isFull;
condition_variable isEmpty;
};
void runProvider(BlockingQueue<int>* Q) {
int number=0;
while(1) {
Q->push(number);
cout<<"provide "<<number<<endl;
number++;
}
}
void runConsumer(int n,BlockingQueue<int>* Q) {
int number;
while(1) {
number = Q->pop();
cout<<"consume#"<<n<<": "<<number<<endl;
}
}
int main(int argc, char** argv) {
BlockingQueue<int> *Queue = new BlockingQueue<int>(10);
cout<<"starting provider"<<endl;
std:thread provider(runProvider, Queue);
sleep(1);
cout<<"starting consumer"<<endl;
std::thread consumer1(runConsumer, 1,Queue);
std::thread consumer2(runConsumer, 2,Queue);
provider.join();
delete(Queue);
return 0;
}
Here is my fixed code for blocking queue with multiple providers and multiple consumers and limited queue size:
template <class T> class BlockingQueue: public queue<T> {
public:
BlockingQueue(int size) {
maxSize = size;
}
void push(T item) {
unique_lock<std::mutex> wlck(writerMutex);
while(Full())
isFull.wait(wlck);
queue<T>::push(item);
isEmpty.notify_all();
}
bool notEmpty() {
return !queue<T>::empty();
}
bool Full(){
return queue<T>::size() >= maxSize;
}
T pop() {
unique_lock<std::mutex> lck(readerMutex);
while(queue<T>::empty()) {
isEmpty.wait(lck);
}
T value = queue<T>::front();
queue<T>::pop();
if(!Full())
isFull.notify_all();
return value;
}
private:
int maxSize;
std::mutex readerMutex;
std::mutex writerMutex;
condition_variable isFull;
condition_variable isEmpty;
};
I'm in the process of porting some Java code over to C++, and one particular section makes use of a BlockingQueue to pass messages from many producers to a single consumer.
If you are not familiar with what a Java BlockingQueue is, it is just a queue that has a hard capacity, which exposes thread safe methods to put() and take() from the queue. put() blocks if the queue is full, and take() blocks if the queue is empty. Also, timeout-sensitive versions of these methods are supplied.
Timeouts are relevant to my use-case, so a recommendation that supplies those is ideal. If not, I can code up some myself.
I've googled around and quickly browsed the Boost libraries and I'm not finding anything like this. Maybe I'm blind here...but does anyone know of a good recommendation?
Thanks!
It isn't fixed size and it doesn't support timeouts but here is a simple implementation of a queue I had posted recently using C++ 2011 constructs:
#include <mutex>
#include <condition_variable>
#include <deque>
template <typename T>
class queue
{
private:
std::mutex d_mutex;
std::condition_variable d_condition;
std::deque<T> d_queue;
public:
void push(T const& value) {
{
std::unique_lock<std::mutex> lock(this->d_mutex);
d_queue.push_front(value);
}
this->d_condition.notify_one();
}
T pop() {
std::unique_lock<std::mutex> lock(this->d_mutex);
this->d_condition.wait(lock, [=]{ return !this->d_queue.empty(); });
T rc(std::move(this->d_queue.back()));
this->d_queue.pop_back();
return rc;
}
};
It should be trivial to extend and use a timed wait for popping. The main reason I haven't done it is that I'm not happy with the interface choices I have thought of so far.
Here's an example of a blocking queue with shutdown request feature:
template <typename T> class BlockingQueue {
std::condition_variable _cvCanPop;
std::mutex _sync;
std::queue<T> _qu;
bool _bShutdown = false;
public:
void Push(const T& item)
{
{
std::unique_lock<std::mutex> lock(_sync);
_qu.push(item);
}
_cvCanPop.notify_one();
}
void RequestShutdown() {
{
std::unique_lock<std::mutex> lock(_sync);
_bShutdown = true;
}
_cvCanPop.notify_all();
}
bool Pop(T &item) {
std::unique_lock<std::mutex> lock(_sync);
for (;;) {
if (_qu.empty()) {
if (_bShutdown) {
return false;
}
}
else {
break;
}
_cvCanPop.wait(lock);
}
item = std::move(_qu.front());
_qu.pop();
return true;
}
};
U should write the class of semephore first
#ifndef SEMEPHORE_H
#define SEMEPHORE_H
#include <mutex>
#include <condition_variable>
class semephore {
public:
semephore(int count = 0)
: count(count),
m(),
cv()
{
}
void await() {
std::unique_lock<std::mutex> lk(m);
--count;
if (count < 0) {
cv.wait(lk);
}
}
void post() {
std::unique_lock<std::mutex> lk(m);
++count;
if (count <= 0) {
cv.notify_all();
}
}
private:
int count;
std::mutex m;
std::condition_variable cv;
};
#endif // SEMEPHORE_H
now the blocked_queue can use the semephore to deal with it
#ifndef BLOCKED_QUEUE_H
#define BLOCKED_QUEUE_H
#include <list>
#include "semephore.h"
template <typename T>
class blocked_queue {
public:
blocked_queue(int count)
: s_products(),
s_free_space(count),
li()
{
}
void put(const T &t) {
s_free_space.await();
li.push_back(t);
s_products.post();
}
T take() {
s_products.await();
T res = li.front();
li.pop_front();
s_free_space.post();
return res;
}
private:
semephore s_products;
semephore s_free_space;
std::list<T> li;
};
#endif // BLOCKED_QUEUE_H
OK I'm a bit late to the party but I think this is a better fit for the Java's BlockingQueue implementation. Here I too use one mutex and two conditions to look after not full and not empty. IMO a BlockingQueue makes more sense with limited capacity which I didn't see in the other answers. I include a simple test scenario too:
#include <iostream>
#include <algorithm>
#include <queue>
#include <mutex>
#include <thread>
#include <condition_variable>
template<typename T>
class blocking_queue {
private:
size_t _capacity;
std::queue<T> _queue;
std::mutex _mutex;
std::condition_variable _not_full;
std::condition_variable _not_empty;
public:
inline blocking_queue(size_t capacity) : _capacity(capacity) {
// empty
}
inline size_t size() const {
std::unique_lock<std::mutex> lock(_mutex);
return _queue.size();
}
inline bool empty() const {
std::unique_lock<std::mutex> lock(_mutex);
return _queue.empty();
}
inline void push(const T& elem) {
{
std::unique_lock<std::mutex> lock(_mutex);
// wait while the queue is full
while (_queue.size() >= _capacity) {
_not_full.wait(lock);
}
std::cout << "pushing element " << elem << std::endl;
_queue.push(elem);
}
_not_empty.notify_all();
}
inline void pop() {
{
std::unique_lock<std::mutex> lock(_mutex);
// wait while the queue is empty
while (_queue.size() == 0) {
_not_empty.wait(lock);
}
std::cout << "popping element " << _queue.front() << std::endl;
_queue.pop();
}
_not_full.notify_one();
}
inline const T& front() {
std::unique_lock<std::mutex> lock(_mutex);
// wait while the queue is empty
while (_queue.size() == 0) {
_not_empty.wait(lock);
}
return _queue.front();
}
};
int main() {
blocking_queue<int> queue(5);
// create producers
std::vector<std::thread> producers;
for (int i = 0; i < 10; i++) {
producers.push_back(std::thread([&queue, i]() {
queue.push(i);
// produces too fast
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}));
}
// create consumers
std::vector<std::thread> consumers;
for (int i = 0; i < 10; i++) {
producers.push_back(std::thread([&queue, i]() {
queue.pop();
// consumes too slowly
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
}));
}
std::for_each(producers.begin(), producers.end(), [](std::thread &thread) {
thread.join();
});
std::for_each(consumers.begin(), consumers.end(), [](std::thread &thread) {
thread.join();
});
return EXIT_SUCCESS;
}