This code is taken from the following link.
I really cannot make sense of this error; I am seeing it for the first time.
Entire Code
#include <boost/thread/thread.hpp>
#include <boost/lockfree/queue.hpp>
#include <iostream>
#include <sstream>
#include <queue>
#include <boost/atomic.hpp>
template<typename QueueContents, template<typename QueueContents> class QueueType>
class WorkerThread
{
public:
WorkerThread(QueueType<QueueContents> &Queue, int NoOfIt, int Delay)
: m_Queue(Queue) // queue shared with consumer
, m_Delay(Delay) // delay for simulating heavy work
, m_NoOfIt(NoOfIt) // number of work items to produce
{}
void Start() // start work
{
m_Thread = boost::thread(&WorkerThread::Work, this);
}
void Stop() // interrupt work
{
m_Thread.interrupt();
}
virtual void Work() = 0;
void WaitUntilFinished()
{
m_Thread.join();
}
protected:
int m_NoOfIt;
int m_Delay;
QueueType<QueueContents> &m_Queue;
private:
boost::thread m_Thread;
};
// New BSD License
class ResetableEvent
{
bool m_EventStatus;
bool m_AutoResetEvent;
boost::condition_variable m_Signal;
boost::mutex m_Mutex;
public:
explicit ResetableEvent(bool _auto_reset = false)
: m_EventStatus(false)
, m_AutoResetEvent(_auto_reset)
{}
void wait() // wait for event
{
boost::unique_lock<boost::mutex> lock(m_Mutex);
if (m_EventStatus)
{
if (m_AutoResetEvent)
m_EventStatus = false;
return;
}
do
{
m_Signal.wait(lock);
} while(!m_EventStatus);
if (m_AutoResetEvent)
m_EventStatus = false;
}
void set() // this notifies threads waiting for this event
// and makes sure that other threads about to wait
// can immediately proceed
{
boost::lock_guard<boost::mutex> lock(m_Mutex);
m_EventStatus = true;
m_Signal.notify_one();
}
void reset() // reset event: threads who will wait for this
// event will be blocked
{
boost::lock_guard<boost::mutex> lock(m_Mutex);
m_EventStatus = false;
}
};
// Queue class that can be used in multithreading context
template <typename T>
class BoundedThreadSafeQueueSignalWorkloadDone
{
private:
std::queue<T> m_queue; // Use STL queue to store data
boost::mutex m_mutex; // The mutex to synchronise on
boost::condition_variable m_QueueHasData; // The condition to wait for if queue is empty
boost::condition_variable m_QueueHasRoom; // The condition to wait for if queue is full
ResetableEvent m_ProcessingDone; // The condition to wait for if queue is empty
// and workload is fully processed
unsigned int m_Size; // max queue size
public:
BoundedThreadSafeQueueSignalWorkloadDone(unsigned int Size)
: m_Size(Size)
{
m_ProcessingDone.set();
}
bool Empty()
{
// Acquire lock on the queue
boost::unique_lock<boost::mutex> lock(m_mutex);
return m_queue.empty();
}
bool Full()
{
// Acquire lock on the queue
boost::unique_lock<boost::mutex> lock(m_mutex);
return m_queue.size() >= m_Size;
}
// Push new data on the queue and notify other threads
// waiting for data in this queue
bool TryPush(const T &data)
{
// Acquire lock on the queue
boost::unique_lock<boost::mutex> lock(m_mutex);
// Indicate that workload processing is busy
// Fail if queue full
if (m_queue.size() >= m_Size) return false;
// Indicate that workload is being processed
m_ProcessingDone.reset();
// Add the data to the queue
m_queue.push(data);
// Notify others that data is ready
m_QueueHasData.notify_one();
return true;
} // Lock is automatically released here
// Try to push data in queue
// Wait until room in queue
void Push(const T &data)
{
// Acquire lock on the queue
boost::unique_lock<boost::mutex> lock(m_mutex);
// While queue is full, wait
// warning: do not replace m_queue.size() >= m_Size with Full():
// it will deadlock by trying to acquire the same m_mutex
// Push has already acquired
while (m_queue.size() >= m_Size) m_QueueHasRoom.wait(lock);
// Indicate that workload is being processed
m_ProcessingDone.reset();
// Now push the data
m_queue.push(data);
// And warn threads that are waiting for data
m_QueueHasData.notify_one();
}
// Get data from the queue.
// Return false if no data available
bool TryPop(T &result)
{
// Acquire lock on the queue
boost::unique_lock<boost::mutex> lock(m_mutex);
// When there is no data, return false
if (m_queue.size() == 0) return false;
// Otherwise return the data
// Retrieve the data from the queue
result=m_queue.front(); m_queue.pop();
// Warn threads who are waiting to push data
m_QueueHasRoom.notify_one();
return true;
// Lock is automatically released here
}
// Get data from the queue.
// Wait for data if not available
void WaitAndPop(T &result)
{
// Acquire lock on the queue
boost::unique_lock<boost::mutex> lock(m_mutex);
// When there is no data, wait till someone fills it.
// Lock is automatically released in the wait and obtained
// again after the wait
while (m_queue.size() == 0) m_QueueHasData.wait(lock);
// Retrieve the data from the queue
result=m_queue.front(); m_queue.pop();
// Warn threads who are waiting to push data
m_QueueHasRoom.notify_one();
} // Lock is automatically released here
void ProcessingFinished()
{
// Acquire lock on the queue
boost::unique_lock<boost::mutex> lock(m_mutex);
// If no new work scheduled,
// we can signal the queue as completely idle
if (m_queue.empty())
m_ProcessingDone.set();
else
std::cout << "[QUEUE] ProcessingFinished but queue not empty. ProcessingDone flag remains low." << std::endl;
}
void WaitUntilCompletelyIdle()
{
std::cout << "[QUEUE] Wait until idle" << std::endl;
m_ProcessingDone.wait();
std::cout << "[QUEUE] Consumer is completely idle" << std::endl;
}
};
template<typename QueueContents, template<typename QueueContents> class QueueType>
class Producer : public WorkerThread<QueueContents, QueueType>
{
public:
Producer(QueueType<QueueContents> &Queue, int NoOfIt, int Delay)
: WorkerThread<QueueContents, QueueType>(Queue, NoOfIt, Delay)
{}
void Work()
{
for (QueueContents i=0; i< this->m_NoOfIt; ++i)
{
// send numbers 0 .. m_NoOfIt-1 into the queue
boost::this_thread::sleep(boost::posix_time::milliseconds(this->m_Delay));
std::ostringstream msg;
msg << "[PRODUCER] Produced value " << i << std::endl;
std::cout << msg.str();
this->m_Queue.Push(i);
}
}
};
template<typename QueueContents, template<typename QueueContents> class QueueType>
class Consumer : public WorkerThread<QueueContents, QueueType>
{
public:
Consumer(QueueType<QueueContents> &Queue, int NoOfIt, int Delay)
: WorkerThread<QueueContents, QueueType>(Queue, NoOfIt, Delay)
{}
void Work()
{
for (QueueContents i=0; i< this->m_NoOfIt; ++i)
{
// read data from the queue,
// waiting until data available
int num = -1;
this->m_Queue.WaitAndPop(num);
std::ostringstream msg;
msg << "[CONSUMER] Consumed value " << num << std::endl;
std::cout << msg.str();
boost::this_thread::sleep(boost::posix_time::milliseconds(this->m_Delay)); // work hard
this->m_Queue.ProcessingFinished(); // tell the queue we're done working hard
std::ostringstream msg2;
msg2 << "[CONSUMER] Consumption of value " << num << " completely handled." << std::endl;
std::cout << msg2.str();
}
}
};
int main()
{
std::cout << "[MAIN] About to construct queue" << std::endl;
BoundedThreadSafeQueueSignalWorkloadDone<int> Q(3);
std::cout << "[MAIN] About to construct producer" << std::endl;
Producer<int, BoundedThreadSafeQueueSignalWorkloadDone> P(Q, 10, 0);
std::cout << "[MAIN] About to construct consumer" << std::endl;
Consumer<int, BoundedThreadSafeQueueSignalWorkloadDone> C(Q, 10, 100);
std::cout << "[MAIN] About to start producer" << std::endl;
P.Start();
std::cout << "[MAIN] About to start consumer" << std::endl;
C.Start();
for (unsigned int i=0; i<20;++i)
{
Q.WaitUntilCompletelyIdle();
boost::this_thread::sleep(boost::posix_time::milliseconds(2));
}
std::cout << "[MAIN] Queue should be empty after all threads finished: " << Q.Empty() << std::endl;
std::cout << "[MAIN] Waiting for producer to finish" << std::endl;
P.WaitUntilFinished();
std::cout << "[MAIN] Waiting for consumer to finish" << std::endl;
C.WaitUntilFinished();
return 0;
}
Error
main.cpp:6:43: error: declaration of template parameter ‘QueueContents’ shadows template parameter
    template<typename QueueContents, template<typename QueueContents> class QueueType>
The code is wrong: it violates [temp.local]/6, which disallows reusing a template parameter's name for any other purpose.
A template-parameter shall not be redeclared within its scope (including nested scopes).
The author probably used GCC 5, which did not enforce this rule; GCC 6 and later do.
As a quick fix, just remove the name of the template-template parameter's parameter; it is not used anyway.
template<typename QueueContents, template<typename> class QueueType>
class WorkerThread
. . .
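For reference, here is a minimal reproduction of the rule with hypothetical names (not from the code above); GCC 6 and later reject the first declaration because the template-template parameter's own parameter reuses the outer name:
template<typename T, template<typename T> class C> // error: inner 'T' shadows outer 'T'
struct Bad {};
template<typename T, template<typename> class C>   // OK: inner parameter left unnamed
struct Good {};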
My little consumer-producer problem had me stumped for some time. I didn't want an implementation where one producer pushes some data round-robin to the consumers, filling up their respective data queues.
I wanted one producer and x consumers, where the producer waits to produce new data until a consumer is free again. In my example there are 3 consumers, so the producer creates a maximum of 3 objects of data at any given time. Since I don't like polling, the consumers were supposed to notify the producer when they are done. Sounds simple, but the solution I found doesn't please me. First, the code.
#include "stdafx.h"
#include <mutex>
#include <iostream>
#include <future>
#include <map>
#include <atomic>
std::atomic_int totalconsumed;
class producer {
using runningmap_t = std::map<int, std::pair<std::future<void>, bool>>;
// Secure the map of futures.
std::mutex mutex_;
runningmap_t running_;
// Used for finished notification
std::mutex waitermutex_;
std::condition_variable waiter_;
// The magic number to limit the producer.
std::atomic<int> count_;
bool can_run();
void clean();
// Fake a source, e.g. filesystem scan.
int fakeiter;
int next();
bool has_next() const;
public:
producer() : fakeiter(50) {}
void run();
void notify(int value);
void wait();
};
class consumer {
producer& producer_;
public:
consumer(producer& producer) : producer_(producer) {}
void run(int value) {
std::this_thread::sleep_for(std::chrono::milliseconds(42));
std::cout << "Consumed " << value << " on (" << std::this_thread::get_id() << ")" << std::endl;
totalconsumed++;
producer_.notify(value);
}
};
// Only if fewer than three threads are active does another get to run.
bool producer::can_run() { return count_.load() < 3; }
// Verify if there's something to consume
bool producer::has_next() const { return 0 != fakeiter; }
// Produce the next value for consumption.
int producer::next() { return --fakeiter; }
// Remove the futures that have reported to be finished.
void producer::clean()
{
for (auto it = running_.begin(); it != running_.end(); ) {
if (it->second.second) {
it = running_.erase(it);
}
else {
++it;
}
}
}
// Runs the producer. Creates a new consumer for every produced value. Max 3 at a time.
void producer::run()
{
while (has_next()) {
if (can_run()) {
auto c = next();
count_++;
auto future = std::async(&consumer::run, consumer(*this), c);
std::unique_lock<std::mutex> lock(mutex_);
running_[c] = std::make_pair(std::move(future), false);
clean();
}
else {
std::unique_lock<std::mutex> lock(waitermutex_);
waiter_.wait(lock);
}
}
}
// Consumers diligently tell the producer that they are finished.
void producer::notify(int value)
{
count_--;
mutex_.lock();
running_[value].second = true;
mutex_.unlock();
std::unique_lock<std::mutex> waiterlock(waitermutex_);
waiter_.notify_all();
}
// Wait for all consumers to finish.
void producer::wait()
{
while (!running_.empty()) {
mutex_.lock();
clean();
mutex_.unlock();
std::this_thread::sleep_for(std::chrono::milliseconds(10));
}
}
// Looks like the application entry point.
int main()
{
producer p;
std::thread pthread(&producer::run, &p);
pthread.join();
p.wait();
std::cout << std::endl << std::endl << "Total consumed " << totalconsumed.load() << std::endl;
return 0;
}
The part I don't like is the list of values mapped to the futures, called running_. I need to keep the future around until the consumer is actually done. I can't remove the future from the map in the notify method or else I'll kill the thread that is currently calling notify.
Am I missing something that could simplify this construct?
#include <array>
#include <condition_variable>
#include <cstddef>
#include <functional>
#include <mutex>
#include <optional>
template<class T>
struct slotted_data {
std::size_t I;
T t;
};
template<class T>
using sink = std::function<void(T)>;
template<class T, std::size_t N>
struct async_slots {
bool produce( slotted_data<T> data ) {
if (terminate || data.I>=N) return false;
{
auto l = lock();
if (slots[data.I]) return false;
slots[data.I] = std::move(data.t);
}
cv.notify_one();
return true;
}
// rare use of non-lambda cv.wait in the wild!
bool consume(sink<slotted_data<T>> f) {
auto l = lock();
while(!terminate) {
for (auto& slot:slots) {
if (slot) {
auto r = std::move(*slot);
slot = std::nullopt;
f({std::size_t(&slot-slots.data()), std::move(r)}); // invoke in lock
return true;
}
}
cv.wait(l);
}
return false;
}
// easier and safer version:
std::optional<slotted_data<T>> consume() {
std::optional<slotted_data<T>> r;
bool worked = consume([&](auto&& data) { r = std::move(data); });
if (!worked) return {};
return r;
}
void finish() {
{
auto l = lock();
terminate = true;
}
cv.notify_all();
}
private:
auto lock() { return std::unique_lock<std::mutex>(m); }
std::mutex m;
std::condition_variable cv;
std::array< std::optional<T>, N > slots;
bool terminate = false;
};
async_slots provides a fixed number of slots and an awaitable consume. If you try to produce two things in the same slot, the producer function returns false and ignores you.
consume invokes the sink of the data inside the mutex in a continuation passing style. This permits atomic consumption.
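For instance, a hypothetical direct use (single-threaded here just to show the slot mechanics):
async_slots<int, 3> q;
q.produce({2, 42});      // put 42 into slot #2
auto got = q.consume();  // got->I == 2, got->t == 42
q.finish();              // after this, consume() returns an empty optional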
We want to invert producer and consumer:
template<class T, std::size_t N>
struct slotted_consumer {
bool consume( std::size_t I, sink<T> sink ) {
std::optional<T> data;
std::condition_variable cv;
std::mutex m;
bool worked = slots.produce(
{
I,
[&](auto&& t){
{
std::unique_lock<std::mutex> l(m);
data.emplace(std::move(t));
}
cv.notify_one();
}
}
);
if (!worked) return false;
std::unique_lock<std::mutex> l(m);
cv.wait(l, [&]()->bool{
return (bool)data;
});
sink( std::move(*data) );
return true;
}
bool produce( T t ) {
return slots.consume(
[&](auto&& f) {
f.t( std::move(t) );
}
);
}
void finish() {
slots.finish();
}
private:
async_slots< sink<T>, N > slots;
};
We have to take some care to execute the sink in a context where we are not holding the mutex of async_slots, which is why consume above is so strange.
You share a slotted_consumer< int, 3 > slots. The producing thread repeatedly calls slots.produce(42), which blocks until a new consumer lines up.
Consumer #2 calls slots.consume( 2, [&](int x){ /* code to consume x */ } ), and #1 and #0 pass their slot numbers as well.
All 3 consumers can be waiting for the next production. The above system defaults to feeding #0 first if it is waiting for more work; we could make it "fair" at a cost of keeping a bit more state.
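A rough usage sketch of that wiring (my own hypothetical example, assuming the definitions above plus <thread> and <cstdio>; each consumer takes exactly one value here to keep shutdown simple):
slotted_consumer<int, 3> slots;

std::thread producer([&]{
    for (int v = 0; v < 3; ++v)
        slots.produce(100 + v);   // each call blocks until a consumer has lined up
});

std::thread c0([&]{ slots.consume(0, [](int x){ std::printf("#0 got %d\n", x); }); });
std::thread c1([&]{ slots.consume(1, [](int x){ std::printf("#1 got %d\n", x); }); });
std::thread c2([&]{ slots.consume(2, [](int x){ std::printf("#2 got %d\n", x); }); });

producer.join();
c0.join(); c1.join(); c2.join();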
I simply get packets from the network and Enqueue them in one thread, then consume these packets (Dequeue) in another thread.
So I decided to use the Boost library to make a shared queue, based on
https://www.quantnet.com/cplusplus-multithreading-boost/
template <typename T>
class SynchronisedQueue
{
private:
std::queue<T> m_queue; // Use STL queue to store data
boost::mutex m_mutex; // The mutex to synchronise on
boost::condition_variable m_cond;// The condition to wait for
public:
// Add data to the queue and notify others
void Enqueue(const T& data)
{
// Acquire lock on the queue
boost::unique_lock<boost::mutex> lock(m_mutex);
// Add the data to the queue
m_queue.push(data);
// Notify others that data is ready
m_cond.notify_one();
} // Lock is automatically released here
// Get data from the queue. Wait for data if not available
T Dequeue()
{
// Acquire lock on the queue
boost::unique_lock<boost::mutex> lock(m_mutex);
// When there is no data, wait till someone fills it.
// Lock is automatically released in the wait and obtained
// again after the wait
while (m_queue.size()==0) m_cond.wait(lock);
// Retrieve the data from the queue
T result=m_queue.front(); m_queue.pop();
return result;
} // Lock is automatically released here
};
The problem is that while no data is arriving, the Dequeue() method blocks my consumer thread, and when I want to terminate the consumer thread, I sometimes cannot end or stop it.
What is the suggested way to end the blocking of Dequeue(), so that I can safely terminate the thread that consumes packets?
Any ideas or suggestions?
PS: The site https://www.quantnet.com/cplusplus-multithreading-boost/ uses "boost::this_thread::interruption_point();" for stopping the consumer thread. Because of my legacy code structure this is not possible for me...
Based on the answer, I updated the shared queue like this:
#include <queue>
#include <boost/thread.hpp>
template <typename T>
class SynchronisedQueue
{
public:
SynchronisedQueue()
{
RequestToEnd = false;
EnqueueData = true;
}
void Enqueue(const T& data)
{
boost::unique_lock<boost::mutex> lock(m_mutex);
if(EnqueueData)
{
m_queue.push(data);
m_cond.notify_one();
}
}
bool TryDequeue(T& result)
{
boost::unique_lock<boost::mutex> lock(m_mutex);
while (m_queue.empty() && (! RequestToEnd))
{
m_cond.wait(lock);
}
if( RequestToEnd )
{
DoEndActions();
return false;
}
result= m_queue.front(); m_queue.pop();
return true;
}
void StopQueue()
{
RequestToEnd = true;
Enqueue(NULL);
}
int Size()
{
boost::unique_lock<boost::mutex> lock(m_mutex);
return m_queue.size();
}
private:
void DoEndActions()
{
EnqueueData = false;
while (!m_queue.empty())
{
m_queue.pop();
}
}
std::queue<T> m_queue; // Use STL queue to store data
boost::mutex m_mutex; // The mutex to synchronise on
boost::condition_variable m_cond; // The condition to wait for
bool RequestToEnd;
bool EnqueueData;
};
And here is my test drive:
#include <iostream>
#include <string>
#include "SynchronisedQueue.h"
using namespace std;
SynchronisedQueue<int> MyQueue;
void InsertToQueue()
{
int i= 0;
while(true)
{
MyQueue.Enqueue(++i);
}
}
void ConsumeFromQueue()
{
while(true)
{
int number;
cout << "Now try to dequeue" << endl;
bool success = MyQueue.TryDequeue(number);
if(success)
{
cout << "value is " << number << endl;
}
else
{
cout << " queue is stopped" << endl;
break;
}
}
cout << "Que size is : " << MyQueue.Size() << endl;
}
int main()
{
cout << "Test Started" << endl;
boost::thread startInsertIntoQueue = boost::thread(InsertToQueue);
boost::thread consumeFromQueue = boost::thread(ConsumeFromQueue);
boost::this_thread::sleep(boost::posix_time::seconds(5)); //After 5 seconds
MyQueue.StopQueue();
int endMain;
cin >> endMain;
return 0;
}
For now it seems to work. Based on new suggestions,
I changed the Stop method as follows:
void StopQueue()
{
boost::unique_lock<boost::mutex> lock(m_mutex);
RequestToEnd = true;
m_cond.notify_one();
}
Two easy solutions to let the thread end:
send an end message on the queue.
add another condition to the condition variable to command to end
while(queue.empty() && (! RequestToEnd)) m_cond.wait(lock);
if (RequestToEnd) { doEndActions(); }
else { T result=m_queue.front(); m_queue.pop(); return result; }
First, do you really need to terminate the thread? If not, don't.
If you do have to, then just queue it a suicide pill. I usually send a NULL cast to T. The thread checks T and, if NULL, cleans up, returns and so dies.
Also, you may need to purge the queue first by removing and delete()ing all the items.
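For illustration, a rough sketch of the poison-pill idea against the SynchronisedQueue above, instantiated with a pointer type (Packet and Process are hypothetical stand-ins):
SynchronisedQueue<Packet*> queue;

void ConsumerLoop()
{
    for (;;)
    {
        Packet* p = queue.Dequeue(); // blocks until something arrives
        if (p == NULL)               // the pill: clean up and die
            break;
        Process(p);
        delete p;
    }
}

// Producer side, when shutting down:
queue.Enqueue(NULL);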
Another option that should be considered is not to block infinitely in threads. In other words, add a time out to your blocking calls like so:
bool TryDequeue(T& result, boost::chrono::milliseconds timeout)
{
boost::unique_lock<boost::mutex> lock(m_mutex);
boost::chrono::system_clock::time_point timeLimit =
boost::chrono::system_clock::now() + timeout;
while (m_queue.empty())
{
if (m_cond.wait_until(lock, timeLimit) ==
boost::cv_status::timeout)
{
return false;
}
}
result = m_queue.front(); m_queue.pop();
return true;
}
Then in your thread, just have a variable to indicate whether the thread is still running (I took the liberty of making your consumer into a class):
class Consumer
{
public:
static boost::shared_ptr<Consumer> createConsumer()
{
boost::shared_ptr<Consumer> ret(new Consumer());
ret->_consumeFromQueue = boost::thread(&Consumer::ConsumeFromQueue, ret.get());
return ret;
}
protected:
Consumer()
: _threadRunning(true)
{
}
~Consumer()
{
_threadRunning = false;
_consumeFromQueue.join();
}
void ConsumeFromQueue()
{
while(_threadRunning == true)
{
int number;
cout << "Now try to dequeue" << endl;
// Wait at most 100 ms so that _threadRunning is re-checked regularly
bool success = MyQueue.TryDequeue(number, boost::chrono::milliseconds(100));
if(success)
{
cout << "value is " << number << endl;
}
// On timeout, simply loop around and re-check _threadRunning
}
cout << "Que size is : " << MyQueue.Size() << endl;
}
bool _threadRunning;
boost::thread _consumeFromQueue;
};
There is no need to hack your queue class just so it can be used in a thread; give it normal interfaces with timeouts, and then use it in the proper way based on the use case.
I give more details on why this is a good pattern to follow for threads here:
http://blog.chrisd.info/how-to-run-threads/
Is it true that C++0x will come without semaphores? There are already some questions on Stack Overflow regarding the use of semaphores. I use them (posix semaphores) all the time to let a thread wait for some event in another thread:
void thread0(...)
{
doSomething0();
event1.wait();
...
}
void thread1(...)
{
doSomething1();
event1.post();
...
}
If I would do that with a mutex:
void thread0(...)
{
doSomething0();
event1.lock(); event1.unlock();
...
}
void thread1(...)
{
event1.lock();
doSomething1();
event1.unlock();
...
}
Problem: It's ugly, and it's not guaranteed that thread1 locks the mutex first (given that the same thread must lock and unlock a mutex, you also can't lock event1 before thread0 and thread1 have started).
So since Boost doesn't have semaphores either, what is the simplest way to achieve the above?
You can easily build one from a mutex and a condition variable:
#include <mutex>
#include <condition_variable>
class semaphore {
std::mutex mutex_;
std::condition_variable condition_;
unsigned long count_ = 0; // Initialized as locked.
public:
void release() {
std::lock_guard<decltype(mutex_)> lock(mutex_);
++count_;
condition_.notify_one();
}
void acquire() {
std::unique_lock<decltype(mutex_)> lock(mutex_);
while(!count_) // Handle spurious wake-ups.
condition_.wait(lock);
--count_;
}
bool try_acquire() {
std::lock_guard<decltype(mutex_)> lock(mutex_);
if(count_) {
--count_;
return true;
}
return false;
}
};
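With this class, the event pattern from the question becomes straightforward. A sketch (doSomething0/doSomething1 stand in for the question's work):
semaphore event1; // count starts at 0, i.e. "not signalled"

void thread0()
{
    doSomething0();
    event1.acquire(); // blocks until thread1 has released
}

void thread1()
{
    doSomething1();
    event1.release(); // wakes thread0 even if it has not started waiting yet
}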
Based on Maxim Yegorushkin's answer, I tried to make the example in C++11 style.
#include <mutex>
#include <condition_variable>
class Semaphore {
public:
Semaphore (int count_ = 0)
: count(count_) {}
inline void notify()
{
std::unique_lock<std::mutex> lock(mtx);
count++;
cv.notify_one();
}
inline void wait()
{
std::unique_lock<std::mutex> lock(mtx);
while(count == 0){
cv.wait(lock);
}
count--;
}
private:
std::mutex mtx;
std::condition_variable cv;
int count;
};
I decided to write the most robust/generic C++11 semaphore I could, in the style of the standard as much as possible (note the using semaphore = ... line; you would normally just use the name semaphore, similar to using string rather than basic_string):
#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <mutex>
template <typename Mutex, typename CondVar>
class basic_semaphore {
public:
using native_handle_type = typename CondVar::native_handle_type;
explicit basic_semaphore(size_t count = 0);
basic_semaphore(const basic_semaphore&) = delete;
basic_semaphore(basic_semaphore&&) = delete;
basic_semaphore& operator=(const basic_semaphore&) = delete;
basic_semaphore& operator=(basic_semaphore&&) = delete;
void notify();
void wait();
bool try_wait();
template<class Rep, class Period>
bool wait_for(const std::chrono::duration<Rep, Period>& d);
template<class Clock, class Duration>
bool wait_until(const std::chrono::time_point<Clock, Duration>& t);
native_handle_type native_handle();
private:
Mutex mMutex;
CondVar mCv;
size_t mCount;
};
using semaphore = basic_semaphore<std::mutex, std::condition_variable>;
template <typename Mutex, typename CondVar>
basic_semaphore<Mutex, CondVar>::basic_semaphore(size_t count)
: mCount{count}
{}
template <typename Mutex, typename CondVar>
void basic_semaphore<Mutex, CondVar>::notify() {
std::lock_guard<Mutex> lock{mMutex};
++mCount;
mCv.notify_one();
}
template <typename Mutex, typename CondVar>
void basic_semaphore<Mutex, CondVar>::wait() {
std::unique_lock<Mutex> lock{mMutex};
mCv.wait(lock, [&]{ return mCount > 0; });
--mCount;
}
template <typename Mutex, typename CondVar>
bool basic_semaphore<Mutex, CondVar>::try_wait() {
std::lock_guard<Mutex> lock{mMutex};
if (mCount > 0) {
--mCount;
return true;
}
return false;
}
template <typename Mutex, typename CondVar>
template<class Rep, class Period>
bool basic_semaphore<Mutex, CondVar>::wait_for(const std::chrono::duration<Rep, Period>& d) {
std::unique_lock<Mutex> lock{mMutex};
auto finished = mCv.wait_for(lock, d, [&]{ return mCount > 0; });
if (finished)
--mCount;
return finished;
}
template <typename Mutex, typename CondVar>
template<class Clock, class Duration>
bool basic_semaphore<Mutex, CondVar>::wait_until(const std::chrono::time_point<Clock, Duration>& t) {
std::unique_lock<Mutex> lock{mMutex};
auto finished = mCv.wait_until(lock, t, [&]{ return mCount > 0; });
if (finished)
--mCount;
return finished;
}
template <typename Mutex, typename CondVar>
typename basic_semaphore<Mutex, CondVar>::native_handle_type basic_semaphore<Mutex, CondVar>::native_handle() {
return mCv.native_handle();
}
In accordance with POSIX semaphores, I would add
class semaphore
{
...
bool trywait()
{
std::lock_guard<std::mutex> lock(mutex_);
if(count_)
{
--count_;
return true;
}
else
{
return false;
}
}
};
And I much prefer using a synchronisation mechanism at a convenient level of abstraction, rather than always copy pasting a stitched-together version using more basic operators.
C++20 finally has semaphores - std::counting_semaphore<max_count>.
These have (at least) the following methods:
acquire() (blocking)
try_acquire() (non-blocking, returns immediately)
try_acquire_for() (non-blocking, takes a duration)
try_acquire_until() (non-blocking, takes a time at which to stop trying)
release()
You can read these CppCon 2019 presentation slides, or watch the video. There's also the official proposal P0514R4, but it may not be up-to-date with actual C++20.
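For example, the waiting pattern from the question could look like this in C++20 (a sketch; requires <semaphore>, and doSomething0/doSomething1 are the question's placeholders):
#include <semaphore>

std::counting_semaphore<1> event1(0); // starts unsignalled; std::binary_semaphore is an alias for this

void thread0()
{
    doSomething0();
    event1.acquire(); // blocks until thread1 calls release()
}

void thread1()
{
    doSomething1();
    event1.release();
}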
You can also check out cpp11-on-multicore - it has a portable and optimal semaphore implementation.
The repository also contains other threading goodies that complement C++11 threading.
You can work with mutex and condition variables. You gain exclusive access with the mutex, check whether you want to continue or need to wait for the other end. If you need to wait, you wait in a condition. When the other thread determines that you can continue, it signals the condition.
There is a short example in the boost::thread library that you can most probably just copy (the C++0x and boost thread libs are very similar).
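A minimal sketch of that wait/signal pattern (my own illustration, using std types; the Boost equivalents are nearly identical):
#include <condition_variable>
#include <mutex>

std::mutex m;
std::condition_variable cv;
bool ready = false; // the "can continue" condition, protected by m

void waiter()
{
    std::unique_lock<std::mutex> lock(m);
    cv.wait(lock, []{ return ready; }); // releases m while waiting
}

void signaller()
{
    {
        std::lock_guard<std::mutex> lock(m);
        ready = true;
    }
    cv.notify_one(); // tells the waiter it can continue
}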
A RAII semaphore wrapper can also be useful in threaded code:
class ScopedSemaphore
{
public:
explicit ScopedSemaphore(Semaphore& sem) : m_Semaphore(sem) { m_Semaphore.Wait(); }
ScopedSemaphore(const ScopedSemaphore&) = delete;
~ScopedSemaphore() { m_Semaphore.Notify(); }
ScopedSemaphore& operator=(const ScopedSemaphore&) = delete;
private:
Semaphore& m_Semaphore;
};
Usage example in a multithreaded app:
boost::ptr_vector<std::thread> threads;
Semaphore semaphore;
for (...)
{
...
auto t = new std::thread([..., &semaphore]
{
ScopedSemaphore scopedSemaphore(semaphore);
...
}
);
threads.push_back(t);
}
for (auto& t : threads)
t.join();
I found that shared_ptr and weak_ptr, along with a list, did the job I needed. My issue was that I had several clients wanting to interact with a host's internal data. Typically, the host updates the data on its own; however, if a client requests it, the host needs to stop updating until no clients are accessing the host data. At the same time, a client could ask for exclusive access, so that no other clients, nor the host, could modify that host data.
How I did this was, I created a struct:
struct UpdateLock
{
typedef std::shared_ptr< UpdateLock > ptr;
};
Each client would have a member of such:
UpdateLock::ptr m_myLock;
Then the host would have a weak_ptr member for exclusivity, and a list of weak_ptrs for non-exclusive locks:
std::weak_ptr< UpdateLock > m_exclusiveLock;
std::list< std::weak_ptr< UpdateLock > > m_locks;
There is a function to enable locking, and another function to check if the host is locked:
UpdateLock::ptr LockUpdate( bool exclusive );
bool IsUpdateLocked( bool exclusive ) const;
I test for locks in LockUpdate, IsUpdateLocked, and periodically in the host's Update routine. Testing for a lock is as simple as checking whether the weak_ptrs have expired. After removing any expired entries from the m_locks list (I only do this during the host update), I can check whether the list is empty; at the same time, I get automatic unlocking when a client resets the shared_ptr it is holding, which also happens when a client is destroyed.
The overall effect is that, since clients rarely need exclusivity (typically reserved for additions and deletions only), most of the time a request to LockUpdate(false), that is to say non-exclusive, succeeds as long as (!m_exclusiveLock). A LockUpdate(true), a request for exclusivity, succeeds only when both (!m_exclusiveLock) and (m_locks.empty()).
A queue could be added to arbitrate between exclusive and non-exclusive locks; however, I have had no collisions thus far, so I intend to wait until that happens to add the solution (mostly so I have a real-world test condition).
So far this is working well for my needs. I can imagine the need to expand this, and some issues that might arise over expanded use; however, this was quick to implement and required very little custom code.
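A condensed sketch of how LockUpdate could look under that scheme (hypothetical code written for this answer, assuming the members declared above plus <list> and <memory>):
UpdateLock::ptr LockUpdate( bool exclusive )
{
    // An unexpired weak_ptr here means someone holds an exclusive lock.
    if (!m_exclusiveLock.expired())
        return nullptr;
    if (exclusive)
    {
        // Exclusivity additionally requires that no shared locks remain.
        m_locks.remove_if(
            []( const std::weak_ptr<UpdateLock>& w ){ return w.expired(); } );
        if (!m_locks.empty())
            return nullptr;
    }
    UpdateLock::ptr lock = std::make_shared<UpdateLock>();
    if (exclusive)
        m_exclusiveLock = lock;    // host keeps only a weak reference
    else
        m_locks.push_back( lock ); // likewise stored as a weak_ptr
    return lock; // the client's shared_ptr keeps the lock alive; dropping it unlocks
}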
This is an old question, but I would like to offer another solution.
It seems you need not a semaphore but an event, like Windows events.
Very efficient events can be implemented as follows:
#ifdef _MSC_VER
#include <concrt.h>
#else
// pthread implementation
#include <cstddef>
#include <cstdint>
#include <shared_mutex>
namespace Concurrency
{
const unsigned int COOPERATIVE_TIMEOUT_INFINITE = (unsigned int)-1;
const size_t COOPERATIVE_WAIT_TIMEOUT = SIZE_MAX;
class event
{
public:
event();
~event();
size_t wait(unsigned int timeout = COOPERATIVE_TIMEOUT_INFINITE);
void set();
void reset();
static size_t wait_for_multiple(event** _PPEvents, size_t _Count, bool _FWaitAll, unsigned int _Timeout = COOPERATIVE_TIMEOUT_INFINITE);
static const unsigned int timeout_infinite = COOPERATIVE_TIMEOUT_INFINITE;
private:
int d;
std::shared_mutex guard;
};
};
namespace concurrency = Concurrency;
#include <unistd.h>
#include <errno.h>
#include <sys/eventfd.h>
#include <sys/epoll.h>
#include <chrono>
#include "../HandleHolder.h"
typedef CommonHolder<int, close> fd_holder;
namespace Concurrency
{
int watch(int ep_fd, int fd)
{
epoll_event ep_event;
ep_event.events = EPOLLIN;
ep_event.data.fd = fd;
return epoll_ctl(ep_fd, EPOLL_CTL_ADD, fd, &ep_event);
}
event::event()
: d(eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK))
{
}
event::~event()
{
std::unique_lock<std::shared_mutex> lock(guard);
close(d);
d = -1;
}
size_t event::wait(unsigned int timeout)
{
fd_holder ep_fd(epoll_create1(EPOLL_CLOEXEC));
{
std::shared_lock<std::shared_mutex> lock(guard);
if (d == -1 || watch(ep_fd.GetHandle(), d) < 0)
return COOPERATIVE_WAIT_TIMEOUT;
}
epoll_event ep_event;
return epoll_wait(ep_fd.GetHandle(), &ep_event, 1, timeout) == 1 && (ep_event.events & EPOLLIN) ? 0 : COOPERATIVE_WAIT_TIMEOUT;
}
void event::set()
{
uint64_t count = 1;
write(d, &count, sizeof(count));
}
void event::reset()
{
uint64_t count;
read(d, &count, sizeof(count));
}
size_t event::wait_for_multiple(event** _PPEvents, size_t _Count, bool _FWaitAll, unsigned int _Timeout)
{
if (_FWaitAll) // not implemented
std::abort();
const auto deadline = _Timeout != COOPERATIVE_TIMEOUT_INFINITE ? std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now().time_since_epoch()).count() + _Timeout : COOPERATIVE_TIMEOUT_INFINITE;
fd_holder ep_fd(epoll_create1(EPOLL_CLOEXEC));
int fds[_Count];
for (int i = 0; i < _Count; ++i)
{
std::shared_lock<std::shared_mutex> lock(_PPEvents[i]->guard);
fds[i] = _PPEvents[i]->d;
if (fds[i] != -1 && watch(ep_fd.GetHandle(), fds[i]) < 0)
fds[i] = -1;
}
epoll_event ep_events[_Count];
// The epoll_wait call can be interrupted by a signal. Wait out the whole timeout, just as on Windows
int res = 0;
while (true)
{
res = epoll_wait(ep_fd.GetHandle(), &ep_events[0], _Count, _Timeout);
if (res == -1 && errno == EINTR && std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now().time_since_epoch()).count() < deadline)
continue;
break;
}
for (int i = 0; i < _Count; ++i)
{
if (fds[i] == -1)
continue;
for (int j = 0; j < res; ++j)
if (ep_events[j].data.fd == fds[i] && (ep_events[j].events & EPOLLIN))
return i;
}
return COOPERATIVE_WAIT_TIMEOUT;
}
};
#endif
And then just use concurrency::event
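A small usage sketch (my own example, assuming the class above plus <thread> and <iostream>):
concurrency::event done;

std::thread worker([&]{
    // ... perform the work ...
    done.set(); // signal completion
});

// wait() returns 0 when signalled, COOPERATIVE_WAIT_TIMEOUT on timeout
if (done.wait(5000) == 0)
    std::cout << "worker signalled within 5 seconds" << std::endl;
worker.join();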
Different from the other answers, I propose a new version which:
Unblocks all waiting threads before being deleted. In this case, deleting the semaphore will wake up all waiting threads, and only after everybody has woken up does the semaphore destructor exit.
Has a parameter for the wait() call, to automatically unlock the calling thread after the timeout in milliseconds has passed.
Has an option on the constructor to limit the available resource count to the count the semaphore was initialized with. This way, calling notify() too many times will not increase the number of resources the semaphore has.
#include <stdio.h>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <iostream>
std::recursive_mutex g_sync_mutex;
#define sync(x) do { \
std::unique_lock<std::recursive_mutex> lock(g_sync_mutex); \
x; \
} while (false);
class Semaphore {
int _count;
bool _limit;
int _all_resources;
int _wakedup;
std::mutex _mutex;
std::condition_variable_any _condition_variable;
public:
/**
* count - how many resources this semaphore holds
* limit - limit notify() calls only up to the count value (available resources)
*/
Semaphore (int count, bool limit)
: _count(count),
_limit(limit),
_all_resources(count),
_wakedup(count)
{
}
/**
* Unlock all waiting threads before destructing the semaphore (to avoid their segfault later)
*/
virtual ~Semaphore () {
std::unique_lock<std::mutex> lock(_mutex);
_wakeup(lock);
}
void _wakeup(std::unique_lock<std::mutex>& lock) {
int lastwakeup = 0;
while( _wakedup < _all_resources ) {
lock.unlock();
notify();
lock.lock();
// avoids 100% CPU usage if someone is not waking up properly
if (lastwakeup == _wakedup) {
std::this_thread::sleep_for( std::chrono::milliseconds(10) );
}
lastwakeup = _wakedup;
}
}
// Mutex and condition variables are not movable and there is no need for smart pointers yet
Semaphore(const Semaphore&) = delete;
Semaphore& operator =(const Semaphore&) = delete;
Semaphore(const Semaphore&&) = delete;
Semaphore& operator =(const Semaphore&&) = delete;
/**
* Release one acquired resource.
*/
void notify()
{
std::unique_lock<std::mutex> lock(_mutex);
// sync(std::cerr << getTime() << "Calling notify(" << _count << ", " << _limit << ", " << _all_resources << ")" << std::endl);
_count++;
if (_limit && _count > _all_resources) {
_count = _all_resources;
}
_condition_variable.notify_one();
}
/**
* This function never blocks!
* Return false if it would block when acquiring the lock. Otherwise acquires the lock and return true.
*/
bool try_acquire() {
std::unique_lock<std::mutex> lock(_mutex);
// sync(std::cerr << getTime() << "Calling try_acquire(" << _count << ", " << _limit << ", " << _all_resources << ")" << std::endl);
if(_count <= 0) {
return false;
}
_count--;
return true;
}
/**
* Return true if the timeout expired, otherwise return false.
* timeout - how many milliseconds to wait before automatically unlocking the wait() call.
*/
bool wait(int timeout = 0) {
std::unique_lock<std::mutex> lock(_mutex);
// sync(std::cerr << getTime() << "Calling wait(" << _count << ", " << _limit << ", " << _all_resources << ")" << std::endl);
_count--;
_wakedup--;
try {
std::chrono::time_point<std::chrono::system_clock> timenow = std::chrono::system_clock::now();
while(_count < 0) {
if (timeout < 1) {
_condition_variable.wait(lock);
}
else {
std::cv_status status = _condition_variable.wait_until(lock, timenow + std::chrono::milliseconds(timeout));
if ( std::cv_status::timeout == status) {
_count++;
_wakedup++;
return true;
}
}
}
}
catch (...) {
_count++;
_wakedup++;
throw;
}
_wakedup++;
return false;
}
/**
* Return true if calling wait() will block the calling thread
*/
bool locked() {
std::unique_lock<std::mutex> lock(_mutex);
return _count <= 0;
}
/**
* Return true if the semaphore has at least all its resources available (as many as when it was created)
*/
bool freed() {
std::unique_lock<std::mutex> lock(_mutex);
return _count >= _all_resources;
}
/**
* Return how many resources are available:
* - 0 means no free resources, and calling wait() will block the calling thread
* - a negative value means there are several threads being blocked
* - a positive value means there are no threads waiting
*/
int count() {
std::unique_lock<std::mutex> lock(_mutex);
return _count;
}
/**
* Wake everybody who is waiting and reset the semaphore to its initial value.
*/
void reset() {
std::unique_lock<std::mutex> lock(_mutex);
if(_count < 0) {
_wakeup(lock);
}
_count = _all_resources;
}
};
Utility to print the current timestamp:
// Requires <chrono>, <cstdio>, <ctime> and <string> (plus <windows.h> on Windows).
std::string getTime() {
char buffer[20];
#if defined( WIN32 )
SYSTEMTIME wlocaltime;
GetLocalTime(&wlocaltime);
::snprintf(buffer, sizeof buffer, "%02d:%02d:%02d.%03d ", wlocaltime.wHour, wlocaltime.wMinute, wlocaltime.wSecond, wlocaltime.wMilliseconds);
#else
std::chrono::time_point< std::chrono::system_clock > now = std::chrono::system_clock::now();
auto duration = now.time_since_epoch();
auto hours = std::chrono::duration_cast< std::chrono::hours >( duration );
duration -= hours;
auto minutes = std::chrono::duration_cast< std::chrono::minutes >( duration );
duration -= minutes;
auto seconds = std::chrono::duration_cast< std::chrono::seconds >( duration );
duration -= seconds;
auto milliseconds = std::chrono::duration_cast< std::chrono::milliseconds >( duration );
duration -= milliseconds;
time_t theTime = time( NULL );
struct tm* aTime = localtime( &theTime );
::snprintf(buffer, sizeof buffer, "%02d:%02d:%02d.%03ld ", aTime->tm_hour, aTime->tm_min, aTime->tm_sec, milliseconds.count());
#endif
return buffer;
}
Example program using this semaphore:
// g++ -o test -Wall -Wextra -ggdb -g3 -pthread test.cpp && gdb --args ./test
// valgrind --leak-check=full --show-leak-kinds=all --track-origins=yes --verbose ./test
// procdump -accepteula -ma -e -f "" -x c:\ myexe.exe
int main(int argc, char* argv[]) {
std::cerr << getTime() << "Creating Semaphore" << std::endl;
Semaphore* semaphore = new Semaphore(1, false);
semaphore->wait(1000);
semaphore->wait(1000);
std::cerr << getTime() << "Auto Unlocking Semaphore wait" << std::endl;
std::this_thread::sleep_for( std::chrono::milliseconds(5000) );
delete semaphore;
std::cerr << getTime() << "Exiting after 10 seconds..." << std::endl;
return 0;
}
Example output:
11:03:01.012 Creating Semaphore
11:03:02.012 Auto Unlocking Semaphore wait
11:03:07.012 Exiting after 10 seconds...
Extra function which uses an EventLoop to unlock the semaphores after some time:
std::shared_ptr<std::atomic<bool>> autowait(Semaphore* semaphore, int timeout, EventLoop<std::function<void()>>& eventloop, const char* source) {
std::shared_ptr<std::atomic<bool>> waiting(std::make_shared<std::atomic<bool>>(true));
sync(std::cerr << getTime() << "autowait '" << source << "'..." << std::endl);
if (semaphore->try_acquire()) {
eventloop.enqueue( timeout, [waiting, source, semaphore]{
if ( (*waiting).load() ) {
sync(std::cerr << getTime() << "Timeout '" << source << "'..." << std::endl);
semaphore->notify();
}
} );
}
else {
semaphore->wait(timeout);
}
return waiting;
}
Semaphore semaphore(1, false);
EventLoop<std::function<void()>>* eventloop = new EventLoop<std::function<void()>>(true);
std::shared_ptr<std::atomic<bool>> waiting_something = autowait(&semaphore, 45000, eventloop, "waiting_something");
In case someone is interested in the atomic version, here is the implementation. The performance is expected to be better than the mutex & condition variable version.
class semaphore_atomic
{
public:
void notify() {
count_.fetch_add(1, std::memory_order_release);
}
void wait() {
while (true) {
int count = count_.load(std::memory_order_relaxed);
if (count > 0) {
if (count_.compare_exchange_weak(count, count-1, std::memory_order_acq_rel, std::memory_order_relaxed)) {
break;
}
}
}
}
bool try_wait() {
int count = count_.load(std::memory_order_relaxed);
if (count > 0) {
if (count_.compare_exchange_strong(count, count-1, std::memory_order_acq_rel, std::memory_order_relaxed)) {
return true;
}
}
return false;
}
private:
std::atomic_int count_{0};
};