Let me start with a short example:
class A
{
public:
A() :
thread_([this] {
std::unique_lock<std::mutex> lk(mtx_);
cv_.wait(lk, [this] { return quit_; });
})
{
throw std::exception();
}
~A()
{
quit_ = true;
cv_.notify_one();
thread_.join();
}
private:
bool quit_ = false;
std::mutex mtx_;
std::condition_variable cv_;
std::thread thread_;
};
int main()
{
try { A a; }
catch (...) {}
// Does not reach this point
}
Because an exception is thrown in the constructor, the members are destroyed in reverse order of construction.
The thread_ member is still joinable at that point, so std::terminate is called.
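A minimal sketch of that rule in isolation (destroying a std::thread that is still joinable calls std::terminate):
#include <thread>
int main()
{
    std::thread t([] {});
    // t goes out of scope while still joinable, so std::terminate() is called
}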
It is pretty straightforward to write a thread wrapper class with a customizable destructor.
For example, something like this:
class ThreadWrapper
{
public:
ThreadWrapper(std::function<void(std::thread & t)> onDestruct, std::thread&& thread) :
onDestruct_(onDestruct),
thread_(std::move(thread))
{
}
~ThreadWrapper()
{
onDestruct_(thread_);
}
private:
std::function<void(std::thread & t)> onDestruct_;
std::thread thread_;
};
class B
{
public:
B() :
thread_(
[this](std::thread& t) {
quit_ = true;
cv_.notify_one();
t.join();
},
std::thread([this] {
std::unique_lock<std::mutex> lk(mtx_);
cv_.wait(lk, [this] { return quit_; });
}))
{
throw std::exception();
}
private:
bool quit_ = false;
std::mutex mtx_;
std::condition_variable cv_;
ThreadWrapper thread_;
};
However, I was surprised not to find something like this in the Boost libraries.
The closest thing I found is scoped_thread, which lets you pass a specific destruction action as a template parameter.
Am I missing something?
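For reference, a rough sketch of the scoped_thread mentioned above, assuming Boost.Thread's boost::scoped_thread with its default join_if_joinable policy; unlike the ThreadWrapper below, the destruction action is fixed at compile time by the template parameter rather than passed in at run time:
#include <boost/thread/scoped_thread.hpp>

void example()
{
    boost::scoped_thread<boost::join_if_joinable> worker{
        boost::thread{[] { /* do work */ }}};
} // worker's destructor joins the thread instead of terminating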
Related
I wrote my own implementation of std::shared_mutex, but in my tests it took several minutes to run; after replacing notify_one with notify_all, it finished in about 20 milliseconds. Is this because of the overhead of waking up exactly one waiter on the condition variable? Why does notify_one work so much more slowly than notify_all here?
class RWLock {
public:
template <class Func>
void Read(Func func) {
std::unique_lock<std::mutex> lock(mutex_);
no_writer_.wait(lock, [this] { return !write_; });
++read_cnt_;
lock.unlock();
try {
func();
} catch (...) {
End();
throw;
}
End();
}
template <class Func>
void Write(Func func) {
std::unique_lock<std::mutex> lock(mutex_);
no_readers_.wait(lock, [this] { return read_cnt_ == 0; });
write_ = true;
try {
func();
} catch (...) {
write_ = false;
throw;
}
write_ = false;
no_writer_.notify_all();
}
private:
std::mutex mutex_;
std::condition_variable no_writer_;
std::condition_variable no_readers_;
int read_cnt_ = 0;
bool write_ = false;
void End() {
mutex_.lock();
--read_cnt_;
no_readers_.notify_all();
mutex_.unlock();
}
};
You signal the condition variable while the mutex is locked.
There is a common optimization you may want to try: release the mutex before calling notify_one/notify_all.
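For example, the End() helper could be written to notify after the lock is released (a sketch of just that change, not a review of the rest of the locking scheme):
void End() {
    {
        std::lock_guard<std::mutex> lock(mutex_);
        --read_cnt_;
    } // release the mutex first...
    no_readers_.notify_all(); // ...so woken writers are not immediately blocked on it
}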
I'm trying to make an EventTask that calls a function passed to it in a loop.
I need it to wait to start and then mark itself as finished.
My problem is that I don't know how to receive the arguments from my wait function so I can pass them to the function that is called.
As you can see, the problem is in my taskFunc: _Event.wait should set the arguments to pass on to the function.
class Event
{
public:
Event() : _signalled(false) {}
virtual inline void notify(){
std::unique_lock<std::mutex> lock(_mutex);
_signalled = true;
_condvar.notify_one();
}
virtual inline void wait(){
std::unique_lock<std::mutex> lock(_mutex);
_condvar.wait(lock, [&] { return _signalled; });
_signalled = false;
stop();
}
virtual inline void stop(){
std::unique_lock<std::mutex> lock(_mutex);
_signalled = false;
}
private:
std::mutex _mutex;
std::condition_variable _condvar;
bool _signalled;
};
template <class T>
class EventArg : public Event
{
public:
virtual inline void notify(T arg){
Event::notify();
this->arg = arg;
}
virtual inline void wait(T& arg){
Event::wait();
arg = this->arg;
}
private:
T arg;
};
template<class... Args>
class EventTask
{
public:
EventTask(std::function<void(Args...)> func) : m_Function(func), m_Run(true), m_thread(&taskFunc, this) {}
void notify(Args&& ...Params) {
_Event.notify(std::forward<Args>(Params)...); }
void wait() {
_EventFinished.wait(); }
void stop() {
m_stop = true;
_Event.stop();
}
private:
void taskFunc()
{
void* pArg = nullptr;
while (m_Run){
_Event.wait(pArg);
m_Function(std::forward<Args>(Params)...);
_EventFinished.notify();
}
}
private:
std::function<void(Args...)> m_Function;
bool m_Run;
std::thread m_thread;
EventArg<Args...> _Event;
Event _EventFinished;
};
Try this:
#include <iostream>
#include <functional>
#include <condition_variable>
#include <mutex>
#include <thread>
#include <tuple>
template<class... Args>
class EventTask
{
public:
EventTask(std::function<void(Args...)> func) : m_Function(func), m_Run(true) {
m_thread = std::thread{ [=]() {
taskFunc();
}};
}
~EventTask() {
stop();
m_thread.join();
}
void notify(const std::tuple<Args...> &args) {
std::unique_lock<std::mutex> lock(_mutex);
_signalled = true;
_args = args;
_condvar.notify_all();
}
void stop() {
    {
        // lock so the write to m_Run does not race with the predicate check in taskFunc
        std::unique_lock<std::mutex> lock(_mutex);
        m_Run = false;
    }
    _condvar.notify_all();
}
private:
void taskFunc()
{
std::tuple<Args...> args;
while (true){
{
std::unique_lock<std::mutex> lock(_mutex);
_condvar.wait(lock, [&] { return !m_Run || _signalled; }); // wake on new work or on stop
if (!m_Run) break;
_signalled = false;
args = _args;
}
std::apply(m_Function, args);
//_EventFinished.notify();
}
}
private:
std::function<void(Args...)> m_Function;
std::tuple<Args...> _args;
std::mutex _mutex;
std::condition_variable _condvar;
bool _signalled = false;
//Event _EventFinished;
bool m_Run;
std::thread m_thread;
};
int main()
{
EventTask<int, int> ma{ [](int a, int b) {
}};
ma.notify({ 1, 2 });
}
What is going on here? There are two threads: a "producer" thread (the one that produces arguments for the function, hence the name) and a "consumer" thread (the one that actually does the running).
The "producer" thread locks the mutex, copies the arguments, and notifies that there is something to be done. The "consumer" thread locks the mutex, then waits on the condition. Waiting on the condition releases the mutex, which is reacquired when a notification on the condition variable arrives. When the "producer" thread sets the arguments, the "consumer" wakes up, reacquires the mutex (this is required, otherwise the "producer" might set the args twice in a row, resulting in a race, which is undefined behavior), copies the arguments once again, and releases the mutex. Then it continues with calling the worker function, using its own local copy of the arguments.
A similar process happens when you try to stop the whole thing. The "producer" locks the mutex, sets m_Run to false, and notifies everyone. The "consumer" thread wakes up, notices that m_Run is false, and breaks out of the loop, ending its thread. Note that this won't interrupt a worker function that is already in progress - you have to wait for it to finish (note the call to join in the destructor).
I have a simple event class; everything works fine with it.
I'm trying to add a template class that I can pass arguments to and then read them when wait finishes.
At the moment I have it working only with a struct, because I need to set the arg to 0 when I'm not using it.
Is there a better/easier way to do this, so I can pass any type of argument to it?
class Event
{
public:
Event() : _signalled(false) {}
virtual void notify()
{
std::unique_lock<std::mutex> lock(_mutex);
_signalled = true;
_condvar.notify_one();
}
virtual void wait()
{
std::unique_lock<std::mutex> lock(_mutex);
_condvar.wait(lock, [&] { return _signalled; });
_signalled = false;
}
void stop()
{
std::unique_lock<std::mutex> lock(_mutex);
_signalled = false;
}
private:
std::mutex _mutex;
std::condition_variable _condvar;
bool _signalled;
};
template <class T>
class ArgEvent : public Event
{
public:
void notify()
{
Event::notify();
this->arg = { 0 };
}
void notify(T arg)
{
Event::notify();
this->arg = arg;
}
T getArg()
{
return this->arg;
}
void wait()
{
Event::wait();
}
void wait(T& arg)
{
Event::wait();
arg = this->arg;
}
private:
T arg;
};
Boost.Optional allows the implementation to work without C++17 (where you would otherwise use std::optional).
https://www.boost.org/doc/libs/1_67_0/libs/optional/doc/html/index.html
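A minimal sketch of what that could look like, keeping the structure from the question but storing the argument in an optional so the { 0 } placeholder is no longer needed (as in the original, arg itself is still not protected by the Event's mutex):
#include <boost/optional.hpp> // with C++17 you could use <optional> instead

template <class T>
class ArgEvent : public Event
{
public:
    void notify()                 // signal without an argument
    {
        arg = boost::none;
        Event::notify();
    }
    void notify(T value)
    {
        arg = value;
        Event::notify();
    }
    boost::optional<T> getArg()   // empty when no argument was supplied
    {
        return arg;
    }
private:
    boost::optional<T> arg;
};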
Suppose I have a class with the following interface:
class IEvent
{
public:
    virtual void SetEvent() = 0;
    virtual void WaitForEvent() = 0;
};
WaitForEvent() is a blocking function which waits until another thread calls the SetEvent() function.
I'm writing unit tests for this class and want the following scenario:
The first thread calls WaitForEvent(). After that, the second thread calls SetEvent().
How do I synchronize these threads so that the SetEvent() call always follows the WaitForEvent() call?
I don't want to use any sleeps, because I want the unit tests to run as fast as possible.
This was the best I could manage:
#include <mutex>
#include <condition_variable>
#include <thread>
#include <iostream>
class IEvent
{
public:
virtual void SetEvent() = 0;
virtual void WaitForEvent() = 0;
};
class EventClass : public IEvent
{
public:
void SetEvent() override {
std::unique_lock<std::mutex> lock { _mutex };
std::cout << "setting event\n";
_event = true;
lock.unlock();
_cv.notify_all();
}
void WaitForEvent() override {
std::unique_lock<std::mutex> lock { _mutex };
std::cout << "waiting for event\n";
_cv.wait(lock, [this]{ return _event; });
std::cout << "waiting complete\n";
};
private:
std::mutex _mutex;
std::condition_variable _cv;
bool _event = false;
};
int main()
{
EventClass object;
std::mutex cv_mutex;
std::condition_variable may_set_cv;
bool may_set_event = false;
auto setting_thread = std::thread([&]{
std::unique_lock<std::mutex> lock { cv_mutex };
may_set_cv.wait(lock,
[&] {
return may_set_event;
});
object.SetEvent();
});
std::unique_lock<std::mutex> lock { cv_mutex };
may_set_event = true;
lock.unlock();
may_set_cv.notify_one();
// tiny race condition here
object.WaitForEvent();
setting_thread.join();
}
I have the following code that runs functions on a dedicated thread. It works great except for the destructor. The call to thread_.join() does not return. I am using VS2013 Express.
What would I change so that the thread joins correctly?
#include <atomic>
#include <condition_variable>
#include <functional>
#include <mutex>
#include <thread>
#include <vector>
namespace
{
class main_thread
{
public:
static auto instance() -> main_thread&
{
static main_thread instance_;
return instance_;
}
auto enque(std::function<void()> func) -> void
{
{
std::lock_guard<std::mutex> lock{ mutex_ };
queue_.push_back(func);
}
condition_.notify_one();
}
private:
main_thread()
{
continue_.test_and_set();
thread_ = std::thread{ std::bind(std::mem_fn(&main_thread::run), this) };
}
~main_thread()
{
continue_.clear();
condition_.notify_all();
if (thread_.joinable())
{
thread_.join();
}
}
main_thread(const main_thread &other) = delete;
main_thread(main_thread &&other) = delete;
main_thread& operator=(const main_thread &other) = delete;
main_thread& operator=(main_thread &&other) = delete;
auto run() -> void
{
while (continue_.test_and_set())
{
auto lock = std::unique_lock<std::mutex>{ mutex_ };
//condition_.wait_for(lock, std::chrono::milliseconds(1));
condition_.wait(lock);
for (auto &func : queue_)
{
func();
}
queue_.clear();
}
}
std::condition_variable condition_;
std::mutex mutex_;
std::vector<std::function<void()>> queue_;
std::thread thread_;
std::atomic_flag continue_;
};
}
auto on_main_thread(std::function<void()> func) -> void
{
main_thread::instance().enque(std::move(func));
}
auto on_main_thread_sync(std::function<void()> func) -> void
{
bool done{ false };
on_main_thread([&]{
func();
done = true;
});
while (!done);
}
The only function exercising this code is
int main()
{
on_main_thread([]{});
}
This avoids the issue of the race in on_main_thread_sync but still has the lockup in ~main_thread. Visual Studio indicates that there are 2 threads, but neither is in main_thread::run, so I do not understand what is going on. That function exited correctly, but for some reason the thread is not ending.
You should not call external code from within a critical section; this can easily lead to deadlocks.
If you pause execution in the debugger, you may see that you have one or more threads waiting to acquire mutex_.
You won't be able to acquire the unique_lock on mutex_ again if any of the code called from func() tries to call enque().
Try releasing the lock once the condition_variable wait is over. As a test, you can put in an extra scope to see if this helps:
while (continue_.test_and_set())
{
std::vector<std::function<void()>> queue;
{
auto lock = std::unique_lock<std::mutex>{ mutex_ };
//condition_.wait_for(lock, std::chrono::milliseconds(1));
condition_.wait(lock);
queue.swap(queue_);
}
for (auto &func : queue)
{
func();
}
}
You have a potential lost wakeup in your code at shutdown. The following interleaving is possible:
main() thread                      thread in run()
                                   check continue_, see it is true
set continue_ = false
notify the condition variable
join
                                   wait on condition variable
To avoid this, you need the condition check and cv wait to happen atomically. This is most easily accomplished by protecting continue_ with mutex_ (Live at Coliru):
class main_thread
{
public:
static auto instance() -> main_thread&
{
static main_thread instance_;
return instance_;
}
auto enque(std::function<void()> func) -> void
{
{
std::lock_guard<std::mutex> lock{ mutex_ };
queue_.push_back(func);
}
condition_.notify_one();
}
private:
main_thread() : continue_{true}
{
thread_ = std::thread{ &main_thread::run, this };
}
~main_thread()
{
{
std::lock_guard<std::mutex> lock{ mutex_ };
continue_ = false;
}
condition_.notify_all();
if (thread_.joinable())
{
thread_.join();
}
}
auto run() -> void
{
std::unique_lock<std::mutex> lock{ mutex_ };
while(continue_)
{
if(queue_.empty())
{
condition_.wait(lock);
continue;
}
std::vector<std::function<void()>> queue;
queue.swap(queue_);
lock.unlock();
for (auto &func : queue)
{
func();
}
lock.lock();
}
}
std::condition_variable condition_;
std::mutex mutex_;
std::vector<std::function<void()>> queue_;
bool continue_;
std::thread thread_;
};