Locking copy operation of std::shared_ptr inside lambda - c++

For this sample code:
#include <iostream>
#include <thread>
#include <mutex>
#include <memory>

struct A
{
    int _i;
    A(int i) : _i(i)
    {
        std::cout << "A() " << _i << std::endl;
    }
    ~A()
    {
        std::cout << "~A() " << _i << std::endl;
    }
    void Print()
    {
        std::cout << "Print() " << _i << std::endl;
    }
};

struct B
{
    std::shared_ptr<A> Asp;
    std::mutex AspMutex;

    void SetA()
    {
        static int i = 0;
        std::unique_lock<std::mutex> lock(AspMutex);
        Asp = std::make_shared<A>(i);
    }

    void AccessA1()
    {
        std::shared_ptr<A> aspCopy;
        {
            std::unique_lock<std::mutex> lock(AspMutex);
            aspCopy = Asp;
        }
        (*aspCopy).Print();
    }

    void AccessA2()
    {
        auto aspCopy = [&]()
        {
            std::unique_lock<std::mutex> lock(AspMutex);
            return Asp;
        }();
        (*aspCopy).Print();
    }

    void AccessA3()
    {
        (*[&]()
        {
            std::unique_lock<std::mutex> lock(AspMutex);
            return Asp;
        }()
        ).Print();
    }
};

int main()
{
    B b;
    b.SetA();
    std::thread t([&] { b.SetA(); });
    b.AccessA1();
    b.AccessA2();
    b.AccessA3();
    t.join();
}
I'm curious whether the C++17 (or later) standard guarantees that the B::AccessA1 and B::AccessA2 methods are thread safe (i.e. that the copy of the std::shared_ptr is protected by the lock).

Yes. Copying the shared_ptr while holding the lock makes B::AccessA1 and B::AccessA2 (and the equivalent B::AccessA3) thread safe with respect to a concurrent SetA(): the mutex serializes the copy with the assignment in SetA(), and once the copy exists it keeps the pointed-to A alive even if SetA() replaces Asp afterwards. This is still true in C++17 and later.
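Since the question says "(or later)": C++20 adds std::atomic<std::shared_ptr<T>>, which gives the same guarantee without a separate mutex. Below is a minimal sketch of that alternative; it is my own illustration, not part of the question or the answer above, and the struct names merely mirror the question's A and B.

#include <atomic>
#include <iostream>
#include <memory>
#include <thread>

struct A
{
    int _i;
    explicit A(int i) : _i(i) {}
    void Print() { std::cout << "Print() " << _i << std::endl; }
};

struct B
{
    // C++20: loads and stores of the shared_ptr itself are serialized by the
    // atomic specialization, so no separate mutex is needed for the copy.
    std::atomic<std::shared_ptr<A>> Asp;

    void SetA(int i) { Asp.store(std::make_shared<A>(i)); }

    void AccessA()
    {
        // load() returns a copy; the pointed-to A stays alive while we use it,
        // even if SetA() replaces Asp concurrently.
        if (auto aspCopy = Asp.load())
            aspCopy->Print();
    }
};

int main()
{
    B b;
    b.SetA(0);
    std::thread t([&] { b.SetA(1); });
    b.AccessA();
    t.join();
}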

Related

(C++) How to use a Payload Object to implement a thread pool?

I saw this very well implemented thread pool: https://github.com/progschj/ThreadPool. I am wondering whether I can use a payload object instead. The idea is that instead of using a function pointer, an object describes the payload, and it always contains a run function and a promise. The main thread then waits on the future of the promise.
Here is what I got:
#include <iostream>
#include <queue>
#include <thread>
#include <future>
#include <condition_variable>
#include <mutex>
#include <vector>   // for std::vector<std::thread>

class GenericPayload {
protected:
    std::promise<int> m_returnCode;
public:
    virtual void run() = 0;
    std::future<int> getFuture() {
        return m_returnCode.get_future();
    }
};

class MyPayload : public GenericPayload {
private:
    int m_input1;
    int m_input2;
    int m_result;
public:
    MyPayload(int input1, int input2) : m_input1(input1), m_input2(input2) {}
    void run() {
        m_result = m_input1 * m_input2;
        m_returnCode.set_value(0);
    }
    int getResult() {
        return m_result;
    }
};

class ThreadPool {
private:
    std::queue<GenericPayload *> payloads;
    std::mutex queue_mutex;
    std::condition_variable cv;
    std::vector<std::thread> workers;
    bool stop;
public:
    ThreadPool(size_t threads)
        : stop(false)
    {
        for(size_t i = 0; i < threads; ++i)
            workers.emplace_back(
                [this]
                {
                    for(;;)
                    {
                        GenericPayload *payload;
                        {
                            std::unique_lock<std::mutex> lock(this->queue_mutex);
                            this->cv.wait(lock,
                                [this]{ return this->stop || !this->payloads.empty(); });
                            if(this->stop)
                                return;
                            payload = this->payloads.front();
                            this->payloads.pop();
                        }
                        payload->run();
                    }
                }
            );
    }
    void addPayLoad(GenericPayload *payload) {
        payloads.push(payload);
    }
    ~ThreadPool()
    {
        {
            std::unique_lock<std::mutex> lock(queue_mutex);
            stop = true;
        }
        cv.notify_all();
        for(std::thread &worker : workers)
            worker.join();
    }
};

int main() {
    MyPayload myPayload(3, 5);
    ThreadPool threadPool(2);
    std::future<int> returnCode = myPayload.getFuture();
    threadPool.addPayLoad(&myPayload);
    returnCode.get();
    std::cout << myPayload.getResult() << std::endl;
}
Is this the right way to do it, though? I had to pass a pointer to the payload because 1. I want GenericPayload to be abstract and 2. std::promise is not copyable. Thanks for any feedback.
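One possible variation (my sketch, not from the original post): keep GenericPayload abstract but let the queue own the payloads through std::unique_ptr, so ownership is moved around instead of the non-copyable promise ever being copied. Roughly:

#include <memory>
#include <queue>
#include <utility>

// Minimal stand-in for the question's abstract GenericPayload.
struct GenericPayload {
    virtual ~GenericPayload() = default;
    virtual void run() = 0;
};

// The queue owns the payloads; only the unique_ptr is moved,
// so the std::promise inside never needs to be copied.
std::queue<std::unique_ptr<GenericPayload>> payloads;

void addPayLoad(std::unique_ptr<GenericPayload> payload) {
    payloads.push(std::move(payload));
}

// Worker side (the same locking as in the question would still apply):
//   auto payload = std::move(payloads.front());
//   payloads.pop();
//   payload->run();

Note that the caller then has to call getFuture() before handing the payload over, since it gives up its pointer to the object when it moves it into the queue.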

Using smart pointer for critical section

What are the drawbacks of, or errors in, the following approach of using a non-owning std::unique_ptr with a custom deleter to implement a critical section?
#include <memory>
#include <shared_mutex>
#include <optional>
#include <variant>
#include <cassert>

struct Data
{
    std::optional<int> i;
};

struct DataLocker
{
    std::variant<std::unique_lock<std::shared_mutex>, std::shared_lock<std::shared_mutex>> lock;

    void operator () (const Data *)
    {
        std::visit([] (auto & lock) { if (lock) lock.unlock(); }, lock);
    }
};

struct DataHolder
{
    std::unique_ptr<Data, DataLocker> getLockedData()
    {
        return {&data, {std::unique_lock<std::shared_mutex>{m}}};
    }
    std::unique_ptr<const Data, DataLocker> getLockedData() const
    {
        return {&data, {std::shared_lock<std::shared_mutex>{m}}};
    }
private:
    mutable std::shared_mutex m;
    Data data;
};

#include <iostream>
#include <thread>
#include <utility>   // for std::as_const

int main()
{
    DataHolder d;
    auto producer = [&d]
    {
        d.getLockedData()->i = 123;
    };
    auto consumer = [&d = std::as_const(d)]
    {
        for (;;) {
            if (const auto i = d.getLockedData()->i) {
                std::cout << *i << std::endl;
                return;
            }
        }
    };
    std::thread p(producer);
    std::thread c(consumer);
    p.join();
    c.join();
}
One corner case, where the user reset()s the pointer but never destroys the std::unique_ptr itself (so the lock stored in the deleter would otherwise stay held), is covered by having the deleter's operator() explicitly unlock the lock.
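A small usage sketch of that corner case (my illustration, assuming the DataHolder defined above): reset() runs the deleter, so the mutex is released immediately even though the std::unique_ptr object itself lives on.

#include <iostream>

// Assumes the Data/DataLocker/DataHolder definitions from the question.
int main()
{
    DataHolder d;

    auto locked = d.getLockedData();  // unique_lock acquired here
    locked->i = 1;                    // mutex held while the data is touched
    locked.reset();                   // deleter runs: the mutex is unlocked now,
                                      // although 'locked' is still in scope

    // Other threads could lock d at this point; 'locked' is an empty handle.
    std::cout << *d.getLockedData()->i << std::endl;  // prints 1
}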

Misuse of condition variable

Could you please review and suggest what is wrong with this code?
It either crashes at cond_var_.wait(lock); in gc_thread_proc(), or hangs at lock.lock(); in release().
#include <condition_variable>
#include <deque>
#include <functional>
#include <iterator>   // for std::back_inserter
#include <mutex>
#include <thread>
#include <vector>
#include <iostream>

class stream {
    std::deque<int> pending_cleanups_;
    std::mutex mut_{};
    bool continue_{true};
    std::thread gc_worker_;
    std::condition_variable cond_var_;

    void gc_thread_proc() {
        while (true) {
            std::vector<int> events_to_clean;
            std::unique_lock<std::mutex> lock(mut_);
            while (pending_cleanups_.empty() && continue_) {
                cond_var_.wait(lock);
            }
            if (!continue_) {
                break;
            }
            std::move(std::begin(pending_cleanups_), std::end(pending_cleanups_),
                      std::back_inserter(events_to_clean));
            pending_cleanups_.clear();
        }
    }

public:
    explicit stream() : gc_worker_(&stream::gc_thread_proc, this) {}

    void register_pending_event(int val) {
        {
            std::lock_guard<std::mutex> lock_guard(mut_);
            pending_cleanups_.push_back(val);
        }
        cond_var_.notify_one();
    }

    void release() {
        std::unique_lock<std::mutex> lock(mut_);
        if (!continue_) {
            return;
        }
        continue_ = false;
        lock.unlock();
        cond_var_.notify_one();
        gc_worker_.join();
        lock.lock();
        pending_cleanups_.clear();
    }

    ~stream() { release(); }
};

int main() {
    int N = 100000;
    while (N--) {
        std::cout << ".";
        stream s;
    }
    std::cout << "ok";
    return 0;
}
Changing the order of the members makes this problem go away: when cond_var_ is declared before gc_worker_, the problem doesn't reproduce. But I guess that doesn't really fix it, it just hides it somehow...
Non-static data members are initialized in order of declaration in the class definition: https://en.cppreference.com/w/cpp/language/initializer_list
3) Then, non-static data members are initialized in order of declaration in the class definition.
In your case, since your std::thread member is initialized to start executing in its constructor, cond_var_ may not be initialized yet when it is used in gc_thread_proc. A common way to have a std::thread member is to move-assign it in the class constructor, i.e.
class stream {
    std::thread gc_worker_;
    std::condition_variable cond_var_;
public:
    stream() {
        // All members are fully constructed before the thread is started here.
        gc_worker_ = std::thread(&stream::gc_thread_proc, this);
    }
};
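For completeness (my addition, not part of the original answer): the reordering the asker tried is also a genuine fix rather than something that merely hides the problem, because members are initialized in declaration order. Declaring the thread last, after everything the thread touches, guarantees those members exist before gc_thread_proc starts running:

#include <condition_variable>
#include <deque>
#include <mutex>
#include <thread>

class stream {
    std::deque<int> pending_cleanups_;
    std::mutex mut_{};
    bool continue_{true};
    std::condition_variable cond_var_;  // fully constructed before the thread starts
    std::thread gc_worker_;             // declared last, so initialized last

    void gc_thread_proc() { /* body as in the question */ }

public:
    stream() : gc_worker_(&stream::gc_thread_proc, this) {}
    // register_pending_event(), release() and ~stream() as in the question
};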

How should I improve a thread pool to make it more thread safe?

I am currently learning the basics about thread pooling. Here are some code blocks that I have written taking into account some examples found on the web:
SyncQueue.h
#ifndef SYNC_QUEUE_H
#define SYNC_QUEUE_H

#include <list>
#include <mutex>
#include <iostream>

template<typename T>
class SyncQueue {
public:
    SyncQueue();
    ~SyncQueue();
    SyncQueue(const SyncQueue&) = delete;
    SyncQueue& operator=(const SyncQueue &) = delete;
    void append(const T& data);
    T& get();
    unsigned long size();
    bool empty();
private:
    std::list<T> queue;
    std::mutex myMutex;
};
#endif
SyncQueue.cpp
#include "SyncQueue.h"
template<typename T>
SyncQueue<T>::SyncQueue():
queue(),
myMutex() {}
template<typename T>
SyncQueue<T>::~SyncQueue() {}
template<typename T>
void SyncQueue<T>::append(const T& data) {
std::unique_lock<std::mutex> l(myMutex);
queue.push_back(data);
}
template<typename T>
T& SyncQueue<T>::get() {
std::unique_lock<std::mutex> l(myMutex);
T& res = queue.front();
queue.pop_front();
return res;
}
template<typename T>
unsigned long SyncQueue<T>::size() {
std::unique_lock<std::mutex> l(myMutex);
return queue.size();
}
template<typename T>
bool SyncQueue<T>::empty() {
std::unique_lock<std::mutex> l(myMutex);
return queue.empty();
}
template class SyncQueue<std::function<void()>>;
ThreadPool.h
#ifndef THREAD_POOL_H
#define THREAD_POOL_H

#include <atomic>
#include <condition_variable>
#include <functional>
#include <mutex>
#include <thread>
#include <vector>
#include "SyncQueue.h"

class ThreadPool {
public:
    ThreadPool(unsigned long thrdAmount = 0);
    virtual ~ThreadPool();
    void appendTask(std::function<void()> func);
    unsigned long pendingTasks();
private:
    void runThread();
    unsigned int myThrdAmount;
    std::atomic<bool> done;
    SyncQueue<std::function<void()>> syncQueue;
    std::vector<std::thread> threads;
    std::condition_variable myCondVar;
    std::mutex myMutex;
};
#endif
ThreadPool.cpp
#include "ThreadPool.h"
ThreadPool::ThreadPool(unsigned long thrdAmount):
myThrdAmount(0),
done(false),
syncQueue(),
threads(),
myCondVar(),
myMutex() {
if (thrdAmount > 0) {
myThrdAmount = thrdAmount;
} else {
myThrdAmount = std::thread::hardware_concurrency();
}
for (unsigned int i = 0; i < myThrdAmount; i++) {
threads.push_back(std::thread(&ThreadPool::runThread, this));
}
}
ThreadPool::~ThreadPool() {
done = true;
myCondVar.notify_all();
for (auto& thrd: threads) {
if (thrd.joinable()) {
thrd.join();
}
}
}
void ThreadPool::appendTask(std::function<void()> func) {
syncQueue.append(func);
{
std::unique_lock<std::mutex> l(myMutex);
myCondVar.notify_one();
}
}
unsigned long ThreadPool::pendingTasks() {
return syncQueue.size();
}
void ThreadPool::runThread() {
while (!done) {
if (syncQueue.empty()) {
std::unique_lock<std::mutex> l(myMutex);
myCondVar.wait(l);
continue;
}
syncQueue.get()();
}
}
main.cpp
#include <unistd.h>
#include <iostream>
#include "ThreadPool.h"

void print() {
    std::cout << "Hello World!" << std::endl;
}

int main(int argc, char const *argv[]) {
    ThreadPool p;
    for (int i = 0; i < 20; i++) {
        p.appendTask(print);
    }
    std::cout << "Pending: " << p.pendingTasks() << std::endl;
    sleep(5);
    for (int i = 0; i < 20; i++) {
        p.appendTask(print);
    }
    return 0;
}
Even though all the operations on a SyncQueue are protected by a mutex, and the condition variable of the ThreadPool is also used with a mutex, the code often results in undefined behaviour.
That said, can you please explain to me where the code is lacking thread safety? How should I improve it?
void ThreadPool::appendTask(std::function<void()> func) {
    syncQueue.append(func);
    {
        std::unique_lock<std::mutex> l(myMutex);
        myCondVar.notify_one();
    }
}

void ThreadPool::runThread() {
    while (!done) {
        if (syncQueue.empty()) {
            std::unique_lock<std::mutex> l(myMutex);
            myCondVar.wait(l);
            continue;
        }
        syncQueue.get()();
    }
}
The problem is that myMutex doesn't actually protect anything, so your code has a catastrophic race condition around waiting for the queue.
Consider:
1. The thread calling runThread sees that syncQueue is empty.
2. The thread calling appendTask adds a job to the queue and calls notify_one. There is no thread waiting to notify.
3. The thread calling runThread finally gets the lock on myMutex and waits on the condition variable, even though the queue is no longer empty. The notification has already been missed, so the thread can block indefinitely.
It is absolutely vital that the condition variable you use for waiting be associated with the mutex that protects the predicate you are waiting for. The entire purpose of a condition variable is to let you atomically release that mutex and wait for a signal without a race condition. But you buried the predicate inside the syncQueue, behind its own internal mutex, defeating the condition variable's lock-handling logic.
You can fix this race condition by making all calls into syncQueue under the protection of the myMutex mutex. But it might make a lot more sense to make syncQueue itself waitable, although that can make it harder to shut down the thread pool.
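A minimal sketch of the "waitable queue" idea (my illustration, not code from the original answer): the mutex that protects the queue is the same one the condition variable waits with, so checking the predicate and going to sleep happen atomically.

#include <condition_variable>
#include <list>
#include <mutex>
#include <utility>

template<typename T>
class WaitableSyncQueue {
public:
    void append(T item) {
        {
            std::lock_guard<std::mutex> l(myMutex);
            queue.push_back(std::move(item));
        }
        myCondVar.notify_one();  // notify after the push is visible
    }

    // Blocks until an item is available, then removes and returns it by value.
    T waitAndGet() {
        std::unique_lock<std::mutex> l(myMutex);
        myCondVar.wait(l, [this] { return !queue.empty(); });
        T item = std::move(queue.front());
        queue.pop_front();
        return item;
    }

private:
    std::list<T> queue;                 // protected by myMutex
    std::mutex myMutex;                 // the same mutex the condition variable uses
    std::condition_variable myCondVar;
};

Shutting down then needs a stop flag that is set under the same mutex and checked inside the same wait predicate (with notify_all()), which is the extra complication the answer alludes to.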

Accessing counter from two threads

I have a counter that is being incremented from one thread. In the main thread, I basically print it out by calling a member function that reads a data member of the class. In the code below, nothing is printed at all.
#include <iostream>
#include <thread>
#include <windows.h>
#include <mutex>

std::mutex mut;

class Foo
{
public:
    Foo(const int& m) : m_delay(m), m_count(0)
    {}
    void update()
    {
        std::cout << "count: " << this->m_count << std::endl;
    }
    void operator()()
    {
        while (true) {
            mut.lock();
            m_count++;
            mut.unlock();
            Sleep(m_delay);
        }
    }
private:
    int m_delay;
    int m_count;
};

Foo *obj = new Foo(200);

int main()
{
    std::thread *t = new std::thread(*obj);
    t->join();
    while (true)
    {
        obj->update();
        Sleep(10);
    }
    return 0;
}
The problem with the original code is that this copies the Foo object:
std::thread *t = new std::thread(*obj);
That means the increments happen to the copy, so the value in the original Foo never changes, and when main prints it out (assuming you also move the misplaced join() so the print loop is actually reached) the value is always the same.
A solution is to use a reference not a copy:
std::thread *t = new std::thread(std::ref(*obj));
You also need to protect the read of the variable by the mutex (or use std::atomic<int> for the counter) to avoid undefined behaviour caused by concurrently reading and writing a non-atomic variable.
You should also stop using mut.lock() and mut.unlock() directly, use a scoped lock instead.
There's also no need to create things on the heap unnecessarily, overusing new is a bad habit of people who learnt Java and C# first.
You can also make the code portable by replacing the Windows-specific Sleep call with standard C++.
A correct version would be:
#include <iostream>
#include <thread>
#include <chrono>
#include <mutex>

std::mutex mut;

class Foo
{
public:
    Foo(std::chrono::milliseconds m) : m_delay(m), m_count(0)
    {}
    void update()
    {
        int count = 0;
        {
            std::lock_guard<std::mutex> lock(mut);
            count = m_count;
        }
        std::cout << "count: " << count << std::endl;
    }
    void operator()()
    {
        while (true)
        {
            {
                std::lock_guard<std::mutex> lock(mut);
                m_count++;
            }
            std::this_thread::sleep_for(m_delay);
        }
    }
private:
    std::chrono::milliseconds m_delay;
    int m_count;
};

Foo obj(std::chrono::milliseconds(200));

int main()
{
    std::thread t(std::ref(obj));
    while (true)
    {
        obj.update();
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }
    t.join();
    return 0;
}
Alternatively, use an atomic variable so you don't need the mutex:
#include <iostream>
#include <thread>
#include <chrono>
#include <atomic>

class Foo
{
public:
    Foo(std::chrono::milliseconds m) : m_delay(m), m_count(0)
    {}
    void update()
    {
        std::cout << "count: " << m_count << std::endl;
    }
    void operator()()
    {
        while (true)
        {
            m_count++;
            std::this_thread::sleep_for(m_delay);
        }
    }
private:
    std::chrono::milliseconds m_delay;
    std::atomic<int> m_count;
};

Foo obj(std::chrono::milliseconds(200));

int main()
{
    std::thread t(std::ref(obj));
    while (true)
    {
        obj.update();
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
    }
    t.join();
    return 0;
}