The proper way of terminating a worker thread - C++

Hi, I am trying to find the cleanest way of gracefully terminating a worker thread. I have the following code:
#include <atomic>
#include <chrono>
#include <cstdio>
#include <iostream>
#include <string>
#include <thread>
class test{
public:
test() : worker_done(false), a(0), m_acqThread(nullptr) {}
~test() {}
std::atomic<bool> worker_done;
int a;
void pr() {
while (true) {
if (worker_done) {
break;
}
std::this_thread::sleep_for(std::chrono::milliseconds(500));
printf("%d \n", a++);
}
}
std::thread* m_acqThread;
void continuous() {
m_acqThread = new std::thread(&test::pr, this);
}
void stopThread(){
if (m_acqThread) {
if (m_acqThread->joinable())
m_acqThread->join();
delete m_acqThread;
m_acqThread = nullptr;
}
}
};
int main(){
test t;
t.continuous();
std::this_thread::sleep_for(std::chrono::milliseconds(2000));
t.worker_done = true;
t.stopThread();
std::string str;
std::cin.clear();
getline(std::cin, str);
return 0;
}
Is there a better way of notifying the worker thread to terminate, other than setting "worker_done" to true?
Thanks

What you have is fine: if you have a thread that, say, starts when your program opens and needs to be stopped as your program closes, a std::atomic<bool> flag is the right way to do this.
It's possible to also use std::atomic_flag like so:
#include <thread>
#include <atomic>
#include <chrono>
#include <iostream>
std::atomic_flag lock = ATOMIC_FLAG_INIT; // must be explicitly initialized before C++20
int n = 0;
void t()
{
while (lock.test_and_set())
{
++n;
std::this_thread::sleep_for(std::chrono::milliseconds(250));
}
}
int main()
{
lock.test_and_set();
std::thread worker(&t); // don't also name the thread object "t": it would shadow the function t in its own initializer
std::this_thread::sleep_for(std::chrono::seconds(2));
lock.clear();
worker.join();
std::cout << n << std::endl;
std::cin.get();
}
There are reasons you might wish to choose std::atomic_flag over std::atomic<bool> (it is the one type guaranteed to be lock-free, for instance), but personally I prefer atomic<bool> for things like this, as it's simply more readable:
std::atomic<bool> runThread{ false };
int n = 0;
void t()
{
while (runThread)
{
++n;
std::this_thread::sleep_for(std::chrono::milliseconds(250));
}
}
int main()
{
runThread = true;
std::thread worker(&t); // again, keep the thread object's name distinct from the function t
std::this_thread::sleep_for(std::chrono::seconds(2));
runThread = false;
worker.join();
std::cout << n << std::endl;
std::cin.get();
}
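If you would rather not have the caller flip the flag at all, the same idea can be folded into the class from the question: stopThread() sets worker_done itself before joining, and the destructor calls stopThread() as a safety net. A minimal sketch of that variant (same behaviour as the original, just reorganised):
#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

class test {
public:
    test() : worker_done(false), a(0), m_acqThread(nullptr) {}
    ~test() { stopThread(); } // make sure the worker is gone before the members die

    void continuous() { m_acqThread = new std::thread(&test::pr, this); }

    void stopThread() {
        if (m_acqThread) {
            worker_done = true;              // ask the worker to finish its loop ...
            if (m_acqThread->joinable())
                m_acqThread->join();         // ... and wait for it to do so
            delete m_acqThread;
            m_acqThread = nullptr;
        }
    }

private:
    void pr() {
        while (!worker_done) {
            std::this_thread::sleep_for(std::chrono::milliseconds(500));
            printf("%d \n", a++);
        }
    }

    std::atomic<bool> worker_done;
    int a;
    std::thread* m_acqThread;
};

int main() {
    test t;
    t.continuous();
    std::this_thread::sleep_for(std::chrono::milliseconds(2000));
    t.stopThread(); // no need to set worker_done from main any more
}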

Related

Thread pool not completing all tasks

I have asked a simpler version of this question before and got the correct answer: Thread pools not working with large number of tasks
Now I am trying to run tasks from an object of a class in parallel using a thread pool. My task is simple and only prints a number for that instance of the class. I am expecting the numbers 0-9 to be printed, but instead some numbers are printed more than once and some are not printed at all. Can anyone see what I am doing wrong with creating tasks in my loop?
#include "iostream"
#include "ThreadPool.h"
#include <chrono>
#include <thread>
using namespace std;
using namespace dynamicThreadPool;
class test {
int x;
public:
test(int x_in) : x(x_in) {}
void task()
{
cout << x << endl;
}
};
int main(void)
{
thread_pool pool;
for (int i = 0; i < 10; i++)
{
test* myTest = new test(i);
std::function<void()> myFunction = [&] {myTest->task(); };
pool.submit(myFunction);
}
while (!pool.isQueueEmpty())
{
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
cout << "waiting for tasks to complete" << endl;
}
return 0;
}
And here is my thread pool; I took this definition from the book "C++ Concurrency in Action":
#pragma once
#include <queue>
#include <future>
#include <list>
#include <functional>
#include <memory>
template<typename T>
class threadsafe_queue
{
private:
mutable std::mutex mut;
std::queue<T> data_queue;
std::condition_variable data_cond;
public:
threadsafe_queue() {}
void push(T new_value)
{
std::lock_guard<std::mutex> lk(mut);
data_queue.push(std::move(new_value));
data_cond.notify_one();
}
void wait_and_pop(T& value)
{
std::unique_lock<std::mutex> lk(mut);
data_cond.wait(lk, [this] {return !data_queue.empty(); });
value = std::move(data_queue.front());
data_queue.pop();
}
bool try_pop(T& value)
{
std::lock_guard<std::mutex> lk(mut);
if (data_queue.empty())
return false;
value = std::move(data_queue.front());
data_queue.pop();
return true;
}
bool empty() const
{
std::lock_guard<std::mutex> lk(mut);
return data_queue.empty();
}
};
class join_threads
{
std::vector<std::thread>& threads;
public:
explicit join_threads(std::vector<std::thread>& threads_) : threads(threads_) {}
~join_threads()
{
for (unsigned long i = 0; i < threads.size(); i++)
{
if (threads[i].joinable())
{
threads[i].join();
}
}
}
};
class thread_pool
{
std::atomic_bool done;
threadsafe_queue<std::function<void()> > work_queue;
std::vector<std::thread> threads;
join_threads joiner;
void worker_thread()
{
while (!done)
{
std::function<void()> task;
if (work_queue.try_pop(task))
{
task();
}
else
{
std::this_thread::yield();
}
}
}
public:
thread_pool() : done(false), joiner(threads)
{
unsigned const thread_count = std::thread::hardware_concurrency();
try
{
for (unsigned i = 0; i < thread_count; i++)
{
threads.push_back(std::thread(&thread_pool::worker_thread, this));
}
}
catch (...)
{
done = true;
throw;
}
}
~thread_pool()
{
done = true;
}
template<typename FunctionType>
void submit(FunctionType f)
{
work_queue.push(std::function<void()>(f));
}
bool isQueueEmpty()
{
return work_queue.empty();
}
};
There's too much code to analyse all of it, but here you capture a local pointer by reference:
{
test* myTest = new test(i);
std::function<void()> myFunction = [&] {myTest->task(); };
pool.submit(myFunction);
} // pointer goes out of scope
After that pointer variable has gone out of scope, the reference the lambda captured dangles, and you get undefined behavior when the pool later executes myTest->task();.
To solve that immediate problem, copy the pointer and delete the object afterwards to not leak memory:
{
test* myTest = new test(i);
std::function<void()> myFunction = [=] {myTest->task(); delete myTest; };
pool.submit(myFunction);
}
I suspect this could be solved without using new at all, but I'll leave that up to you.
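For instance, with a C++14 init-capture you can move a test object straight into the lambda so that the closure owns its own copy; nothing dangles and nothing has to be deleted. A sketch of the loop, reusing the test class and thread_pool from the question:
for (int i = 0; i < 10; i++)
{
    // the closure stores its own test object, valid for as long as the task exists
    std::function<void()> myFunction = [myTest = test(i)]() mutable { myTest.task(); };
    pool.submit(myFunction);
}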

Thread safe ExpiringDeque data structure in C++

I am trying to create a data structure, ExpiringDeque. It should be somewhat similar to std::deque. Let's say I need only push_back(), size() and pop_front(). The data structure needs to automatically expire (remove) up to the first N elements every T seconds.
This data structure needs to manage its own queue and expiration thread internally.
How do I write it in a thread-safe way? This is an example that I came up with; does this seem reasonable? What am I missing?
#include <algorithm>
#include <atomic>
#include <cassert>
#include <deque>
#include <mutex>
#include <thread>
#include <unistd.h>
#include <iostream>
template <typename T>
class ExpiringDeque {
public:
ExpiringDeque(int n, int t) : numElements_(n), interval_(t), running_(true), items_({}) {
expiringThread_ = std::thread{[&] () {
using namespace std::chrono_literals;
int waitCounter = 0;
while (true) {
if (!running_) {
return;
}
std::this_thread::sleep_for(1s);
if (waitCounter++ < interval_) {
continue;
}
std::lock_guard<std::mutex> guard(mutex_);
waitCounter = 0;
int numToErase = std::min(numElements_, static_cast<int>(items_.size()));
std::cout << "Erasing " << numToErase << " elements\n";
items_.erase(items_.begin(), items_.begin() + numToErase);
}
}};
}
~ExpiringDeque() {
running_ = false;
expiringThread_.join();
}
T pop_front() {
if (items_.size() == 0) {
throw std::out_of_range("Empty deque");
}
std::lock_guard<std::mutex> guard(mutex_);
T item = items_.front();
items_.pop_front();
return item;
}
int size() {
std::lock_guard<std::mutex> guard(mutex_);
return items_.size();
}
void push_back(T item) {
std::lock_guard<std::mutex> guard(mutex_);
items_.push_back(item);
}
private:
int numElements_;
int interval_;
std::atomic<bool> running_;
std::thread expiringThread_;
std::mutex mutex_;
std::deque<T> items_;
};
int main() {
ExpiringDeque<int> ed(10, 3);
ed.push_back(1);
ed.push_back(2);
ed.push_back(3);
assert(ed.size() == 3);
assert(ed.pop_front() == 1);
assert(ed.size() == 2);
// wait for expiration
sleep(5);
assert(ed.size() == 0);
ed.push_back(10);
assert(ed.size() == 1);
assert(ed.pop_front() == 10);
return 0;
}
You can avoid an unnecessary wait in the destructor of ExpiringDeque by using a condition variable. I would also use std::condition_variable::wait_for with a predicate to check the running_ flag. This ensures that you wait either for the timeout or for a notification, whichever comes first, and it lets you drop the waitCounter and the continue.
Another thing you should do is lock the mutex before checking the size of your deque in pop_front(), otherwise it's not thread safe.
Here's an updated version of your code:
template <typename T>
class ExpiringDeque {
public:
ExpiringDeque(int n, int t) : numElements_(n), interval_(t), running_(true), items_({}), cv_() {
expiringThread_ = std::thread{ [&]() {
using namespace std::chrono_literals;
while (true) {
//Wait for timeout or notification
std::unique_lock<std::mutex> lk(mutex_);
cv_.wait_for(lk, interval_ * 1s, [&] { return !running_; });
if (!running_)
return;
//Mutex is locked already - no need to lock again
int numToErase = std::min(numElements_, static_cast<int>(items_.size()));
std::cout << "Erasing " << numToErase << " elements\n";
items_.erase(items_.begin(), items_.begin() + numToErase);
}
} };
}
~ExpiringDeque() {
//Set flag and notify worker thread
{
std::lock_guard<std::mutex> lk(mutex_);
running_ = false;
}
cv_.notify_one();
expiringThread_.join();
}
T pop_front() {
std::lock_guard<std::mutex> guard(mutex_);
if (items_.size() == 0) {
throw std::out_of_range("Empty deque");
}
T item = items_.front();
items_.pop_front();
return item;
}
...
private:
int numElements_;
int interval_;
bool running_;
std::thread expiringThread_;
std::mutex mutex_;
std::deque<T> items_;
std::condition_variable cv_;
};
You can make the running_ flag a plain bool, because it is now only read and written while mutex_ is held: std::condition_variable::wait_for evaluates the predicate under the lock, atomically with the timeout or notification.
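For reference, the predicate overload of wait_for behaves roughly like the helper below (a sketch of the equivalence, not the real library code), which is why the flag is only ever examined while mutex_ is held:
#include <chrono>
#include <condition_variable>
#include <mutex>

// Roughly what cv.wait_for(lk, dur, pred) does: the lock is held whenever
// pred() is evaluated and is only released inside wait_until().
template <class Pred>
bool wait_for_equivalent(std::condition_variable& cv,
                         std::unique_lock<std::mutex>& lk,
                         std::chrono::milliseconds dur, Pred pred)
{
    const auto deadline = std::chrono::steady_clock::now() + dur;
    while (!pred()) {
        if (cv.wait_until(lk, deadline) == std::cv_status::timeout)
            return pred(); // timed out: report the predicate's final state
    }
    return true;
}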

Simple worker thread in C++ class

Assume that there is a class which contains some data and calculates some results given queries, and the queries take a relatively large amount of time.
An example class (everything dummy) is:
#include <vector>
#include <numeric>
#include <thread>
struct do_some_work
{
do_some_work(std::vector<int> data)
: _data(std::move(data))
, _current_query(0)
, _last_calculated_result(0)
{}
void update_query(size_t x) {
if (x < _data.size()) {
_current_query = x;
recalculate_result();
}
}
int get_result() const {
return _last_calculated_result;
}
private:
void recalculate_result() {
//dummy some work here
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
_last_calculated_result = std::accumulate(_data.cbegin(), _data.cbegin() + _current_query, 0);
}
std::vector<int> const _data;
size_t _current_query;
int _last_calculated_result;
};
and this can be used in the main code like:
#include <algorithm>
#include <iostream>
int main()
{
//make some dummy data
std::vector<int> test_data(20, 0);
std::iota(test_data.begin(), test_data.end(), 0);
{
do_some_work work(test_data);
for (size_t i = 0; i < test_data.size(); ++i) {
work.update_query(i);
std::cout << "result = {" << i << "," << work.get_result() << "}" << std::endl;
}
}
}
The above will wait in the main function a lot.
Now, assume we want to run this querying in a tight loop (say, a GUI) and only care about getting a "recent" result quickly when we query.
So we want to move the work to a separate thread which calculates and updates the result, and when we ask for the result we simply get the last calculated one. That is, we want to change the do_some_work class to do its work on a thread, with minimal changes (essentially, find a pattern of changes that can be applied to (mostly) any class of this type).
My stab at this is the following:
#include <vector>
#include <numeric>
#include <mutex>
#include <thread>
#include <condition_variable>
#include <iostream>
struct do_lots_of_work
{
do_lots_of_work(std::vector<int> data)
: _data(std::move(data))
, _current_query(0)
, _last_calculated_result(0)
, _worker()
, _data_mtx()
, _result_mtx()
, _cv()
, _do_exit(false)
, _work_available(false)
{
start_worker();
}
void update_query(size_t x) {
{
if (x < _data.size()) {
std::lock_guard<std::mutex> lck(_data_mtx);
_current_query = x;
_work_available = true;
_cv.notify_one();
}
}
}
int get_result() const {
std::lock_guard<std::mutex> lck(_result_mtx);
return _last_calculated_result;
}
~do_lots_of_work() {
stop_worker();
}
private:
void start_worker() {
if (!_worker.joinable()) {
std::cout << "starting worker..." << std::endl;
_worker = std::thread(&do_lots_of_work::worker_loop, this);
}
}
void stop_worker() {
std::cout << "worker stopping..." << std::endl;
if (_worker.joinable()) {
std::unique_lock<std::mutex> lck(_data_mtx);
_do_exit = true;
lck.unlock();
_cv.notify_one();
_worker.join();
}
std::cout << "worker stopped" << std::endl;
}
void worker_loop() {
std::cout << "worker started" << std::endl;
while (true) {
std::unique_lock<std::mutex> lck(_data_mtx);
_cv.wait(lck, [this]() {return _work_available || _do_exit; });
if (_do_exit) { break; }
if (_work_available) {
_work_available = false;
int query = _current_query; //take local copy
lck.unlock(); //unlock before doing lots of work.
recalculate_result(query);
}
}
}
void recalculate_result(int query) {
//dummy lots of work here
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
int const result = std::accumulate(_data.cbegin(), _data.cbegin() + query, 0);
set_result(result);
}
void set_result(int result) {
std::lock_guard<std::mutex> lck(_result_mtx);
_last_calculated_result = result;
}
std::vector<int> const _data;
size_t _current_query;
int _last_calculated_result;
std::thread _worker;
mutable std::mutex _data_mtx;
mutable std::mutex _result_mtx;
std::condition_variable _cv;
bool _do_exit;
bool _work_available;
};
and the usage is (example):
#include <algorithm>
int main()
{
//make some dummy data
std::vector<int> test_data(20, 0);
std::iota(test_data.begin(), test_data.end(), 0);
{
do_lots_of_work work(test_data);
for (size_t i = 0; i < test_data.size(); ++i) {
work.update_query(i);
std::this_thread::sleep_for(std::chrono::milliseconds(500));
std::cout << "result = {" << i << "," << work.get_result() << "}" << std::endl;
}
}
}
This seems to work, giving the last result, not stopping the main function etc.
But this looks like a LOT of changes are required to add a worker thread to a simple class like do_some_work: two mutexes (one for the worker/main interaction data and one for the result), one condition_variable, one more-work-available flag and one do-exit flag, which is quite a bit. I guess we don't want an async-style mechanism, because we don't want to potentially launch a new thread every time.
Now, I am not sure if there is a MUCH simpler pattern for making this kind of change, but it feels like there should be: a kind of pattern that can be used to off-load work to a thread.
So finally, my question is, can do_some_work be converted into do_lots_of_work in a much simpler way than the implementation above?
Edit (Solution 1) ThreadPool based:
Using a thread pool, the worker loop can be skipped; we need two mutexes, one for the result and one for the query. We lock when updating the query, lock when getting the result, and take each lock in turn in recalculate_result (take a local copy of the query, then write the result).
Note: also, when pushing work onto the queue, we can clear the work queue first, since we do not care about older (out-of-date) results.
Example implementation (using the CTPL threadpool)
#include "CTPL\ctpl_stl.h"
#include <vector>
#include <mutex>
struct do_lots_of_work_with_threadpool
{
do_lots_of_work_with_threadpool(std::vector<int> data)
: _data(std::move(data))
, _current_query(0)
, _last_calculated_result(0)
, _pool(1)
, _result_mtx()
, _query_mtx()
{
}
void update_query(size_t x) {
if (x < _data.size()) {
std::lock_guard<std::mutex> lck(_query_mtx);
_current_query = x;
}
_pool.clear_queue(); //clear, as we don't want to calculate any out-of-date results.
_pool.push([this](int id) { recalculate_result(); });
}
int get_result() const {
std::lock_guard<std::mutex> lck(_result_mtx);
return _last_calculated_result;
}
private:
void recalculate_result() {
//dummy some work here
size_t query;
{
std::lock_guard<std::mutex> lck(_query_mtx);
query = _current_query;
}
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
int result = std::accumulate(_data.cbegin(), _data.cbegin() + query, 0);
{
std::lock_guard<std::mutex> lck(_result_mtx);
_last_calculated_result = result;
}
}
std::vector<int> const _data;
size_t _current_query;
int _last_calculated_result;
ctpl::thread_pool _pool;
mutable std::mutex _result_mtx;
mutable std::mutex _query_mtx;
};
Edit (Solution 2) With ThreadPool and Atomic:
This solution changes the shared variables to atomics, so we do not need any mutexes and do not have to think about taking/releasing locks at all. This is much simpler and very close to the original class (it does, of course, assume a thread-pool type exists somewhere, as one is not part of the standard).
#include "CTPL\ctpl_stl.h"
#include <vector>
#include <mutex>
#include <atomic>
struct do_lots_of_work_with_threadpool_and_atomics
{
do_lots_of_work_with_threadpool_and_atomics(std::vector<int> data)
: _data(std::move(data))
, _current_query(0)
, _last_calculated_result(0)
, _pool(1)
{
}
void update_query(size_t x) {
if (x < _data.size()) {
_current_query.store(x);
}
_pool.clear_queue(); //clear, as we don't want to calculate any out-of-date results.
_pool.push([this](int id) { recalculate_result(); });
}
int get_result() const {
return _last_calculated_result.load();
}
private:
void recalculate_result() {
//dummy some work here
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
_last_calculated_result.store(std::accumulate(_data.cbegin(), _data.cbegin() + _current_query.load(), 0));
}
std::vector<int> const _data;
std::atomic<size_t> _current_query;
std::atomic<int> _last_calculated_result;
ctpl::thread_pool _pool;
};
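The usage is the same as for the original class; a sketch of a driver, assuming the class above and the CTPL header are available:
#include <chrono>
#include <iostream>
#include <numeric>
#include <thread>
#include <vector>

int main()
{
    // make some dummy data, as in the earlier example
    std::vector<int> test_data(20, 0);
    std::iota(test_data.begin(), test_data.end(), 0);
    {
        do_lots_of_work_with_threadpool_and_atomics work(test_data);
        for (size_t i = 0; i < test_data.size(); ++i) {
            work.update_query(i);
            std::this_thread::sleep_for(std::chrono::milliseconds(500));
            std::cout << "result = {" << i << "," << work.get_result() << "}" << std::endl;
        }
    }
}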

C++ sleep for undetermined amount of time

In my code I want my program to sleep until a condition has been met. After searching I found #include <unistd.h>, but to me it looks like sleep() just sleeps until a given time frame has elapsed. I was wondering if there is an easy way to make the program wait until the condition has been reached.
Here you have a sample of the code to clarify my point
bool check() {
while (condition) {
sleep.here();
} else {
run.the.rest();
}
}
Based on your incomplete pseudo-code and description, here is a class condition_t, where you can set your condition via set_condition, and a thread blocking in loop will wake up every time you set it.
#include <iostream>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <atomic>
struct condition_t {
public:
template <typename T>
void loop(T the_rest) {
while(running) {
std::unique_lock<std::mutex> lock_guard(m);
cv.wait(lock_guard, [this] { return ready.load(); });
the_rest();
ready = false;
}
}
void set_condition(bool value) {
// modify the shared state while holding the mutex, otherwise a notification
// can slip in between the waiter's predicate check and its wait and be lost
{
std::lock_guard<std::mutex> lock(m);
ready = value;
}
if (value) {
cv.notify_one();
}
}
void stop_running() {
{
std::lock_guard<std::mutex> lock(m);
running = false;
ready = true;
}
cv.notify_all();
}
~condition_t () {stop_running();}
private:
std::mutex m;
std::condition_variable cv;
std::atomic<bool> ready{false};
std::atomic<bool> running{true};
};
int main() {
condition_t condition;
std::thread thread(&condition_t::loop<void (void)>, &condition, [] () {
std::cout << "Doing the rest" << std::endl;
});
std::cout << "Thread created but waits\nInput something for continue:";
int something;
std::cin >> something;
std::cout << "Continueing\n";
condition.set_condition(true);
std::cout << "Input something to stop running:";
std::cin >> something;
condition.stop_running();
thread.join();
}

Sync queue between two threads

This is a simple program which has a function start() that waits for the user to enter something (in an infinite loop) and stores it in a queue. start() runs in a separate thread. After the user enters some value, the size of the queue remains zero in main. How can the queue be synchronized?
code: source.cpp
#include <iostream>
#include "kl.h"
using namespace std;
int main()
{
std::thread t1(start);
while (1)
{
if (q.size() > 0)
{
std::cout << "never gets inside this if\n";
std::string first = q.front();
q.pop();
}
}
t1.join();
}
code: kl.h
#include <queue>
#include <iostream>
#include <string>
void start();
static std::queue<std::string> q;
code: kl.cpp
#include "kl.h"
using namespace std;
void start()
{
char i;
string str;
while (1)
{
for (i = 0; i <= 1000; i++)
{
//other stuff and str input
q.push(str);
}
}
}
Your code contains a race - it crashed for me; both threads access the queue with no synchronization. There is a second problem: q is declared static in a header, so every translation unit that includes kl.h gets its own copy of the queue, and main and start() are not even operating on the same object. (Also, you're looping with a char i for values up to 1000 - a char cannot hold 1000, so on typical platforms that inner loop never terminates.)
You should protect your shared queue with a std::mutex, and use a std::condition_variable to notify that there is a reason to check the queue.
Specifically, you should consider the following (which is very common for your producer/consumer case):
Access the queue only when holding the mutex.
Use the condition variable to notify that you've pushed something into it.
Use the condition variable to specify a condition on when there's a point to continue processing.
Here is a rewrite of your code:
#include <iostream>
#include <queue>
#include <thread>
#include <condition_variable>
#include <mutex>
using namespace std;
std::queue<std::string> q;
std::mutex m;
std::condition_variable cv;
void start()
{
string str;
for (std::size_t i = 0; i <= 1000; i++) {
//other stuff and str input
std::cout << "here" << std::endl;
std::unique_lock<std::mutex> lk(m);
q.push(str);
lk.unlock();
cv.notify_one();
}
}
int main()
{
std::thread t1(start);
for (std::size_t i = 0; i <= 1000; i++)
{
std::unique_lock<std::mutex> lk(m);
cv.wait(lk, []{return !q.empty();});
std::string first = q.front();
q.pop();
}
t1.join();
}
My synced queue class example and its usage:
template<typename T>
class SyncQueue
{
std::queue<T> m_Que;
std::mutex m_Lock;
std::condition_variable m_ConVar;
public:
void enque(T item)
{
std::unique_lock<std::mutex> lock(m_Lock);
m_Que.push(item);
lock.unlock();
m_ConVar.notify_all();
}
T deque()
{
std::unique_lock<std::mutex> lock(m_Lock);
// the predicate form handles spurious wakeups and also items that were
// pushed before we started waiting (a plain wait would miss those)
m_ConVar.wait(lock, [this] { return !m_Que.empty(); });
auto ret = m_Que.front();
m_Que.pop();
return ret;
}
};
int main()
{
using namespace std::chrono_literals;
SyncQueue<int> sq;
std::thread consumer([&sq]()
{
std::cout << "consumer" << std::endl;
for(;;)
{
std::cout << sq.deque() << std::endl;
}
});
std::thread provider([&sq]()
{
std::this_thread::sleep_for(1s);
sq.enque(1);
std::this_thread::sleep_for(3s);
sq.enque(2);
std::this_thread::sleep_for(5s);
sq.enque(3);
});
consumer.join();
return 0;
}
/* Here is a code snippet with separate classes for producing and
consuming, along with a buffer class */
#include <iostream>
#include <mutex>
#include <condition_variable>
#include <thread>
#include <deque>
#include <vector>
#include <chrono>
#include <cstdlib> // for rand()
using namespace std;
mutex _mutex_1,_mutex_2;
condition_variable cv;
template <typename T>
class Queue
{
deque<T> _buffer;
const unsigned int max_size = 10;
public:
Queue() = default;
void push(const T& item)
{
while(1)
{
unique_lock<mutex> locker(_mutex_1);
cv.wait(locker,[this](){ return _buffer.size() < max_size; });
_buffer.push_back(item);
locker.unlock();
cv.notify_all();
return;
}
}
T pop()
{
while(1)
{
unique_lock<mutex> locker(_mutex_1);
cv.wait(locker,[this](){ return _buffer.size() > 0; });
T item = _buffer.back(); // note: takes the most recently pushed item (LIFO order)
_buffer.pop_back();
locker.unlock();
cv.notify_all();
return item;
}
}
};
class Producer
{
Queue<int>* _buffer;
public:
Producer(Queue<int>* _buf)
{
this->_buffer = _buf;
}
void run()
{
while(1)
{
auto num = rand()%100;
_buffer->push(num);
_mutex_2.lock();
cout<<"Produced:"<<num<<endl;
this_thread::sleep_for(std::chrono::milliseconds(50));
_mutex_2.unlock();
}
}
};
class Consumer
{
Queue<int>* _buffer;
public:
Consumer(Queue<int>* _buf)
{
this->_buffer = _buf;
}
void run()
{
while(1)
{
auto num = _buffer->pop();
_mutex_2.lock();
cout<<"Consumed:"<<num<<endl;
this_thread::sleep_for(chrono::milliseconds(50));
_mutex_2.unlock();
}
}
};
void client()
{
Queue<int> b;
Producer p(&b);
Consumer c(&b);
thread producer_thread(&Producer::run, &p);
thread consumer_thread(&Consumer::run, &c);
producer_thread.join();
consumer_thread.join();
}
int main()
{
client();
return 0;
}