Here is a simple example of using std::condition_variable:
#include <iostream> // std::cout
#include <thread> // std::thread
#include <mutex> // std::mutex, std::unique_lock
#include <condition_variable> // std::condition_variable
std::mutex mtx;
std::condition_variable cv;
int global_status = 0;
void print_id(int id)
{
std::unique_lock<std::mutex> lck(mtx);
while (global_status == 0)
{
cv.wait(lck);
}
std::cout << "thread " << id << '\n';
}
int main()
{
std::thread threads[10];
for (int i = 0; i < 10; ++i)
{
threads[i] = std::thread(print_id, i);
}
std::cout << "Start" << std::endl;
std::this_thread::sleep_for(std::chrono::seconds(5));
{
std::unique_lock<std::mutex> lck(mtx);
global_status = 1;
cv.notify_all();
}
for (auto& th : threads) th.join();
return 0;
}
I still can't figure out why I need to lock the mutex around global_status when I change its value.
I change the value of global_status from only one thread - so why lock the mutex for that write? Or is it not necessary?
You need the mutex because the value is read in other threads. There are two reasons: a concurrent unsynchronized read and write of a non-atomic int is a data race (undefined behavior), and without holding the mutex while you write, a waiting thread could test global_status == 0 and then the write plus notify_all() could happen just before it reaches cv.wait(lck), so the notification would be lost and the thread would sleep forever.
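For reference, here is a minimal sketch of the write side, reusing the same mtx, global_status, and cv from the example above; the comments restate why the lock matters:
{
    std::lock_guard<std::mutex> lck(mtx); // same mutex the waiters hold while testing global_status
    global_status = 1;                    // protected write: no data race, no window for a lost wakeup
}
cv.notify_all(); // notifying after unlocking (as here) or while still holding the lock are both valid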
I am new to C++11 and using threading features. In the following program, the main thread starts 9 worker threads and pushes data into a queue and then goes to wait for thread termination. I see that the worker threads don't get woken up and the program just hangs.
#include <iostream>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <queue>
#include <vector>
#include <chrono>
#include <future>
#include <atomic>
using namespace std::chrono_literals;
std::mutex _rmtx;
std::mutex _wmtx;
std::queue<unsigned long long> dataq;
std::condition_variable _rcv;
std::condition_variable _wcv;
std::atomic_bool termthd;
void thdfunc(const int& num)
{
std::cout << "starting thread#" << num << std::endl;
std::unique_lock<std::mutex> rul(_rmtx);
while (true) {
while(!_rcv.wait_until(rul, std::chrono::steady_clock::now() + 10ms, [] {return !dataq.empty() || termthd.load(); }));
if (termthd.load()) {
std::terminate();
}
std::cout<<"thd#" << num << " : " << dataq.front() <<std::endl;
dataq.pop();
_wcv.notify_one();
}
}
int main()
{
std::vector<std::thread*> thdvec;
std::unique_lock<std::mutex> wul(_rmtx);
unsigned long long data = 0ULL;
termthd.store(false);
for (int i = 0; i < 9; i++) {
thdvec.push_back(new std::thread(thdfunc, i));
}
for ( data = 0ULL; data < 2ULL; data++) {
_wcv.wait_until(wul, std::chrono::steady_clock::now() + 10ms, [&] {return data > 1000000ULL; });
dataq.push(std::ref(data));
_rcv.notify_one();
}
termthd.store(true);
_rcv.notify_all();
//std::this_thread::yield();
for (int i = 0; i < 9; i++) {
thdvec[i]->join();
}
}
I am unable to figure out the problem. How can I make sure the threads get woken up, process the requests, and terminate normally?
This line, std::unique_lock<std::mutex> wul(_rmtx);, locks the _rmtx mutex until the end of main's scope. That is certainly an issue, because other threads trying to acquire _rmtx will block:
int main()
{
std::vector<std::thread*> thdvec;
std::unique_lock<std::mutex> wul(_rmtx); // <- locking mutex until end of main.
// other threads trying to lock _rmtx will block
unsigned long long data = 0ULL;
// ... rest of the code ...
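One way to fix it (a sketch of my own, not code from the question or the answer) is to keep the lock scoped to the queue operations inside the producer loop, so the workers can acquire _rmtx while they wait:
for (data = 0ULL; data < 2ULL; data++) {
    {
        std::lock_guard<std::mutex> lk(_rmtx); // hold _rmtx only while touching dataq
        dataq.push(data);
    } // lock released here, so a worker waiting on _rcv can reacquire _rmtx
    _rcv.notify_one();
}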
I've implemented thread pooling following Kerrek SB's answer to this question.
I've implemented an MPMC queue for the functions and a vector to hold the threads.
Everything works, except that I don't know how to terminate the program: if I just call thread.join() at the end, the threads are still waiting for more tasks, so they never join and the main thread never continues.
Any idea how to end the program correctly?
For completeness, this is my code:
function_pool.h
#pragma once
#include <queue>
#include <functional>
#include <mutex>
#include <condition_variable>
class Function_pool
{
private:
std::queue<std::function<void()>> m_function_queue;
std::mutex m_lock;
std::condition_variable m_data_condition;
public:
Function_pool();
~Function_pool();
void push(std::function<void()> func);
std::function<void()> pop();
};
function_pool.cpp
#include "function_pool.h"
Function_pool::Function_pool() : m_function_queue(), m_lock(), m_data_condition()
{
}
Function_pool::~Function_pool()
{
}
void Function_pool::push(std::function<void()> func)
{
std::unique_lock<std::mutex> lock(m_lock);
m_function_queue.push(func);
// when we send the notification, the consumer will immediately try to get the lock, so unlock asap
lock.unlock();
m_data_condition.notify_one();
}
std::function<void()> Function_pool::pop()
{
std::unique_lock<std::mutex> lock(m_lock);
m_data_condition.wait(lock, [this]() { return !m_function_queue.empty(); });
auto func = m_function_queue.front();
m_function_queue.pop();
return func;
// Lock will be released
}
main.cpp
#include "function_pool.h"
#include <string>
#include <iostream>
#include <mutex>
#include <functional>
#include <thread>
#include <vector>
Function_pool func_pool;
void example_function()
{
std::cout << "bla" << std::endl;
}
void infinite_loop_func()
{
while (true)
{
std::function<void()> func = func_pool.pop();
func();
}
}
int main()
{
std::cout << "stating operation" << std::endl;
int num_threads = std::thread::hardware_concurrency();
std::cout << "number of threads = " << num_threads << std::endl;
std::vector<std::thread> thread_pool;
for (int i = 0; i < num_threads; i++)
{
thread_pool.push_back(std::thread(infinite_loop_func));
}
//here we should send our functions
func_pool.push(example_function);
for (int i = 0; i < thread_pool.size(); i++)
{
thread_pool.at(i).join();
}
int i;
std::cin >> i;
}
Your problem is located in infinite_loop_func: it is an infinite loop, and as a result it never terminates. I've read the previous answer, which suggests throwing an exception; however, I don't like that, since exceptions should not be used for regular control flow.
The best way to solve this is to explicitly deal with the stop condition. For example:
std::atomic<bool> acceptsFunctions;
Adding this to the function pool gives you explicit state and lets you assert that no new functions are being added when you destruct.
std::optional<std::function<void()>> Function_pool::pop()
Returning an empty optional (or an empty std::function in C++14 and earlier) allows you to deal with an empty queue. You have to handle that case, as a condition_variable can wake up spuriously.
With this, m_data_condition.notify_all() can be used to wake all threads.
Finally, we have to fix the infinite loop so that it handles shutdown (and the case where there are more threads than work) while still executing every function left in the queue:
while (func_pool.acceptsFunctions || func_pool.containsFunctions())
{
    auto f = func_pool.pop();
    if (!f)
    {
        // nothing to run right now: wait briefly for new work
        // (wait_for needs a lock; this assumes the loop can reach the pool's mutex,
        // e.g. because it is a member function, and that std::chrono_literals is in scope)
        std::unique_lock<std::mutex> lock(func_pool.m_lock);
        func_pool.m_data_condition.wait_for(lock, 1s);
        continue;
    }
    auto &function = *f;
    function();
}
I'll leave it up to you to implement containsFunctions() and to clean up the code (infinite_loop_func as a member function?). Note that with a counter, you could even deal with background tasks being spawned.
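For illustration, here is a minimal sketch of what the two suggested members might look like, reusing the m_lock, m_function_queue, and m_data_condition members from the question and the acceptsFunctions flag suggested above; it needs <optional> (C++17) and is my own sketch, not necessarily the exact code the answer had in mind:
bool Function_pool::containsFunctions()
{
    std::lock_guard<std::mutex> lock(m_lock); // protect the queue while inspecting it
    return !m_function_queue.empty();
}
std::optional<std::function<void()>> Function_pool::pop()
{
    std::unique_lock<std::mutex> lock(m_lock);
    // wake on new work or on shutdown; the predicate also guards against spurious wakeups
    m_data_condition.wait(lock, [this]() { return !m_function_queue.empty() || !acceptsFunctions; });
    if (m_function_queue.empty())
        return std::nullopt; // shutting down with nothing left to run
    auto func = m_function_queue.front();
    m_function_queue.pop();
    return func;
}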
You can always use a specific exception type to signal to infinite_loop_func that it should return...
class quit_worker_exception: public std::exception {};
Then change infinite_loop_func to...
void infinite_loop_func ()
{
while (true) {
std::function<void()> func = func_pool.pop();
try {
func();
}
catch (quit_worker_exception &ex) {
return;
}
}
}
With the above changes you could then use (in main)...
/*
* Enqueue `thread_pool.size()' function objects whose sole job is
* to throw an instance of `quit_worker_exception' when invoked.
*/
for (int i = 0; i < thread_pool.size(); i++)
func_pool.push([](){ throw quit_worker_exception(); });
/*
* Now just wait for each worker to terminate having received its
* quit_worker_exception.
*/
for (int i = 0; i < thread_pool.size(); i++)
thread_pool.at(i).join();
Each instance of infinite_loop_func will dequeue one function object which, when called, throws a quit_worker_exception causing it to return.
Following [JVApen](https://stackoverflow.com/posts/51382714/revisions)'s suggestion, I'm posting my code in case anyone wants a working version:
function_pool.h
#pragma once
#include <queue>
#include <functional>
#include <mutex>
#include <condition_variable>
#include <atomic>
#include <cassert>
class Function_pool
{
private:
std::queue<std::function<void()>> m_function_queue;
std::mutex m_lock;
std::condition_variable m_data_condition;
std::atomic<bool> m_accept_functions;
public:
Function_pool();
~Function_pool();
void push(std::function<void()> func);
void done();
void infinite_loop_func();
};
function_pool.cpp
#include "function_pool.h"
Function_pool::Function_pool() : m_function_queue(), m_lock(), m_data_condition(), m_accept_functions(true)
{
}
Function_pool::~Function_pool()
{
}
void Function_pool::push(std::function<void()> func)
{
std::unique_lock<std::mutex> lock(m_lock);
m_function_queue.push(func);
// when we send the notification, the consumer will immediately try to get the lock, so unlock asap
lock.unlock();
m_data_condition.notify_one();
}
void Function_pool::done()
{
std::unique_lock<std::mutex> lock(m_lock);
m_accept_functions = false;
lock.unlock();
// unlock before notifying so the woken consumers don't immediately block on the mutex
m_data_condition.notify_all(); // notify all waiting threads
}
void Function_pool::infinite_loop_func()
{
std::function<void()> func;
while (true)
{
{
std::unique_lock<std::mutex> lock(m_lock);
m_data_condition.wait(lock, [this]() {return !m_function_queue.empty() || !m_accept_functions; });
if (!m_accept_functions && m_function_queue.empty())
{
//lock will be released automatically.
//finish the thread loop and let it join in the main thread.
return;
}
func = m_function_queue.front();
m_function_queue.pop();
//release the lock
}
func();
}
}
main.cpp
#include "function_pool.h"
#include <string>
#include <iostream>
#include <mutex>
#include <functional>
#include <thread>
#include <vector>
Function_pool func_pool;
class quit_worker_exception : public std::exception {};
void example_function()
{
std::cout << "bla" << std::endl;
}
int main()
{
std::cout << "stating operation" << std::endl;
int num_threads = std::thread::hardware_concurrency();
std::cout << "number of threads = " << num_threads << std::endl;
std::vector<std::thread> thread_pool;
for (int i = 0; i < num_threads; i++)
{
thread_pool.push_back(std::thread(&Function_pool::infinite_loop_func, &func_pool));
}
//here we should send our functions
for (int i = 0; i < 50; i++)
{
func_pool.push(example_function);
}
func_pool.done();
for (unsigned int i = 0; i < thread_pool.size(); i++)
{
thread_pool.at(i).join();
}
}
I have a simple program with two threads, one that pushes a packaged_task into a deque, and another that executes it. In the tasks there is a this_thread::sleep_for, and I would expect that only the "process" thread would wait for it, but both wait, making the execution sequential. What am I missing?
#include <future>
#include <iostream>
#include <deque>
std::mutex m;
std::condition_variable cv;
std::deque<std::packaged_task<void(int)>> deque;
void post() {
int id = 0;
auto lambda = [](int id) {
std::this_thread::sleep_for(std::chrono::seconds(std::rand() % 10 + 1));
std::cout << id << std::endl;
};
while (true) {
std::this_thread::sleep_for(std::chrono::seconds(1));
std::packaged_task<void(int)> task(lambda);
task(id++);
std::lock_guard<std::mutex> lg(m);
deque.emplace_back(std::move(task));
cv.notify_one();
}
}
void process() {
std::deque<std::packaged_task<void(int)>> to_exec;
while (true) {
while (!to_exec.empty()){
std::future<void> fut = to_exec.front().get_future();
fut.get();
to_exec.pop_front();
}
std::unique_lock<std::mutex> lk(m);
cv.wait(lk, []() {return !deque.empty(); });
while (!deque.empty()) {
to_exec.push_back(std::move(deque.front()));
deque.pop_front();
}
}
}
int main() {
std::thread tpost(post);
std::thread tprocess(process);
tpost.join();
tprocess.join();
}
I think it would be more effective to use std::async instead of sleeping for a random number of seconds...
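For example, here is a minimal sketch of that idea (my own illustration, not code from the question): launch each task with std::async and keep the futures, so the sleeps overlap instead of running one after another in the posting thread:
#include <future>
#include <iostream>
#include <thread>
#include <vector>
#include <chrono>
#include <cstdlib>
int main() {
    auto work = [](int id) {
        std::this_thread::sleep_for(std::chrono::seconds(std::rand() % 3 + 1));
        std::cout << id << std::endl;
    };
    std::vector<std::future<void>> futures;
    for (int id = 0; id < 5; ++id)
        futures.push_back(std::async(std::launch::async, work, id)); // each task runs concurrently
    for (auto& fut : futures)
        fut.get(); // wait for every task to finish
}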
I have this piece of code:
std::unique_lock<std::mutex> lock(m_mutex);
for(;;)
{
// wait for input notification
m_event.wait(lock);
// if there is an input pin doesn't have any data, just wait
for(DataPinIn* ptr:m_in_ports)
if(ptr->m_data_dup==NULL)
continue;
// do work
Work(&m_in_ports,&m_out_ports);
// this might need a lock, we'll see
for(DataPinIn* ptr:m_in_ports)
{
// reduce the data refcnt before we lose it
ptr->FreeData();
ptr->m_data_dup=NULL;
std::cout<<"ptr:"<<ptr<<"set to 0\n";
}
}
in which m_event is a condition_variable.
It waits for notification from another thread and then does some works. But I found out that this only succeeds for the first time and it blocks on m_event.wait(lock) forever, no matter how many times m_event.notify_one() is called. How should I solve this?
Thanks in advance.
You are experiencing the common 'spurious wakeup' scenario (see the Wikipedia article), which condition_variable is designed to deal with.
Please read the sample code in this article: http://www.cplusplus.com/reference/condition_variable/condition_variable/.
Usually a condition_variable must be used together with an actual condition (some shared variable) to avoid spurious wakeups; that is where this synchronization primitive gets its name.
Below is a better piece of sample code:
#include <condition_variable>
#include <mutex>
#include <thread>
#include <iostream>
#include <queue>
#include <chrono>
int main()
{
std::queue<int> produced_nums;
std::mutex m;
std::condition_variable cond_var;
bool done = false;
bool notified = false;
std::thread producer([&]() {
for (int i = 0; i < 5; ++i) {
std::this_thread::sleep_for(std::chrono::seconds(1));
std::unique_lock<std::mutex> lock(m);
std::cout << "producing " << i << '\n';
produced_nums.push(i);
notified = true;
cond_var.notify_one();
}
{
std::unique_lock<std::mutex> lock(m); // write 'done' under the lock so the consumer cannot miss it
done = true;
notified = true; // let the consumer leave its inner wait loop even though nothing new was produced
}
cond_var.notify_one();
});
std::thread consumer([&]() {
std::unique_lock<std::mutex> lock(m);
while (!done) {
while (!notified) { // loop to avoid spurious wakeups
cond_var.wait(lock);
}
while (!produced_nums.empty()) {
std::cout << "consuming " << produced_nums.front() << '\n';
produced_nums.pop();
}
notified = false;
}
});
producer.join();
consumer.join();
}
It turns out that a flag variable was ruining everything; the threading part was working correctly.
I am using VS2012 and I want to set thread priority from within a running thread. The goal is to initialize all threads with the highest priority state. To do this I want to get a HANDLE to the thread.
I am having some trouble accessing the pointer that corresponds to the thread object.
Is this possible?
From the calling main thread the pointer is valid, but from inside the C++11 thread it reads as CCCCCCCC. Predictably, dereferencing that nonsense memory location causes a crash.
The code below is a simplified version showing the problem.
#include "stdafx.h"
#include <Windows.h>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <iostream>
#include <atomic>
using namespace std;
class threadContainer
{
thread* mT;
condition_variable* con;
void lockMe()
{
mutex m;
unique_lock<std::mutex> lock(m);
con->wait(lock);//waits for host thread
cout << mT << endl;//CCCCCCCC
auto h = mT->native_handle();//causes a crash
con->wait(lock);//locks forever
}
public:
void run()
{
con = new condition_variable();
mT = new thread(&threadContainer::lockMe,*this);
cout << mT << endl; //00326420
con->notify_one();// Without this line everything locks as expected
mT->join();
}
};
int _tmain(int argc, _TCHAR* argv[])
{
threadContainer mContainer;
mContainer.run();
return 0;
}
#include <mutex>
#include <condition_variable>
#include <iostream>
#include <atomic>
#include <thread>
class threadContainer {
std::thread* mT;
std::mutex m;
void lockMe() {
// wait for mT to be assigned:
{
std::unique_lock<std::mutex> lock(m);
}
std::cout << "lockMe():" << mT << "\n";
auto h = mT->native_handle(); // no longer crashes: mT is assigned before the lock is released
std::cout << "Done lockMe!\n";
}
public:
void run() {
// release lock only after mT assigned:
{
std::unique_lock<std::mutex> lock(m);
mT = new std::thread( [&](){ this->lockMe(); } );
}
std::cout << "run():" << mT << "\n"; //00326420
mT->join();
}
};
int main() {
threadContainer mContainer;
mContainer.run();
return 0;
}
Try that.
0xcccccccc means "variable not initialized". You have a threading race bug in your code. The thread starts running before the "mT" variable is assigned. You will need additional synchronization to block the thread until the assignment is completed so you can safely use mT. This will then also ensure that the new thread can see the updated value of mT, a memory barrier is required on a multi-core machine.
This is an example code with condition_variable and mutex.
class threadContainer
{
std::thread* mT;
std::mutex m;
std::condition_variable cv;
bool flag;
void lockMe() {
// 1. you must acquire lock of mutex.
std::unique_lock<std::mutex> lk(m);
// 2. and wait on `cv` for `flag==true`
cv.wait(lk, [&]{ return flag; });
std::cout << mT << std::endl;
auto h = mT->native_handle();
}
public:
void run()
{
flag = false;
mT = new std::thread( [&](){ this->lockMe(); } );
{
// 3. set `flag` and signal `cv`
std::lock_guard<decltype(m)> lk(m);
std::cout << mT << std::endl;
flag = true;
cv.notify_one();
}
mT->join();
}
};
If what you really want to do is "initialize all threads with the highest priority state", how about this simplified code?
Anyway, changing thread priority is platform dependent and outside the C++ standard library.
class threadContainer
{
std::thread thd;
void work() {
// (1) change thread priority itself
::SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_HIGHEST);
// do something...
}
public:
void run()
{
thd = std::thread( [&](){ this->work(); } );
// (2) or change thread priority from outside
::SetThreadPriority(thd.native_handle(), THREAD_PRIORITY_HIGHEST);
thd.join();
}
};