This question should be a little simpler than my last few. I've implemented the following work queue in my program:
Pool.h:
// tpool class
// It's always closed. :glasses:
#ifndef __POOL_H
#define __POOL_H
class tpool {
public:
tpool( std::size_t tpool_size );
~tpool();
template< typename Task >
void run_task( Task task ){
boost::unique_lock< boost::mutex > lock( mutex_ );
if( 0 < available_ ) {
--available_;
io_service_.post( boost::bind( &tpool::wrap_task, this, boost::function< void() > ( task ) ) );
}
}
private:
boost::asio::io_service io_service_;
boost::asio::io_service::work work_;
boost::thread_group threads_;
std::size_t available_;
boost::mutex mutex_;
void wrap_task( boost::function< void() > task );
};
extern tpool dbpool;
#endif
pool.cpp:
#include <boost/asio/io_service.hpp>
#include <boost/thread/mutex.hpp>
#include <boost/bind.hpp>
#include <boost/thread.hpp>
#include "pool.h"
tpool::tpool( std::size_t tpool_size ) : work_( io_service_ ), available_( tpool_size ) {
for ( std::size_t i = 0; i < tpool_size; ++i ){
threads_.create_thread( boost::bind( &boost::asio::io_service::run, &io_service_ ) );
}
}
tpool::~tpool() {
io_service_.stop();
try {
threads_.join_all();
}
catch( ... ) {}
}
void tpool::wrap_task( boost::function< void() > task ) {
// run the supplied task
try {
task();
} // suppress exceptions
catch( ... ) {
}
boost::unique_lock< boost::mutex > lock( mutex_ );
++available_;
}
tpool dbpool( 50 );
The problem, though, is that not all of my calls to run_task() are being completed by the worker threads. I'm not sure whether that's because the task never enters the queue or because the task vanishes when the thread that created it exits.
So my questions are: is there anything special I have to give to boost::thread to make it wait until the queue is unlocked? And what is the expected lifetime of a task entered into the queue? Do tasks go out of scope when the thread that created them exits? If so, how can I prevent that from happening?
Edit: I've made the following changes to my code:
template< typename Task >
void run_task( Task task ){ // add item to the queue
io_service_.post( boost::bind( &tpool::wrap_task, this, boost::function< void() > ( task ) ) );
}
and am now seeing all entries being enqueued correctly. However, I am left with one lingering question: what is the lifetime of tasks added to the queue? Do they cease to exist once the thread that created them exits?
Well, that's really quite simple: you're rejecting the tasks you post!
template< typename Task >
void run_task(Task task){
boost::unique_lock<boost::mutex> lock( mutex_ );
if(0 < available_) {
--available_;
io_service_.post(boost::bind(&tpool::wrap_task, this, boost::function< void() > ( task )));
}
}
Note that the lock only "waits" until the mutex is not owned by another thread. That may well already be the case, including when available_ is already 0. Now look at the line
if(0 < available_) {
This is just an ordinary condition. Holding mutex_ locked doesn't make it "magical" (the program doesn't even know that a relation exists between mutex_ and available_). So, if available_ <= 0, you simply skip posting the job.
Solution #1
Let the io_service do the queuing for you; this is likely what you wanted to achieve in the first place. Instead of keeping track of "available" threads, the io_service does the bookkeeping for you: you control the maximum concurrency simply by running the io_service on that many threads.
Since io_service is already thread-safe, you can do without the lock.
#include <boost/asio.hpp>
#include <boost/thread.hpp>
#include <iostream>
// tpool class
// It's always closed. :glasses:
#ifndef __POOL_H
#define __POOL_H
class tpool {
public:
tpool( std::size_t tpool_size );
~tpool();
template<typename Task>
void run_task(Task task){
io_service_.post(task);
}
private:
// note the order of destruction of members
boost::asio::io_service io_service_;
boost::asio::io_service::work work_;
boost::thread_group threads_;
};
extern tpool dbpool;
#endif
#include <boost/asio/io_service.hpp>
#include <boost/thread/mutex.hpp>
#include <boost/bind.hpp>
#include <boost/thread.hpp>
//#include "pool.h"
tpool::tpool(std::size_t tpool_size) : work_(io_service_) {
for (std::size_t i = 0; i < tpool_size; ++i)
{
threads_.create_thread(
boost::bind(&boost::asio::io_service::run, &io_service_)
);
}
}
tpool::~tpool() {
io_service_.stop();
try {
threads_.join_all();
}
catch(...) {}
}
void foo() { std::cout << __PRETTY_FUNCTION__ << "\n"; }
void bar() { std::cout << __PRETTY_FUNCTION__ << "\n"; }
int main() {
tpool dbpool(50);
dbpool.run_task(foo);
dbpool.run_task(bar);
boost::this_thread::sleep_for(boost::chrono::seconds(1));
}
For shutdown purposes, you will want to enable "clearing" the io_service::work object, otherwise your pool will never exit.
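One way to do that is to hold the work object behind a pointer so it can be released. Here is a minimal sketch, assuming the work_ member above is changed to a std::unique_ptr<boost::asio::io_service::work> (that change is mine, not part of the class as written); resetting it lets io_service::run() return once the queued tasks drain, instead of stop() abandoning them:
// Sketch only: assumes the member is declared as
//   std::unique_ptr<boost::asio::io_service::work> work_;
// and initialized in the constructor with
//   work_.reset(new boost::asio::io_service::work(io_service_));
tpool::~tpool() {
    work_.reset();           // no more incoming work; run() returns when the queue is empty
    try {
        threads_.join_all(); // wait for workers to finish the remaining tasks
    }
    catch (...) {}
}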
Solution #2
Don't use io_service, instead roll your own queue implementation with a condition variable to notify a worker thread of new work being posted. Again, the number of workers is determined by the number of threads in the group.
#include <boost/thread.hpp>
#include <boost/phoenix.hpp>
#include <boost/optional.hpp>
#include <boost/atomic.hpp>
#include <deque>
using namespace boost;
using namespace boost::phoenix::arg_names;
class thread_pool
{
private:
mutex mx;
condition_variable cv;
typedef function<void()> job_t;
std::deque<job_t> _queue;
thread_group pool;
boost::atomic_bool shutdown;
static void worker_thread(thread_pool& q)
{
while (auto job = q.dequeue())
(*job)();
}
public:
thread_pool() : shutdown(false) {
for (unsigned i = 0; i < boost::thread::hardware_concurrency(); ++i)
pool.create_thread(bind(worker_thread, ref(*this)));
}
void enqueue(job_t job)
{
lock_guard<mutex> lk(mx);
_queue.push_back(std::move(job));
cv.notify_one();
}
optional<job_t> dequeue()
{
unique_lock<mutex> lk(mx);
namespace phx = boost::phoenix;
cv.wait(lk, phx::ref(shutdown) || !phx::empty(phx::ref(_queue)));
if (_queue.empty())
return none;
auto job = std::move(_queue.front());
_queue.pop_front();
return std::move(job);
}
~thread_pool()
{
shutdown = true;
{
lock_guard<mutex> lk(mx);
cv.notify_all();
}
pool.join_all();
}
};
void the_work(int id)
{
std::cout << "worker " << id << " entered\n";
// no more synchronization; the pool size determines max concurrency
std::cout << "worker " << id << " start work\n";
this_thread::sleep_for(chrono::seconds(2));
std::cout << "worker " << id << " done\n";
}
int main()
{
thread_pool pool; // uses 1 thread per core
for (int i = 0; i < 10; ++i)
pool.enqueue(bind(the_work, i));
}
Related
I've implemented thread pooling following the answer of Kerrek SB in this question.
I've implemented an MPMC queue for the functions and a vector of threads for the workers.
Everything works, except that I don't know how to terminate the program: if I just call thread::join at the end, the threads are still waiting for more tasks, so they never join and the main thread never continues.
Any idea how to end the program correctly?
For completeness, this is my code:
function_pool.h
#pragma once
#include <queue>
#include <functional>
#include <mutex>
#include <condition_variable>
class Function_pool
{
private:
std::queue<std::function<void()>> m_function_queue;
std::mutex m_lock;
std::condition_variable m_data_condition;
public:
Function_pool();
~Function_pool();
void push(std::function<void()> func);
std::function<void()> pop();
};
function_pool.cpp
#include "function_pool.h"
Function_pool::Function_pool() : m_function_queue(), m_lock(), m_data_condition()
{
}
Function_pool::~Function_pool()
{
}
void Function_pool::push(std::function<void()> func)
{
std::unique_lock<std::mutex> lock(m_lock);
m_function_queue.push(func);
// when we send the notification immediately, the consumer will try to
// get the lock, so unlock asap
lock.unlock();
m_data_condition.notify_one();
}
std::function<void()> Function_pool::pop()
{
std::unique_lock<std::mutex> lock(m_lock);
m_data_condition.wait(lock, [this]() { return !m_function_queue.empty(); });
auto func = m_function_queue.front();
m_function_queue.pop();
return func;
// Lock will be released
}
main.cpp
#include "function_pool.h"
#include <string>
#include <iostream>
#include <mutex>
#include <functional>
#include <thread>
#include <vector>
Function_pool func_pool;
void example_function()
{
std::cout << "bla" << std::endl;
}
void infinite_loop_func()
{
while (true)
{
std::function<void()> func = func_pool.pop();
func();
}
}
int main()
{
std::cout << "stating operation" << std::endl;
int num_threads = std::thread::hardware_concurrency();
std::cout << "number of threads = " << num_threads << std::endl;
std::vector<std::thread> thread_pool;
for (int i = 0; i < num_threads; i++)
{
thread_pool.push_back(std::thread(infinite_loop_func));
}
//here we should send our functions
func_pool.push(example_function);
for (int i = 0; i < thread_pool.size(); i++)
{
thread_pool.at(i).join();
}
int i;
std::cin >> i;
}
Your problem is located in infinite_loop_func, which is an infinite loop and as a result never terminates. I've read the previous answer, which suggests throwing an exception; however, I don't like that approach, since exceptions should not be used for regular control flow.
The best way to solve this is to explicitly deal with the stop condition. For example:
std::atomic<bool> acceptsFunctions;
Adding this to the function pool lets you keep explicit state and assert that no new functions are being added while you are destructing.
std::optional<std::function<void()>> Function_pool::pop()
Returning an empty optional (or an empty std::function in C++14 and before) lets you deal with an empty queue. You have to handle that case anyway, since condition_variable can have spurious wakeups.
With this, m_data_condition.notify_all() can be used to wake all threads.
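A minimal sketch of such a pop(), assuming the flag above is a member named acceptsFunctions alongside the existing queue, mutex, and condition variable; this is illustrative, not the final code shown further down:
// assumes #include <optional> and an updated declaration in function_pool.h
std::optional<std::function<void()>> Function_pool::pop()
{
    std::unique_lock<std::mutex> lock(m_lock);
    // wake up either when work arrives or when the pool stops accepting functions
    m_data_condition.wait(lock, [this]() {
        return !m_function_queue.empty() || !acceptsFunctions;
    });
    if (m_function_queue.empty())
        return std::nullopt;               // woken for shutdown, nothing left to run
    auto func = std::move(m_function_queue.front());
    m_function_queue.pop();
    return func;
}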
Finally, we have to fix the infinite loop so that it can shut down cleanly while still executing all functions remaining in the queue:
while (func_pool.acceptsFunctions || func_pool.containsFunctions())
{
auto f = func_pool.pop();
if (!f)
{
func_pool.m_data_condition.wait_for(1s);
continue;
}
auto &function = *f;
function();
}
I'll leave it up to you to implement containsFunctions() and to clean up the code (should infinite_loop_func be a member function?). Note that with a counter you could even deal with background tasks being spawned. A possible containsFunctions() is sketched below.
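For reference, containsFunctions() could be as small as this (a sketch; it assumes the function is added as a member of Function_pool):
bool Function_pool::containsFunctions()
{
    std::lock_guard<std::mutex> lock(m_lock); // the queue is shared, so guard the read
    return !m_function_queue.empty();
}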
You can always use a specific exception type to signal to infinite_loop_func that it should return...
class quit_worker_exception: public std::exception {};
Then change infinite_loop_func to...
void infinite_loop_func ()
{
while (true) {
std::function<void()> func = func_pool.pop();
try {
func();
}
catch (quit_worker_exception &ex) {
return;
}
}
}
With the above changes you could then use (in main)...
/*
* Enqueue `thread_pool.size()' function objects whose sole job is
* to throw an instance of `quit_worker_exception' when invoked.
*/
for (int i = 0; i < thread_pool.size(); i++)
func_pool.push([](){ throw quit_worker_exception(); });
/*
* Now just wait for each worker to terminate having received its
* quit_worker_exception.
*/
for (int i = 0; i < thread_pool.size(); i++)
thread_pool.at(i).join();
Each instance of infinite_loop_func will dequeue one function object which, when called, throws a quit_worker_exception causing it to return.
Following [JVApen](https://stackoverflow.com/posts/51382714/revisions)'s suggestion, here is my code in case anyone wants a working version:
function_pool.h
#pragma once
#include <queue>
#include <functional>
#include <mutex>
#include <condition_variable>
#include <atomic>
#include <cassert>
class Function_pool
{
private:
std::queue<std::function<void()>> m_function_queue;
std::mutex m_lock;
std::condition_variable m_data_condition;
std::atomic<bool> m_accept_functions;
public:
Function_pool();
~Function_pool();
void push(std::function<void()> func);
void done();
void infinite_loop_func();
};
function_pool.cpp
#include "function_pool.h"
Function_pool::Function_pool() : m_function_queue(), m_lock(), m_data_condition(), m_accept_functions(true)
{
}
Function_pool::~Function_pool()
{
}
void Function_pool::push(std::function<void()> func)
{
std::unique_lock<std::mutex> lock(m_lock);
m_function_queue.push(func);
// when we send the notification immediately, the consumer will try to get the lock , so unlock asap
lock.unlock();
m_data_condition.notify_one();
}
void Function_pool::done()
{
std::unique_lock<std::mutex> lock(m_lock);
m_accept_functions = false;
lock.unlock();
// when we send the notification immediately, the consumer will try to get the lock , so unlock asap
m_data_condition.notify_all();
//notify all waiting threads.
}
void Function_pool::infinite_loop_func()
{
std::function<void()> func;
while (true)
{
{
std::unique_lock<std::mutex> lock(m_lock);
m_data_condition.wait(lock, [this]() {return !m_function_queue.empty() || !m_accept_functions; });
if (!m_accept_functions && m_function_queue.empty())
{
//lock will be released automatically.
//finish the thread loop and let it join in the main thread.
return;
}
func = m_function_queue.front();
m_function_queue.pop();
//release the lock
}
func();
}
}
main.cpp
#include "function_pool.h"
#include <string>
#include <iostream>
#include <mutex>
#include <functional>
#include <thread>
#include <vector>
Function_pool func_pool;
class quit_worker_exception : public std::exception {};
void example_function()
{
std::cout << "bla" << std::endl;
}
int main()
{
std::cout << "stating operation" << std::endl;
int num_threads = std::thread::hardware_concurrency();
std::cout << "number of threads = " << num_threads << std::endl;
std::vector<std::thread> thread_pool;
for (int i = 0; i < num_threads; i++)
{
thread_pool.push_back(std::thread(&Function_pool::infinite_loop_func, &func_pool));
}
//here we should send our functions
for (int i = 0; i < 50; i++)
{
func_pool.push(example_function);
}
func_pool.done();
for (unsigned int i = 0; i < thread_pool.size(); i++)
{
thread_pool.at(i).join();
}
}
Suppose that I have the following code:
#include <boost/asio/io_service.hpp>
#include <boost/thread.hpp>
#include <condition_variable>
#include <iostream>
#include <mutex>
const int THREAD_POOL_SIZE = 2;
std::condition_variable g_cv;
std::mutex g_cv_mutex;
bool g_answer_ready;
void foo()
{
std::cout << "foo \n";
std::unique_lock<std::mutex> lock(g_cv_mutex);
g_answer_ready = true;
g_cv.notify_all();
}
int main()
{
boost::asio::io_service io_service;
for (int i = 0; i < 10; ++i)
{
std::auto_ptr<boost::asio::io_service::work> work(new boost::asio::io_service::work(io_service));
boost::thread_group threads;
for (int i = 0; i < THREAD_POOL_SIZE; ++i)
{
threads.create_thread(boost::bind(&boost::asio::io_service::run, &io_service));
}
std::unique_lock<std::mutex> lock(g_cv_mutex);
io_service.post(foo);
g_answer_ready = false;
g_cv.wait_for(lock, std::chrono::milliseconds(2000));
if (!g_answer_ready)
{
std::cout << "timed_out \n";
}
io_service.stop();
threads.join_all();
}
}
The output differs between runs of the program. For example:
foo
timed_out
foo
foo
However, if I move boost::asio::io_service object construction inside the loop, it works as expected:
foo
foo
foo
foo
foo
foo
foo
foo
foo
foo
Why? What am I doing wrong? How can I fix it?
boost 1.54, MSVC-11.0
If I understand you correctly, you need to fix the last lines of your loop (see the comments for explanations):
// io_service.stop();
// threads.join_all();
work.reset(); // <- signal to process all pending jobs and quit from io_service::run function
threads.join_all(); // <- wait for all threads
io_service.reset(); // <- now `io_service` can accept new tasks
So, there were two issues in the original code:
io_service.stop() will cancel posted but not yet processed jobs (usually this is not what a programmer wants),
io_service.reset() is required to change the state of io_service from "stopped" to "ready to accept new jobs".
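Put together, one iteration of the loop might look like this (a sketch under the same setup as above, with std::unique_ptr in place of the deprecated std::auto_ptr; note that the lock is released before joining so a still-pending foo cannot deadlock on g_cv_mutex):
// (needs <memory> for std::unique_ptr)
for (int i = 0; i < 10; ++i)
{
    std::unique_ptr<boost::asio::io_service::work> work(
        new boost::asio::io_service::work(io_service));
    boost::thread_group threads;
    for (int j = 0; j < THREAD_POOL_SIZE; ++j)
        threads.create_thread(boost::bind(&boost::asio::io_service::run, &io_service));

    {
        std::unique_lock<std::mutex> lock(g_cv_mutex);
        g_answer_ready = false;
        io_service.post(foo);
        g_cv.wait_for(lock, std::chrono::milliseconds(2000));
        if (!g_answer_ready)
            std::cout << "timed_out \n";
    }   // release g_cv_mutex so a late foo() can still complete

    work.reset();        // let io_service::run() return once pending handlers finish
    threads.join_all();  // wait for all worker threads
    io_service.reset();  // make io_service ready to run again in the next iteration
}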
How to create timer events using C++ 11?
I need something like: “Call me after 1 second from now”.
Is there any library?
I've made a simple implementation of what I believe you want to achieve. You can use the later class with the following arguments:
int (milliseconds to wait until to run the code)
bool (if true it returns instantly and runs the code after specified time on another thread)
variable arguments (exactly what you'd feed to std::bind)
You can change std::chrono::milliseconds to std::chrono::nanoseconds or microseconds for even higher precision, and you can add a second int plus a for loop to specify how many times to run the code (a sketch of that variation follows the example output below).
Here you go, enjoy:
#include <functional>
#include <chrono>
#include <future>
#include <cstdio>
#include <thread>
class later
{
public:
template <class callable, class... arguments>
later(int after, bool async, callable&& f, arguments&&... args)
{
std::function<typename std::result_of<callable(arguments...)>::type()> task(std::bind(std::forward<callable>(f), std::forward<arguments>(args)...));
if (async)
{
std::thread([after, task]() {
std::this_thread::sleep_for(std::chrono::milliseconds(after));
task();
}).detach();
}
else
{
std::this_thread::sleep_for(std::chrono::milliseconds(after));
task();
}
}
};
void test1(void)
{
return;
}
void test2(int a)
{
printf("%i\n", a);
return;
}
int main()
{
later later_test1(1000, false, &test1);
later later_test2(1000, false, &test2, 101);
return 0;
}
Outputs after two seconds:
101
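For the repeat-count variation mentioned above, here is a standalone sketch (the helper name call_every_ms and its signature are mine, not part of the later class):
#include <chrono>
#include <functional>
#include <thread>

// Runs f(args...) `times` times, waiting `intervalMs` milliseconds before each call,
// on a detached background thread.
template <class Callable, class... Arguments>
void call_every_ms(int intervalMs, int times, Callable&& f, Arguments&&... args)
{
    std::function<void()> task(
        std::bind(std::forward<Callable>(f), std::forward<Arguments>(args)...));
    std::thread([intervalMs, times, task]() {
        for (int i = 0; i < times; ++i)
        {
            std::this_thread::sleep_for(std::chrono::milliseconds(intervalMs));
            task();
        }
    }).detach();
}
For example, call_every_ms(1000, 3, &test2, 101); would print 101 three times, one second apart.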
The asynchronous solution from Edward:
create new thread
sleep in that thread
do the task in that thread
is simple and might just work for you.
I would also like to give a more advanced version which has these advantages:
no thread startup overhead
only a single extra thread per process required to handle all timed tasks
This might be particularly useful in large software projects where you have many tasks executed repeatedly in your process and you care about resource usage (threads) as well as startup overhead.
Idea: Have one service thread which processes all registered timed tasks. Use boost io_service for that.
Code similar to:
http://www.boost.org/doc/libs/1_65_1/doc/html/boost_asio/tutorial/tuttimer2/src.html
#include <cstdio>
#include <boost/asio.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>
int main()
{
boost::asio::io_service io;
boost::asio::deadline_timer t(io, boost::posix_time::seconds(1));
t.async_wait([](const boost::system::error_code& /*e*/){
printf("Printed after 1s\n"); });
boost::asio::deadline_timer t2(io, boost::posix_time::seconds(1));
t2.async_wait([](const boost::system::error_code& /*e*/){
printf("Printed after 1s\n"); });
// both prints happen at the same time,
// but only a single thread is used to handle both timed tasks
// - namely the main thread calling io.run();
io.run();
return 0;
}
Use RxCpp:
std::cout << "Waiting..." << std::endl;
auto values = rxcpp::observable<>::timer<>(std::chrono::seconds(1));
values.subscribe([](int v) {std::cout << "Called after 1s." << std::endl;});
This is the code I have so far:
I am using VC++ 2012 (no variadic templates)
//header
#include <thread>
#include <mutex>
#include <condition_variable>
#include <vector>
#include <chrono>
#include <memory>
#include <algorithm>
template<class T>
class TimerThread
{
typedef std::chrono::high_resolution_clock clock_t;
struct TimerInfo
{
clock_t::time_point m_TimePoint;
T m_User;
template <class TArg1>
TimerInfo(clock_t::time_point tp, TArg1 && arg1)
: m_TimePoint(tp)
, m_User(std::forward<TArg1>(arg1))
{
}
template <class TArg1, class TArg2>
TimerInfo(clock_t::time_point tp, TArg1 && arg1, TArg2 && arg2)
: m_TimePoint(tp)
, m_User(std::forward<TArg1>(arg1), std::forward<TArg2>(arg2))
{
}
};
std::unique_ptr<std::thread> m_Thread;
std::vector<TimerInfo> m_Timers;
std::mutex m_Mutex;
std::condition_variable m_Condition;
bool m_Sort;
bool m_Stop;
void TimerLoop()
{
for (;;)
{
std::unique_lock<std::mutex> lock(m_Mutex);
while (!m_Stop && m_Timers.empty())
{
m_Condition.wait(lock);
}
if (m_Stop)
{
return;
}
if (m_Sort)
{
//Sort could be done at insert,
//but probably this thread has time to do it
std::sort(m_Timers.begin(),
m_Timers.end(),
[](const TimerInfo & ti1, const TimerInfo & ti2)
{
return ti1.m_TimePoint > ti2.m_TimePoint;
});
m_Sort = false;
}
auto now = clock_t::now();
auto expire = m_Timers.back().m_TimePoint;
if (expire > now) //can I take a nap?
{
auto napTime = expire - now;
m_Condition.wait_for(lock, napTime);
//check again
auto expire = m_Timers.back().m_TimePoint;
auto now = clock_t::now();
if (expire <= now)
{
TimerCall(m_Timers.back().m_User);
m_Timers.pop_back();
}
}
else
{
TimerCall(m_Timers.back().m_User);
m_Timers.pop_back();
}
}
}
template<class T, class TArg1>
friend void CreateTimer(TimerThread<T>& timerThread, int ms, TArg1 && arg1);
template<class T, class TArg1, class TArg2>
friend void CreateTimer(TimerThread<T>& timerThread, int ms, TArg1 && arg1, TArg2 && arg2);
public:
TimerThread() : m_Stop(false), m_Sort(false)
{
m_Thread.reset(new std::thread(std::bind(&TimerThread::TimerLoop, this)));
}
~TimerThread()
{
m_Stop = true;
m_Condition.notify_all();
m_Thread->join();
}
};
template<class T, class TArg1>
void CreateTimer(TimerThread<T>& timerThread, int ms, TArg1 && arg1)
{
{
std::unique_lock<std::mutex> lock(timerThread.m_Mutex);
timerThread.m_Timers.emplace_back(TimerThread<T>::TimerInfo(TimerThread<T>::clock_t::now() + std::chrono::milliseconds(ms),
std::forward<TArg1>(arg1)));
timerThread.m_Sort = true;
}
// wake up
timerThread.m_Condition.notify_one();
}
template<class T, class TArg1, class TArg2>
void CreateTimer(TimerThread<T>& timerThread, int ms, TArg1 && arg1, TArg2 && arg2)
{
{
std::unique_lock<std::mutex> lock(timerThread.m_Mutex);
timerThread.m_Timers.emplace_back(TimerThread<T>::TimerInfo(TimerThread<T>::clock_t::now() + std::chrono::milliseconds(ms),
std::forward<TArg1>(arg1),
std::forward<TArg2>(arg2)));
timerThread.m_Sort = true;
}
// wake up
timerThread.m_Condition.notify_one();
}
//sample
#include <iostream>
#include <string>
void TimerCall(int i)
{
std::cout << i << std::endl;
}
int main()
{
std::cout << "start" << std::endl;
TimerThread<int> timers;
CreateTimer(timers, 2000, 1);
CreateTimer(timers, 5000, 2);
CreateTimer(timers, 100, 3);
std::this_thread::sleep_for(std::chrono::seconds(5));
std::cout << "end" << std::endl;
}
If you are on Windows, you can use the CreateThreadpoolTimer function to schedule a callback without needing to worry about thread management and without blocking the current thread.
#include <windows.h>
#include <iostream>
#include <memory>
#include <stdexcept>
template<typename T>
static void __stdcall timer_fired(PTP_CALLBACK_INSTANCE, PVOID context, PTP_TIMER timer)
{
CloseThreadpoolTimer(timer);
std::unique_ptr<T> callable(reinterpret_cast<T*>(context));
(*callable)();
}
template <typename T>
void call_after(T callable, long long delayInMs)
{
auto state = std::make_unique<T>(std::move(callable));
auto timer = CreateThreadpoolTimer(timer_fired<T>, state.get(), nullptr);
if (!timer)
{
throw std::runtime_error("Timer");
}
ULARGE_INTEGER due;
due.QuadPart = static_cast<ULONGLONG>(-(delayInMs * 10000LL));
FILETIME ft;
ft.dwHighDateTime = due.HighPart;
ft.dwLowDateTime = due.LowPart;
SetThreadpoolTimer(timer, &ft, 0 /*msPeriod*/, 0 /*msWindowLength*/);
state.release();
}
int main()
{
auto callback = []
{
std::cout << "in callback\n";
};
call_after(callback, 1000);
std::cin.get();
}
I'm looking for a simple solution and everything I found is too long and complicated. After reading the documentation, I found that this can be done in just a few lines of code.
This question may be old, but it can be beneficial to future readers.
Example: Set isContinue to false if you want to stop the thread.
#include <chrono>
#include <thread>
volatile bool isContinue = true;
void NameOfYourFunction(){
while(isContinue){
std::this_thread::sleep_for(std::chrono::milliseconds(1000)); //sleep for 1 second
//do something here after every 1 seconds...
}
}
int main(){
std::thread your_thread(NameOfYourFunction); // start NameOfYourFunction on a new thread.
your_thread.detach(); // this will be non-blocking thread.
//your_thread.join(); // this will be blocking thread.
}
Use detach() or join() depending on your situation. When using detach(), the main thread continues running. When using join(), the main thread blocks and waits until the new thread ends.
I am using VS2012 and I want to set thread priority from within a running thread. The goal is to initialize all threads with the highest priority state. To do this I want to get a HANDLE to the thread.
I am having some trouble accessing the pointer that corresponds to the thread object.
Is this possible?
From the calling main thread the pointer is valid, but from inside the C++11 thread it reads as 0xCCCCCCCC. Predictably, dereferencing that nonsense memory location causes a crash.
The code below is a simplified version showing the problem.
#include "stdafx.h"
#include <Windows.h>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <iostream>
#include <atomic>
using namespace std;
class threadContainer
{
thread* mT;
condition_variable* con;
void lockMe()
{
mutex m;
unique_lock<std::mutex> lock(m);
con->wait(lock);//waits for host thread
cout << mT << endl;//CCCCCCCC
auto h = mT->native_handle();//causes a crash
con->wait(lock);//locks forever
}
public:
void run()
{
con = new condition_variable();
mT = new thread(&threadContainer::lockMe,*this);
cout << mT << endl; //00326420
con->notify_one();// Without this line everything locks as expected
mT->join();
}
};
int _tmain(int argc, _TCHAR* argv[])
{
threadContainer mContainer;
mContainer.run();
return 0;
}
#include <mutex>
#include <condition_variable>
#include <iostream>
#include <atomic>
#include <thread>
class threadContainer {
std::thread* mT;
std::mutex m;
void lockMe() {
// wait for mT to be assigned:
{
std::unique_lock<std::mutex> lock(m);
}
std::cout << "lockMe():" << mT << "\n";
auto h = mT->native_handle();//causes a crash
std::cout << "Done lockMe!\n";
}
public:
void run() {
// release lock only after mT assigned:
{
std::unique_lock<std::mutex> lock(m);
mT = new std::thread( [&](){ this->lockMe(); } );
}
std::cout << "run():" << mT << "\n"; //00326420
mT->join();
}
};
int main() {
threadContainer mContainer;
mContainer.run();
return 0;
}
Try that.
0xCCCCCCCC means "variable not initialized". You have a threading race bug in your code: the thread starts running before the mT variable is assigned. You will need additional synchronization to block the thread until the assignment is completed, so you can safely use mT. This also ensures that the new thread can see the updated value of mT; a memory barrier is required on a multi-core machine.
This is an example code with condition_variable and mutex.
class threadContainer
{
std::thread* mT;
std::mutex m;
std::condition_variable cv;
bool flag;
void lockMe() {
// 1. you must acquire lock of mutex.
unique_lock<std::mutex> lk(m);
// 2. and wait on `cv` for `flag==true`
cv.wait(lk, [&]{ return flag; });
cout << mT << endl;
auto h = mT->native_handle();
}
public:
void run()
{
flag = false;
mT = new std::thread( [&](){ this->lockMe(); } );
{
// 3. set `flag` and signal `cv`
lock_guard<decltype(m)> lk(m);
cout << mT << endl;
flag = true;
cv.notify_one();
}
mT->join();
}
};
If what you really want to do is "initialize all threads with the highest priority state", how about this simplified code?
Anyway, changing thread priority is platform dependent and outside the scope of the C++ standard library.
class threadContainer
{
std::thread thd;
void work() {
// (1) change thread priority itself
::SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_HIGHEST);
// do something...
}
public:
void run()
{
thd = std::thread( [&](){ this->work(); } );
// (2) or change thread priority from outside
::SetThreadPriority(thd.native_handle(), THREAD_PRIORITY_HIGHEST);
thd.join();
}
};
I am trying to create a Boost thread pool using Boost.Asio with a work queue, but I am stuck at one point: I need a monitoring function that keeps running and keeps track of the queue. I am not sure how to write that; currently I have put it in my thread pool class. I have also tried writing a separate thread with a global function. Can someone suggest an approach?
#include <boost/thread/thread.hpp>
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#include <iostream>
#include <memory>
#include <vector>
#include <list>
using namespace std;
class threadWork
{
public:
virtual void run()=0;
};
class thread_pool
{
private:
boost::asio::io_service io_service_;
boost::asio::io_service::work work_;
boost::thread_group threads_;
std::size_t available_;
boost::mutex mutex_;
list<shared_ptr<threadWork>> workQueue;
public:
thread_pool( std::size_t pool_size ) : work_( io_service_ ), available_( pool_size )
{
for ( std::size_t i = 0; i < pool_size; ++i )
{
threads_.create_thread( boost::bind( &boost::asio::io_service::run, &io_service_ ) );
}
}
~thread_pool()
{
io_service_.stop();
try
{
threads_.join_all();
}
catch ( ... ) {}
}
void enqueue(shared_ptr<threadWork> work)
{
workQueue.push_back(work);
}
void keepRunning() // how should this be called?
{
while(true)
{
boost::unique_lock< boost::mutex > lock( mutex_ );
if ( 0 == available_ ) // If no threads are available, then sleep.
{
boost::this_thread::sleep(boost::posix_time::milliseconds(1000));
}
else
{
if(workQueue.empty() != true)
{
--available_;
io_service_.post( boost::bind( &thread_pool::wrap_task, this, workQueue ) );
workQueue.pop_front();
}
}
}
}
private:
void wrap_task(list<shared_ptr<threadWork>>& workQueue )
{
try
{
workQueue.front()->run(); // Run the user supplied task.
}
catch ( ... ) // Suppress all exceptions.
{
}
boost::unique_lock< boost::mutex > lock( mutex_ );
++available_;
}
};
class someWork:public threadWork
{
public:
virtual void run()
{
cout<<"some long task \n";
boost::this_thread::sleep(boost::posix_time::milliseconds(5000));
}
};
int main()
{
thread_pool pool(10);
pool.keepRunning(); // this call blocks forever, so where should it be started?
shared_ptr<threadWork> w(new someWork);
pool.enqueue(w);
return 0;
}
You could use boost::asio::deadline_timer to periodically have your io_service update an atomic variable representing the queue length. A class member function could return the queue length on demand (by value).
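A rough sketch of that idea (the names queue_monitor and size_fn are mine; the callable you pass in would lock your queue's mutex and return its size):
#include <boost/asio.hpp>
#include <boost/atomic.hpp>
#include <boost/bind.hpp>
#include <boost/function.hpp>
#include <cstddef>

// Samples a user-supplied "queue length" callback once per second on the
// io_service's threads and caches the result in an atomic for cheap reads.
class queue_monitor
{
public:
    queue_monitor(boost::asio::io_service& io, boost::function<std::size_t()> size_fn)
        : timer_(io, boost::posix_time::seconds(1)), size_fn_(size_fn), length_(0)
    {
        timer_.async_wait(boost::bind(&queue_monitor::tick, this,
                                      boost::asio::placeholders::error));
    }

    std::size_t length() const { return length_; } // returns the last sample by value

private:
    void tick(const boost::system::error_code& ec)
    {
        if (ec) return;        // timer was cancelled, stop monitoring
        length_ = size_fn_();  // e.g. lock the queue mutex and return its size
        timer_.expires_at(timer_.expires_at() + boost::posix_time::seconds(1));
        timer_.async_wait(boost::bind(&queue_monitor::tick, this,
                                      boost::asio::placeholders::error));
    }

    boost::asio::deadline_timer timer_;
    boost::function<std::size_t()> size_fn_;
    boost::atomic<std::size_t> length_;
};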