How to run my threads in parallel with a while loop - C++

Here is some code which increments several chronometers in parallel:
main.cpp
#include <stdio.h>
#include <time.h>
#include <iostream>
#include <math.h>
#include <cstdlib>
#include <unistd.h>
#include <sstream>
#include <thread>
#include <vector>
#include <future>
#include "mychrono.hpp"

using namespace std;

int main()
{
    std::vector<Chronometer*> car_crono;
    Chronometer chrono, output_chrono;
    std::vector<std::thread> threads;
    std::vector<std::future<Chronometer&>> futures;
    std::thread th;
    //future<Chronometer> ft;

    for(int i = 0; i < 2; i++)
    {
        car_crono.push_back(new Chronometer);
    }

    while (1) {
        for(int i = 0; i < 2; i++)
        {
            //threads.push_back(std::thread(&Chronometer::start_chrono, car_crono[i], std::ref(chrono)));
            //auto ft = std::async(std::launch::async, &Chronometer::start_chrono, car_crono[i], std::ref(chrono));
            //std::cout << "Hello-world" << std::endl;
            futures.emplace_back(std::async(std::launch::async, &Chronometer::start_chrono, car_crono[i], std::ref(chrono)));
        }
        std::cout << "hello-world" << std::endl;
        //auto ft = std::async(std::launch::async, &Chronometer::start_chrono, car_crono[0], std::ref(chrono));
        //std::cout << "Hello-world-2" << std::endl;
        for(auto&& f: futures) {
            std::cout << f.get() << '\n';
        }
    }
    car_crono.clear();
}
mychrono.cpp
#include "mychrono.hpp"
#include <time.h>
#include <iostream>
#include <cstdlib>
#include <unistd.h>
#include <sstream>
#include <thread>
//int Chronometer::hour(0), min(0), sec(0);
Chronometer::Chronometer() : hour(0), min(0), sec(0)
{
}
Chronometer& Chronometer::start_chrono(Chronometer& chrono)
{
// if(chrono.hour == 0 && chrono.min == 0 && chrono.sec == 0)
// {
bool condition = true;
while(condition) {
sleep(1);
chrono.sec++;
if(chrono.sec > 59) {
chrono.min++;
chrono.sec = 0;
}
if(chrono.min > 59) {
chrono.hour++;
chrono.sec = 0;
chrono.min = 0;
}
// if(chrono.sec == 10)
// {
// condition = false;
// }
std::cout << "chrono: " << chrono << std::endl;
}
return chrono;
//}
}
Chronometer& Chronometer::finish_chrono(Chronometer& chrono)
{
chrono.hour = 0;
chrono.sec = 0;
chrono.min = 0;
return chrono;
}
std::ostream& operator<<(std::ostream& flux, Chronometer t)
{
flux << t.hour << ":" << t.min << ":" << t.sec;
return flux;
}
Chronometer& Chronometer::operator=(const Chronometer& other)
{
// Guard self assignment
//if (this == &other)
return *this;
}
Chronometer::~Chronometer(){}
mychrono.hpp
#ifndef mychrono_hpp
#define mychrono_hpp

#include <time.h>
#include <iostream>
#include <sstream>

class Chronometer
{
private:
    int hour, min, sec;
    //std::stringstream ss;
    //Chronometer chrono;

public:
    Chronometer();
    Chronometer& start_chrono(Chronometer& chrono);
    Chronometer& finish_chrono(Chronometer& chrono);
    friend std::ostream& operator<<(std::ostream& flux, Chronometer t);
    Chronometer& operator=(const Chronometer& other);
    ~Chronometer();
};

#endif
My program runs my two chronometers in parallel with each other, but they are still tied to my while loop. For example, here I print "hello-world" once, but I have to wait for my threads to stop before the second "hello-world" message is printed in my while loop.
My question is: how do I make my threads run in parallel and be completely independent of the other instructions in my while loop?

Tzig had a similar idea to mine, that is, using condition variables and such.
I've made a full working example, including comments, and hopefully written for readability.
#include <chrono>
#include <iostream>
#include <iomanip>
#include <mutex>
#include <atomic>
#include <future>
#include <condition_variable>
//-----------------------------------------------------------------------------------------------------
// state of the timer.
enum class State
{
idle,
starting,
running,
stopping,
stopped
};
//-----------------------------------------------------------------------------------------------------
// helper class for use of std::condition_variable, makes code more readable
// takes into account the pitfalls of condition variables :
// https://www.modernescpp.com/index.php/c-core-guidelines-be-aware-of-the-traps-of-condition-variables
template<typename T>
class StateVariable
{
public:
StateVariable() = delete;
StateVariable(const StateVariable&) = delete;
StateVariable(StateVariable&&) = delete;
StateVariable& operator=(const StateVariable&) = delete;
explicit StateVariable(const T& value) :
m_value{ value }
{
}
void operator=(const T& value) noexcept
{
{
std::unique_lock<std::mutex> lock(m_value_mutex);
m_value = value;
}
m_value_changed.notify_all();
}
// atomic check and set
T set_if(const T& from_value, const T& to_value) noexcept
{
{
std::unique_lock<std::mutex> lock(m_value_mutex);
if (m_value != from_value) return from_value;
m_value = to_value;
}
m_value_changed.notify_all();
return to_value;
}
const bool try_wait_for(const T& value, const std::chrono::steady_clock::duration& duration) const noexcept
{
auto pred = [this, value] { return (m_value == value); };
std::unique_lock<std::mutex> lock(m_value_mutex);
if (pred()) return true;
return m_value_changed.wait_for(lock, duration, pred);
}
void wait_for(const T& value) const
{
try_wait_for(value, std::chrono::steady_clock::duration::max());
}
private:
// mutables so I could make the const promises on wait
// that they wont change the observable state (m_value)
// of this class.
mutable std::mutex m_value_mutex;
mutable std::condition_variable m_value_changed;
std::atomic<T> m_value;
};
//-----------------------------------------------------------------------------------------------------
// helper class for storing elapsed time, helps with readability later on
class ElapsedTime
{
public:
explicit ElapsedTime(const std::chrono::steady_clock::duration& duration) :
m_duration{ duration }
{
}
auto hours() const
{
return std::chrono::duration_cast<std::chrono::hours>(m_duration).count();
}
auto minutes() const
{
return (std::chrono::duration_cast<std::chrono::minutes>(m_duration).count() % 60);
}
auto seconds() const
{
return (std::chrono::duration_cast<std::chrono::seconds>(m_duration).count() % 60);
}
private:
std::chrono::steady_clock::duration m_duration;
};
//-----------------------------------------------------------------------------------------------------
// formatter for ElapsedTime
std::ostream& operator<<(std::ostream& os, const ElapsedTime& t)
{
os << std::setfill('0') << std::setw(2) << t.hours() << ':';
os << std::setfill('0') << std::setw(2) << t.minutes() << ':';
os << std::setfill('0') << std::setw(2) << t.seconds();
return os;
}
//-----------------------------------------------------------------------------------------------------
// ChronoMeter class
// note I use std::chrono classes
class ChronoMeter final
{
public:
ChronoMeter() :
m_state{ State::idle },
m_duration{ std::chrono::steady_clock::duration::min() }
{
};
ChronoMeter(const ChronoMeter&) = delete;
ChronoMeter(ChronoMeter&&) = delete;
ChronoMeter& operator=(const ChronoMeter&) = delete;
void Start()
{
m_start_time = std::chrono::steady_clock::now();
// exercise for the reader, also allow stopped Chronometers to be restarted.
// for now just this simple state model
if (m_state.set_if(State::idle, State::starting) != State::starting)
{
throw std::runtime_error("only an idle ChronoMeter can be started");
}
// it is okay to capture "this" because the destructor of the
// chronometer synchronizes with termination of this thread through the future
m_future = std::async(std::launch::async, [this]
{
// Set indication that the thread has really started.
// this is important because when std::async returns, this thread exists
// but may not have been scheduled yet.
m_state = State::running;
do
{
// assigning a value to m_duration isn't atomic so protect it.
// we might be reading the value on another thread which might
// result in reading an intermediate state.
std::scoped_lock<std::mutex> lock{ m_data_mtx };
m_duration = std::chrono::steady_clock::now() - m_start_time;
// using a statevariable to check for stopping means it can respond
// during the one second delay and stop immediately.
// this is an advantage over using sleep
} while (!m_state.try_wait_for(State::stopping, std::chrono::seconds(1)));
m_state = State::stopped;
});
// Wait for the thread to have really started
// this way we have a clear post condition for start
m_state.wait_for(State::running);
}
void Stop()
{
// only allow a running Chronometer to be stopped.
// in all other states Stop does nothing
if (m_state.set_if(State::running, State::stopping) == State::stopping)
{
// synchronization with stopped state, as set by other thread
m_state.wait_for(State::stopped);
// future get is not really needed for synchronization.
// but if thread threw an exception it's rethrown here
m_future.get();
}
}
~ChronoMeter()
{
// Automatically stop thread if this hasn't already happened.
Stop();
}
const ElapsedTime Elapsed() const
{
std::scoped_lock<std::mutex> lock{ m_data_mtx };
return ElapsedTime{ m_duration };
}
private:
std::future<void> m_future;
StateVariable<State> m_state;
mutable std::mutex m_data_mtx;
std::chrono::steady_clock::time_point m_start_time;
std::chrono::steady_clock::duration m_duration;
};
int main()
{
ChronoMeter meter1;
ChronoMeter meter2;
meter1.Start();
std::this_thread::sleep_for(std::chrono::seconds(5));
auto elapsed_1_1 = meter1.Elapsed();
std::cout << "Meter 1 elapsed time " << elapsed_1_1 << std::endl;
meter2.Start();
std::this_thread::sleep_for(std::chrono::seconds(4));
auto elapsed_1_2 = meter1.Elapsed();
auto elapsed_2 = meter2.Elapsed();
std::cout << "Meter 1 elapsed time " << elapsed_1_2 << std::endl;
std::cout << "Meter 2 elapsed time " << elapsed_2 << std::endl;
meter1.Stop();
// not stopping meter2 (and its thread) explicitly, this is done safely by the destructor if needed
return 0;
}

I usually solve this problem by making the multithreaded object handle everything that has to do with multithreading. Here's how I solved it in your case (I ended up rewriting a lot of things, so maybe the behavior isn't exactly the one you want; you can use my code as a starting point):
main.cpp:
#include <iostream>
#include <vector>
#include "mychrono.hpp"

int main()
{
    std::vector<Chronometer*> car_crono;
    for(int i = 0; i < 2; i++)
    {
        car_crono.push_back(new Chronometer);
    }

    while (1) {
        // std::cout << "hello-world" << std::endl;
        Chronometer::Time t = car_crono[0]->get_time();
        if(t.sec >= 10)
            car_crono[0]->reset_chrono();
        std::cout << "Seconds of T0: " << t.sec << std::endl;
        std::cout << "T1: " << car_crono[1]->to_string() << std::endl;
    }
    car_crono.clear();
}
mychrono.hpp:
#ifndef mychrono_hpp
#define mychrono_hpp

#include <iostream>
#include <string>
#include <thread>
#include <memory>
#include <condition_variable>
#include <mutex>
#include <atomic>

class Chronometer
{
public:
    struct Time {
        int hour;
        int min;
        int sec;
    };

    Chronometer();
    void reset_chrono();
    friend std::ostream& operator<<(std::ostream& flux, Chronometer& t);
    Chronometer& operator=(const Chronometer& other);
    std::string to_string();
    Time get_time();
    ~Chronometer();

private:
    Time currentTime;
    std::mutex timeMutex;
    std::condition_variable conditionVariable;
    std::unique_ptr<std::thread> thread;
    std::mutex CVMutex;
    std::atomic<bool> exitNow;

    void thread_function();
};
#endif
mychrono.cpp:
#include "mychrono.hpp"
Chronometer::Chronometer() : currentTime.hour(0), currentTime.min(0), currentTime.sec(0)
{
thread.reset(new std::thread(&Chronometer::thread_function, this));
}
void Chronometer::reset_chrono()
{
std::lock_guard<std::mutex> lock(timeMutex);
currentTime.hour = 0;
currentTime.sec = 0;
currentTime.min = 0;
}
std::ostream& operator<<(std::ostream& flux, Chronometer& t)
{
flux << t.to_string();
return flux;
}
Chronometer& Chronometer::operator=(const Chronometer& other)
{
// Guard self assignment
//if (this == &other)
return *this;
}
std::string Chronometer::to_string()
{
std::lock_guard<std::mutex> lock(timeMutex);
return std::to_string(currentTime.hour) + ":" + std::to_string(currentTime.min) + ":" + std::to_string(currentTime.sec);
}
Time Chronometer::get_time()
{
return currentTime;
}
Chronometer::~Chronometer()
{
exitNow = true;
{
std::unique_lock<std::mutex> lock(CVMutex);
lock.unlock();
conditionVariable.notify_all();
}
thread->join();
}
void Chronometer::thread_function()
{
std::unique_lock<std::mutex> waitLock(CVMutex);
while(!exitNow)
{
sec++;
if(currentTime.sec > 59) {
std::lock_guard<std::mutex> lock(timeMutex);
currentTime.min++;
currentTime.sec = 0;
}
if(currentTime.min > 59) {
std::lock_guard<std::mutex> lock(timeMutex);
currentTime.hour++;
currentTime.sec = 0;
currentTime.min = 0;
}
// std::cout << "chrono: " << *this << std::endl; //Not thread safe be careful
conditionVariable.wait_for(waitLock, std::chrono::seconds(1));
}
}
EDIT: About your latest comment: you don't need to reset a chrono in its destructor, as the data will be destroyed anyway. If you want to reset the counter while it's running, you want to call Chronometer::reset_chrono() from your main function.
For the second part of your comment, I added a get_time function to the code (I also added a mutex to avoid data races; I completely forgot about that when I wrote the original answer). When you want to get the current time of a chrono from the main function, you just call get_time() and use the struct it returns to get the info you want.
I added a small example to show how to use both functions. As you can see, the main function doesn't even need to know what threads are!
I may be wrong, but from the questions you ask I feel you may not be used to how multithreading works. It's a very difficult concept and one of the few I feel you can't learn only through experience; if that's the case, you might want to learn about it from dedicated sites such as this one. I think I pieced together that you speak French; here's a really good article (that was never finished, apparently) about the theory of it, and another one in French, more about the specifics of C++. If you understand the core concepts and just have a hard time with my code, I plan on commenting it all, but for now Pepijn Kramer did a great job explaining what they did in their response.

Related

Simple worker thread in C++ class

Assume that there is a class which contains some data and calculates some results given queries, and the queries take a relatively large amount of time.
An example class (everything dummy) is:
#include <vector>
#include <numeric>
#include <chrono>
#include <thread>
struct do_some_work
{
do_some_work(std::vector<int> data)
: _data(std::move(data))
, _current_query(0)
, _last_calculated_result(0)
{}
void update_query(size_t x) {
if (x < _data.size()) {
_current_query = x;
recalculate_result();
}
}
int get_result() const {
return _last_calculated_result;
}
private:
void recalculate_result() {
//dummy some work here
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
_last_calculated_result = std::accumulate(_data.cbegin(), _data.cbegin() + _current_query, 0);
}
std::vector<int> const _data;
size_t _current_query;
int _last_calculated_result;
};
and this can be used in the main code like:
#include <algorithm>
#include <iostream>
int main()
{
//make some dummy data
std::vector<int> test_data(20, 0);
std::iota(test_data.begin(), test_data.end(), 0);
{
do_some_work work(test_data);
for (size_t i = 0; i < test_data.size(); ++i) {
work.update_query(i);
std::cout << "result = {" << i << "," << work.get_result() << "}" << std::endl;
}
}
}
The above will wait in the main function a lot.
Now, assume we want to run this querying in a tight loop (say, a GUI) and only care about getting a "recent" result quickly when we query.
So, we want to move the work to a separate thread which calculates the results and updates them, and when we ask for the result, we get the last calculated one. That is, we want to change the do_some_work class to do its work on a thread, with minimal changes (essentially, find a pattern of changes that can be applied to (mostly) any class of this type).
My stab at this is the following:
#include <vector>
#include <numeric>
#include <mutex>
#include <thread>
#include <condition_variable>
#include <iostream>
struct do_lots_of_work
{
do_lots_of_work(std::vector<int> data)
: _data(std::move(data))
, _current_query(0)
, _last_calculated_result(0)
, _worker()
, _data_mtx()
, _result_mtx()
, _cv()
, _do_exit(false)
, _work_available(false)
{
start_worker();
}
void update_query(size_t x) {
{
if (x < _data.size()) {
std::lock_guard<std::mutex> lck(_data_mtx);
_current_query = x;
_work_available = true;
_cv.notify_one();
}
}
}
int get_result() const {
std::lock_guard<std::mutex> lck(_result_mtx);
return _last_calculated_result;
}
~do_lots_of_work() {
stop_worker();
}
private:
void start_worker() {
if (!_worker.joinable()) {
std::cout << "starting worker..." << std::endl;
_worker = std::thread(&do_lots_of_work::worker_loop, this);
}
}
void stop_worker() {
std::cout << "worker stopping..." << std::endl;
if (_worker.joinable()) {
std::unique_lock<std::mutex> lck(_data_mtx);
_do_exit = true;
lck.unlock();
_cv.notify_one();
_worker.join();
}
std::cout << "worker stopped" << std::endl;
}
void worker_loop() {
std::cout << "worker started" << std::endl;
while (true) {
std::unique_lock<std::mutex> lck(_data_mtx);
_cv.wait(lck, [this]() {return _work_available || _do_exit; });
if (_do_exit) { break; }
if (_work_available) {
_work_available = false;
int query = _current_query; //take local copy
lck.unlock(); //unlock before doing lots of work.
recalculate_result(query);
}
}
}
void recalculate_result(int query) {
//dummy lots of work here
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
int const result = std::accumulate(_data.cbegin(), _data.cbegin() + query, 0);
set_result(result);
}
void set_result(int result) {
std::lock_guard<std::mutex> lck(_result_mtx);
_last_calculated_result = result;
}
std::vector<int> const _data;
size_t _current_query;
int _last_calculated_result;
std::thread _worker;
mutable std::mutex _data_mtx;
mutable std::mutex _result_mtx;
std::condition_variable _cv;
bool _do_exit;
bool _work_available;
};
and the usage is (example):
#include <algorithm>
int main()
{
//make some dummy data
std::vector<int> test_data(20, 0);
std::iota(test_data.begin(), test_data.end(), 0);
{
do_lots_of_work work(test_data);
for (size_t i = 0; i < test_data.size(); ++i) {
work.update_query(i);
std::this_thread::sleep_for(std::chrono::milliseconds(500));
std::cout << "result = {" << i << "," << work.get_result() << "}" << std::endl;
}
}
}
This seems to work, giving the last result, not stopping the main function etc.
But it looks like a LOT of changes are required to add a worker thread to a simple class like do_some_work: two mutexes (one for the worker/main interaction data and one for the result), one condition_variable, one more-work-available flag and one do-exit flag, which is quite a bit. I guess we don't want an async kind of mechanism, because we don't want to potentially launch a new thread every time; a sketch of that rejected approach follows below.
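For reference, the kind of async-per-query mechanism being ruled out here could look roughly like the hypothetical sketch below (do_work_with_async is not part of the proposed solution, just an illustration of why this route is unattractive):
#include <cstddef>
#include <future>
#include <numeric>
#include <vector>

struct do_work_with_async
{
    explicit do_work_with_async(std::vector<int> data) : _data(std::move(data)) {}

    void update_query(std::size_t x) {
        if (x < _data.size()) {
            // every call may spawn a brand-new thread; worse, move-assigning over a
            // previous std::async future blocks until that older task has finished,
            // so this quietly degenerates towards synchronous behaviour
            _pending = std::async(std::launch::async, [this, x] {
                return std::accumulate(_data.cbegin(), _data.cbegin() + x, 0);
            });
        }
    }

    int get_result() {
        // blocks if the latest task has not finished yet, and get() invalidates the future
        return _pending.valid() ? _pending.get() : 0;
    }

private:
    std::vector<int> const _data;
    std::future<int> _pending;
};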
Now, I am not sure if there is a MUCH simpler pattern to make this kind of change, but it feels like there should be. A kind of pattern that can be used to off-load work to a thread.
So finally, my question is, can do_some_work be converted into do_lots_of_work in a much simpler way than the implementation above?
Edit (Solution 1) ThreadPool based:
Using a thread pool, the worker loop can be skipped; we need two mutexes, one for the result and one for the query. We lock when updating the query, lock when getting the result, and take both locks in recalculate (take a local copy of the query, then write to the result).
Note: also, when pushing work onto the queue, since we do not care about the older results, we can clear the work queue first.
Example implementation (using the CTPL threadpool)
#include "CTPL\ctpl_stl.h"
#include <vector>
#include <mutex>
struct do_lots_of_work_with_threadpool
{
do_lots_of_work_with_threadpool(std::vector<int> data)
: _data(std::move(data))
, _current_query(0)
, _last_calculated_result(0)
, _pool(1)
, _result_mtx()
, _query_mtx()
{
}
void update_query(size_t x) {
if (x < _data.size()) {
std::lock_guard<std::mutex> lck(_query_mtx);
_current_query = x;
}
_pool.clear_queue(); // clear the queue, as we don't want to calculate any out-of-date results.
_pool.push([this](int id) { recalculate_result(); });
}
int get_result() const {
std::lock_guard<std::mutex> lck(_result_mtx);
return _last_calculated_result;
}
private:
void recalculate_result() {
//dummy some work here
size_t query;
{
std::lock_guard<std::mutex> lck(_query_mtx);
query = _current_query;
}
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
int result = std::accumulate(_data.cbegin(), _data.cbegin() + query, 0);
{
std::lock_guard<std::mutex> lck(_result_mtx);
_last_calculated_result = result;
}
}
std::vector<int> const _data;
size_t _current_query;
int _last_calculated_result;
ctpl::thread_pool _pool;
mutable std::mutex _result_mtx;
mutable std::mutex _query_mtx;
};
Edit (Solution 2) With ThreadPool and Atomic:
This solution changes the shared variables to atomics, so we do not need any mutexes and do not have to consider taking/releasing locks etc. This is much simpler and very close to the original class (it of course assumes a thread-pool type exists somewhere, as it is not part of the standard).
#include "CTPL\ctpl_stl.h"
#include <vector>
#include <mutex>
#include <atomic>
struct do_lots_of_work_with_threadpool_and_atomics
{
do_lots_of_work_with_threadpool_and_atomics(std::vector<int> data)
: _data(std::move(data))
, _current_query(0)
, _last_calculated_result(0)
, _pool(1)
{
}
void update_query(size_t x) {
if (x < _data.size()) {
_current_query.store(x);
}
_pool.clear_queue(); // clear the queue, as we don't want to calculate any out-of-date results.
_pool.push([this](int id) { recalculate_result(); });
}
int get_result() const {
return _last_calculated_result.load();
}
private:
void recalculate_result() {
//dummy some work here
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
_last_calculated_result.store(std::accumulate(_data.cbegin(), _data.cbegin() + _current_query.load(), 0));
}
std::vector<int> const _data;
std::atomic<size_t> _current_query;
std::atomic<int> _last_calculated_result;
ctpl::thread_pool _pool;
};

Pointer references in Lambda

struct taskinfo
{
long int id;
bool cancel;
std::function<void()> func;
std::chrono::system_clock::time_point time;
std::chrono::system_clock::duration interval;
taskinfo(){ }
bool operator<(const taskinfo& task) const {
return time > task.time;
}
public:
taskinfo(long int id, std::function<void()>&& f, const std::chrono::system_clock::time_point& t)
: id(id), func(f),
time(t)
{
cancel = false;
}
};
....
std::priority_queue<taskinfo, std::vector<taskinfo>> tasks;
void at(taskinfo** task){
std::function<void()> threadFunc = [task]() { std::thread((*task)->func).detach(); };
(*task)->func = threadFunc;
tasks.push(**task);
}
In main()..
std::vector<taskinfo*> requests;
for(int i=1; i <=5; i++ )
{
taskinfo* t = new taskinfo(i, [i]{ timeoutFunc(i); }, std::chrono::system_clock::now() + std::chrono::milliseconds(timeout));
tT.at(&t);
requests.push_back(t);
std::cout << "Request " << i << " Registered.... Time:" << std::chrono::system_clock::now().time_since_epoch().count() << std::endl;
}
I think I am missing something here: when I pop the function out of the queue to execute it, the function may be empty, and nothing gets executed.
If I copy the taskinfo locally:
void at(taskinfo** task){
taskinfo t = **task;
//If I replace everything else with t, the function works fine, but
//I need to modify the same reference
}
How can I work with a pointer reference here in the lambda?
I have added the complete code of what I am trying to do here.
Complete Code:
#include <functional>
#include <chrono>
#include <future>
#include <queue>
#include <thread>
#include <memory>
#include <sstream>
#include <assert.h>
#include <iostream>
#include <ctime>
#include <sys/time.h>
#include <unistd.h>
#include <limits.h>
#define TIMER_NO_TASK_SLEEP_TIME 100
struct taskinfo
{
long int id;
bool cancel;
std::function<void()> func;
std::chrono::system_clock::time_point time;
std::chrono::system_clock::duration interval;
taskinfo(){ }
bool operator<(const taskinfo& task) const {
return time > task.time;
}
public:
taskinfo(long int id, std::function<void()>&& f, const std::chrono::system_clock::time_point& t)
: id(id), func(f),
time(t)
{
cancel = false;
}
};
class TimerTask
{
private:
std::priority_queue<taskinfo, std::vector<taskinfo>> tasks;
std::unique_ptr<std::thread> thread;
bool keepRunning;
public:
TimerTask()
:keepRunning(true),
thread(new std::thread([this]() {
while(keepRunning)
{
auto now = std::chrono::system_clock::now();
while(!tasks.empty() && tasks.top().time <= now) {
if(!tasks.top().cancel)
{
tasks.top().func();
}
tasks.pop();
}
if(tasks.empty()) {
std::this_thread::sleep_for(std::chrono::milliseconds(TIMER_NO_TASK_SLEEP_TIME));
} else {
std::this_thread::sleep_for(tasks.top().time - std::chrono::system_clock::now());
}
}
})){ }
~TimerTask()
{
keepRunning = false;
thread->join();
}
//Execute a task when the timer times out
void at(taskinfo** task){
std::function<void()> threadFunc = [task]() { std::thread((*task)->func).detach(); };
(*task)->func = threadFunc;
tasks.push(**task);
}
//Cancel the particular task with a flag
void cancel(taskinfo** task){
(* task)->cancel = true;
}
};
//The return type of the task must be void
void timeoutFunc(int id)
{
std::cout << "Request " << id << " Timeout.... Executed Timeout Function Time:" << std::chrono::system_clock::now().time_since_epoch().count() << std::endl;
}
int main(int argc, char* argv[])
{
if(argc != 2)
{
std::cout << "\n Usage <Process> <Timeout>" << std::endl;
return 0;
}
int timeout = atoi(argv[1]);
TimerTask tT;
std::vector<taskinfo*> requests;
requests.reserve(1000);
for(int i=1; i <=5; i++ )
{
taskinfo* t = new taskinfo(i, [i]{ timeoutFunc(i); }, std::chrono::system_clock::now() + std::chrono::milliseconds(timeout));
tT.at(&t);
requests.push_back(t);
std::cout << "Request " << i << " Registered.... Time:" << std::chrono::system_clock::now().time_since_epoch().count() << std::endl;
}
while(1) sleep(60);
return 0;
}
You are passing a pointer to a pointer which no longer exists:
taskinfo* t = new taskinfo(i, [i]{ timeoutFunc(i); }, std::chrono::system_clock::now() + std::chrono::milliseconds(timeout));
tT.at(&t);
requests.push_back(t);
In the above code t is a local variable instantiated for each iteration through the loop. You get a new t every time.
The call tT.at(&t); takes the address of this short-lived local variable.
The fix is to call tT.at(t); at the calling site. Notice how this is just like requests.push_back(t);
Also:
//Execute a task when the timer times out
void TimerTask::at(taskinfo* task){
std::function<void()> threadFunc = [task]() { std::thread(task->func).detach(); };
task->func = threadFunc;
...
}

std::thread throwing "resource dead lock would occur"

I have a list of objects; each object has member variables which are calculated by an "update" function. I want to update the objects in parallel, that is, I want to create a thread for each object to execute its update function.
Is this a reasonable thing to do? Any reasons why this may not be a good idea?
Below is a program which attempts to do what I described. This is a complete program, so you should be able to run it (I'm using VS2015). The goal is to update each object in parallel. The problem is that once the update function completes, the thread throws a "resource dead lock would occur" exception and aborts.
Where am I going wrong?
#include <iostream>
#include <thread>
#include <vector>
#include <algorithm>
#include <thread>
#include <mutex>
#include <chrono>
class Object
{
public:
Object(int sleepTime, unsigned int id)
: m_pSleepTime(sleepTime), m_pId(id), m_pValue(0) {}
void update()
{
if (!isLocked()) // if an object is not locked
{
// create a thread to perform it's update
m_pThread.reset(new std::thread(&Object::_update, this));
}
}
unsigned int getId()
{
return m_pId;
}
unsigned int getValue()
{
return m_pValue;
}
bool isLocked()
{
bool mutexStatus = m_pMutex.try_lock();
if (mutexStatus) // if mutex is locked successfully (meaning it was unlocked)
{
m_pMutex.unlock();
return false;
}
else // if mutex is locked
{
return true;
}
}
private:
// private update function which actually does work
void _update()
{
m_pMutex.lock();
{
std::cout << "thread " << m_pId << " sleeping for " << m_pSleepTime << std::endl;
std::chrono::milliseconds duration(m_pSleepTime);
std::this_thread::sleep_for(duration);
m_pValue = m_pId * 10;
}
m_pMutex.unlock();
try
{
m_pThread->join();
}
catch (const std::exception& e)
{
std::cout << e.what() << std::endl; // throws "resource dead lock would occur"
}
}
unsigned int m_pSleepTime;
unsigned int m_pId;
unsigned int m_pValue;
std::mutex m_pMutex;
std::shared_ptr<std::thread> m_pThread; // store reference to thread so it doesn't go out of scope when update() returns
};
typedef std::shared_ptr<Object> ObjectPtr;
class ObjectManager
{
public:
ObjectManager()
: m_pNumObjects(0){}
void updateObjects()
{
for (int i = 0; i < m_pNumObjects; ++i)
{
m_pObjects[i]->update();
}
}
void removeObjectByIndex(int index)
{
m_pObjects.erase(m_pObjects.begin() + index);
}
void addObject(ObjectPtr objPtr)
{
m_pObjects.push_back(objPtr);
m_pNumObjects++;
}
ObjectPtr getObjectByIndex(unsigned int index)
{
return m_pObjects[index];
}
private:
std::vector<ObjectPtr> m_pObjects;
int m_pNumObjects;
};
int main()
{
int numObjects = 2;
// Generate sleep time for each object
std::vector<int> objectSleepTimes;
objectSleepTimes.reserve(numObjects);
for (int i = 0; i < numObjects; ++i)
objectSleepTimes.push_back(rand());
ObjectManager mgr;
// Create some objects
for (int i = 0; i < numObjects; ++i)
mgr.addObject(std::make_shared<Object>(objectSleepTimes[i], i));
// Print expected object completion order
// Sort from smallest to largest
std::sort(objectSleepTimes.begin(), objectSleepTimes.end());
for (int i = 0; i < numObjects; ++i)
std::cout << objectSleepTimes[i] << ", ";
std::cout << std::endl;
// Update objects
mgr.updateObjects();
int numCompleted = 0; // number of objects which finished updating
while (numCompleted != numObjects)
{
for (int i = 0; i < numObjects; ++i)
{
auto objectRef = mgr.getObjectByIndex(i);
if (!objectRef->isLocked()) // if object is not locked, it is finished updating
{
std::cout << "Object " << objectRef->getId() << " completed. Value = " << objectRef->getValue() << std::endl;
mgr.removeObjectByIndex(i);
numCompleted++;
}
}
}
system("pause");
}
Looks like you've got a thread that is trying to join itself.
While I was trying to understand your solution, I simplified it a lot, and I came to the conclusion that you are using the std::thread::join() method in the wrong way.
std::thread provides the ability to wait for its completion (a non-spinning wait). In your example you wait for thread completion in an infinite loop (a spin wait), which will consume CPU time heavily.
You should call std::thread::join() from another thread to wait for the thread's completion. The mutex in Object in your example is not necessary. Moreover, you are missing a mutex to synchronize access to std::cout, which is not thread-safe. I hope the example below will help.
#include <iostream>
#include <thread>
#include <vector>
#include <algorithm>
#include <mutex>
#include <chrono>
#include <memory>
#include <cstdlib>
#include <ctime>
#include <cassert>
// cout is not thread-safe
std::recursive_mutex cout_mutex;
class Object {
public:
Object(int sleepTime, unsigned int id)
: _sleepTime(sleepTime), _id(id), _value(0) {}
void runUpdate() {
if (!_thread.joinable())
_thread = std::thread(&Object::_update, this);
}
void waitForResult() {
_thread.join();
}
unsigned int getId() const { return _id; }
unsigned int getValue() const { return _value; }
private:
void _update() {
{
{
std::lock_guard<std::recursive_mutex> lock(cout_mutex);
std::cout << "thread " << _id << " sleeping for " << _sleepTime << std::endl;
}
std::this_thread::sleep_for(std::chrono::seconds(_sleepTime));
_value = _id * 10;
}
std::lock_guard<std::recursive_mutex> lock(cout_mutex);
std::cout << "Object " << getId() << " completed. Value = " << getValue() << std::endl;
}
unsigned int _sleepTime;
unsigned int _id;
unsigned int _value;
std::thread _thread;
};
class ObjectManager : public std::vector<std::shared_ptr<Object>> {
public:
void runUpdate() {
for (auto it = this->begin(); it != this->end(); ++it)
(*it)->runUpdate();
}
void waitForAll() {
auto it = this->begin();
while (it != this->end()) {
(*it)->waitForResult();
it = this->erase(it);
}
}
};
int main(int argc, char* argv[]) {
enum {
TEST_OBJECTS_NUM = 2,
};
srand(static_cast<unsigned int>(time(nullptr)));
ObjectManager mgr;
// Generate sleep time for each object
std::vector<int> objectSleepTimes;
objectSleepTimes.reserve(TEST_OBJECTS_NUM);
for (int i = 0; i < TEST_OBJECTS_NUM; ++i)
objectSleepTimes.push_back(rand() * 9 / RAND_MAX + 1); // 1..10 seconds
// Create some objects
for (int i = 0; i < TEST_OBJECTS_NUM; ++i)
mgr.push_back(std::make_shared<Object>(objectSleepTimes[i], i));
assert(mgr.size() == TEST_OBJECTS_NUM);
// Print expected object completion order
// Sort from smallest to largest
std::sort(objectSleepTimes.begin(), objectSleepTimes.end());
for (size_t i = 0; i < mgr.size(); ++i)
std::cout << objectSleepTimes[i] << ", ";
std::cout << std::endl;
// Update objects
mgr.runUpdate();
mgr.waitForAll();
//system("pause"); // use Ctrl+F5 to run the app instead. That's more reliable in case of sudden app exit.
}
About whether it is a reasonable thing to do...
A better approach is to create an object update queue. Objects that need to be updated are added to this queue, which can be serviced by a group of threads instead of one thread per object (a minimal sketch follows the list below).
The benefits are:
No 1-to-1 correspondence between threads and objects. Creating a thread is a heavy operation, probably more expensive than most update code for a single object.
Supports thousands of objects: with your solution you would need to create thousands of threads, which will most likely exceed your OS's capacity.
Can support additional features like declaring dependencies between objects or updating a group of related objects as one operation.
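For illustration, a minimal sketch of such an update queue might look like this (UpdateQueue, its fixed worker count and the enqueue API are assumptions of this sketch, not something from the code above):
#include <condition_variable>
#include <cstddef>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

// A small pool of worker threads pulls "update jobs" from a shared queue,
// instead of creating one thread per object.
class UpdateQueue {
public:
    explicit UpdateQueue(std::size_t workerCount) {
        for (std::size_t i = 0; i < workerCount; ++i)
            workers_.emplace_back([this] { workerLoop(); });
    }

    ~UpdateQueue() {
        {
            std::lock_guard<std::mutex> lock(mutex_);
            done_ = true;
        }
        cv_.notify_all();
        for (auto& w : workers_) w.join();
    }

    // Objects that need updating are enqueued as jobs, e.g.
    //   queue.enqueue([obj] { obj->update(); });
    void enqueue(std::function<void()> job) {
        {
            std::lock_guard<std::mutex> lock(mutex_);
            jobs_.push(std::move(job));
        }
        cv_.notify_one();
    }

private:
    void workerLoop() {
        for (;;) {
            std::function<void()> job;
            {
                std::unique_lock<std::mutex> lock(mutex_);
                cv_.wait(lock, [this] { return done_ || !jobs_.empty(); });
                if (done_ && jobs_.empty()) return;
                job = std::move(jobs_.front());
                jobs_.pop();
            }
            job(); // run the object's update outside the lock
        }
    }

    std::mutex mutex_;
    std::condition_variable cv_;
    std::queue<std::function<void()>> jobs_;
    std::vector<std::thread> workers_;
    bool done_ = false;
};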

std::future.get() sometimes stuck in os x

I have two threads: one thread should receive and process requests from the other, and the second should submit requests and receive a response synchronously. I tried the following scheme: a queue of pairs (value, promise). The requesting thread creates a promise, pushes it into the synchronized queue, and waits for the return value via future.get().
The problem is that sometimes the thread gets stuck on future.get(), but when I pause program execution and continue, it works correctly again. These hangs occur randomly.
FutureQueue.h
#ifndef FutureQueue_h
#define FutureQueue_h
#include <queue>
#include <future>
#include <thread>
#include <mutex>
#include <condition_variable>
template <typename T, typename R>
class Work{
public:
Work(){
}
Work(T value, std::promise<R>* promise){
m_value = value;
m_p_promise = promise;
}
std::promise<R>* m_p_promise;
T m_value;
public:
T getValue(){
return m_value;
}
void setResult(R result){
m_p_promise->set_value(result);
}
};
template <typename T, typename R>
class FutureQueue
{
public:
Work<T,R> getWork(){
auto p = pop();
return Work<T,R>(p.first,p.second);
}
R execute(T value)
{
std::promise<R> promise = std::promise<R>();
std::future<R> f = promise.get_future();
auto p = std::pair<T, std::promise<R>*>(value, &promise);
push(p);
return f.get();
}
private:
std::pair<T,std::promise<R>*> pop(){
std::unique_lock<std::mutex> mlock(mutex_);
while (queue_.empty())
{
cond_.wait(mlock);
}
auto item = queue_.front();
queue_.pop();
return item;
}
void push(const std::pair<T,std::promise<R>*>& item){
std::unique_lock<std::mutex> mlock(mutex_);
queue_.push(item);
mlock.unlock();
cond_.notify_one();
}
std::queue<std::pair<T,std::promise<R>*>> queue_;
std::mutex mutex_;
std::condition_variable cond_;
};
#endif
main.cpp
#include <iostream>
#include <thread>
#include "FutureQueue.h"
using namespace std;
atomic<bool> quit;
FutureQueue<int, int> mQueue;
void consumer(){
Work<int,int> work;
while(true){
work = mQueue.getWork();
if (quit){
break;
}
int value = work.getValue()+100;
work.setResult(value);
}
work.setResult(0);
}
int main(int argc, const char * argv[]) {
quit = false;
thread thread(consumer);
// test 2
for (int i=0;i<100000;i++){
int value = mQueue.execute(i);
cout << "input " << i <<" execute result " << value << endl;
}
quit = true;
mQueue.execute(-1);
thread.join();
return 0;
}
I don't know what's wrong with this code; maybe you can suggest a better solution. Thank you.
UPDATE
The hang occurs only on OS X with Apple LLVM version 6.0.
There is no problem under GCC on OS X and Linux, or under Visual Studio on Windows.
There are two threads, A with
for (int i=0;i<100000;i++){
int value = mQueue.execute(i);
cout << "input " << i <<" execute result " << value << endl;
}
quit = true;
mQueue.execute(-1);
B with
thread thread(consumer);
You expect B to run first and then block, because
while (queue_.empty())
{
cond_.wait(mlock);
}
B will continue to run only once A runs the following code:
cond_.notify_one();
Usually it will be OK. But if A runs cond_.notify_one() first, and B then calls cond_.wait(mlock), B will be stuck forever.

Resetting sleeping time of a thread

Suppose you have a thread like this:
void mythread()
{
int res;
while(1) {
{
boost::lock_guard<boost::mutex> lock(mylock);
res = do_my_stuff();
}
boost::this_thread::sleep(boost::posix_time::seconds(5));
}
}
and that the thread is currently sleeping. If something happens outside of the thread, I'd like to be able to increase the sleep time.
What is the best way to do it?
Using a condition_variable to signal changes to the deadline
This has the benefit of supporting scenarios where the timeout is shortened:
See it Live On Coliru
#include <thread>
#include <chrono>
#include <iostream>
#include <condition_variable>
namespace demo
{
namespace chrono = std::chrono;
using our_clock = chrono::system_clock;
struct Worker
{
mutable std::mutex _mx;
// shared, protected by _mx:
our_clock::time_point _deadline;
mutable std::condition_variable _cv;
Worker(our_clock::time_point deadline) : _deadline(deadline) {}
void operator()() const {
std::unique_lock<std::mutex> lk(_mx);
_cv.wait_until(lk, _deadline, [this]
{
std::cout << "worker: Signaled\n";
auto now = our_clock::now();
if (now >= _deadline)
return true;
std::cout << "worker: Still waiting " << chrono::duration_cast<chrono::milliseconds>(_deadline - now).count() << "ms...\n";
return false;
});
std::cout << "worker: Done\n";
}
};
}
int main()
{
using namespace demo;
Worker worker(our_clock::now() + chrono::seconds(2));
auto th = std::thread(std::cref(worker));
// after 2 seconds, update the timepoint
std::this_thread::sleep_for(chrono::seconds(1));
{
std::lock_guard<std::mutex> lk(worker._mx);
std::cout << "Updating shared delay value..." << "\n";
worker._deadline = our_clock::now() + chrono::seconds(1);
worker._cv.notify_all();
}
th.join();
}
C++11 standard library (no signaling)
Here's a standard-library only approach which uses no synchronization around the deadline.
I'd have preferred to use an atomic time_point for the deadline value itself, but that's not supported. The next best thing would have been a shared_ptr<time_point> (with std::atomic_load/std::atomic_store), but my compiler's library doesn't implement this yet (grrr).
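For reference, that shared_ptr variant would have looked roughly like the sketch below (assuming a standard library that implements the atomic shared_ptr free functions; like the offset version that follows, it cannot react early to a shortened deadline):
#include <chrono>
#include <iostream>
#include <memory>
#include <thread>

namespace demo_sp
{
    namespace chrono = std::chrono;
    using our_clock = chrono::system_clock;
    // the deadline is published by atomically swapping the pointer itself
    using shared_deadline = std::shared_ptr<our_clock::time_point>;

    void worker(shared_deadline const& deadline)
    {
        for (;;)
        {
            auto snapshot = std::atomic_load(&deadline); // atomic read of the current deadline
            if (our_clock::now() >= *snapshot) break;
            std::this_thread::sleep_until(*snapshot);    // loops and re-reads in case a later deadline was published
        }
        std::cout << "worker: Done\n";
    }
}

int main()
{
    using namespace demo_sp;
    auto deadline = std::make_shared<our_clock::time_point>(our_clock::now() + chrono::seconds(2));
    auto th = std::thread(worker, std::cref(deadline));

    // after 1 second, publish a new deadline by swapping the pointer, not by mutating the old object
    std::this_thread::sleep_for(chrono::seconds(1));
    std::atomic_store(&deadline,
        std::make_shared<our_clock::time_point>(our_clock::now() + chrono::seconds(3)));
    th.join();
}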
So, instead, I share the 'offset' since a start time:
#include <thread>
#include <chrono>
#include <iostream>
#include <atomic>
namespace demo
{
namespace chrono = std::chrono;
using our_clock = chrono::system_clock;
using shared_delay = std::atomic<our_clock::duration>;
void worker(our_clock::time_point const start, shared_delay const& delay)
{
for (our_clock::time_point deadline; our_clock::now() < (deadline = start + delay.load());)
{
std::cout << "worker: Sleeping for " << chrono::duration_cast<chrono::milliseconds>(deadline - our_clock::now()).count() << "ms...\n";
std::this_thread::sleep_until(deadline);
}
std::cout << "worker: Done\n";
}
}
int main()
{
using namespace demo;
shared_delay delay(chrono::seconds(2));
auto th = std::thread(worker, our_clock::now(), std::cref(delay));
// after 2 seconds, update the timepoint
std::this_thread::sleep_for(chrono::seconds(1));
std::cout << "Updating shared delay value..." << "\n";
delay.store(chrono::seconds(3));
th.join();
}
See it Live on Coliru
Here is a quick and dirty method:
volatile bool someCondition = false;
void callFromOtherThread(bool x) {
boost::lock_guard<boost::mutex> lock(mylock2);
someCondition = x;
}
void mythread()
{
int res;
while(1) {
bool keepwaiting = false;
{
boost::lock_guard<boost::mutex> lock(mylock2);
keepwaiting = someCondition;
}
if (!keepwaiting) {
boost::lock_guard<boost::mutex> lock(mylock);
res = do_my_stuff();
}
boost::this_thread::sleep(boost::posix_time::seconds(5));
}
}
When your thread finishes sleeping, it checks whether 'something else' happened, and if it did, it skips do_my_stuff() and goes back to sleep again.
I suspect that with somewhat more information about your use case it might be possible to rewrite things to use a condition variable; a rough sketch of that direction follows.
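A minimal sketch of that direction, using the standard library in the spirit of the condition_variable answer above (names such as set_sleep_time and the restart-the-full-delay behaviour are choices of this sketch, not a drop-in for the code in the question):
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex state_mutex;
std::condition_variable state_cv;
std::chrono::seconds sleep_time{5}; // protected by state_mutex
bool delay_changed = false;         // protected by state_mutex

void do_my_stuff() { std::cout << "working\n"; } // stand-in for the real work

// called from outside the thread to change the delay; wakes the sleeper immediately
void set_sleep_time(std::chrono::seconds s)
{
    {
        std::lock_guard<std::mutex> lock(state_mutex);
        sleep_time = s;
        delay_changed = true;
    }
    state_cv.notify_one();
}

void mythread()
{
    while (true) {
        do_my_stuff();

        std::unique_lock<std::mutex> lock(state_mutex);
        for (;;) {
            delay_changed = false;
            // waits out the current delay; if the delay changes meanwhile, wake up
            // and start waiting again with the new value
            if (!state_cv.wait_for(lock, sleep_time, [] { return delay_changed; }))
                break; // timed out: the (possibly updated) delay has fully elapsed
        }
    }
}

int main()
{
    std::thread worker(mythread);
    std::this_thread::sleep_for(std::chrono::seconds(2));
    set_sleep_time(std::chrono::seconds(10)); // lengthen the delay while the worker is waiting
    worker.join(); // note: like the original, mythread loops forever in this sketch
}
For simplicity this restarts the full new delay whenever it changes; tracking an absolute deadline instead, as in the condition_variable answer above, would preserve the already-elapsed portion.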