I am trying to chain coroutines. Foo2 will actually go async. Once Foo2 resumes, the code should execute in the order "resume Foo2" then "resume Foo1" (like two continuations). I am not clear on some details. First, when co_await b suspends, does it return a promise object immediately to the caller? Then co_await Foo2() happens. At this point I need to suspend, but I don't want to fire off thread t(run). I think I need to wrap the promise/awaiter from Foo2() somewhere before co_awaiting it in Foo1().
void run(std::coroutine_handle<> h)
{
std::cout<<std::this_thread::get_id()<<" "<<"in Run\n";
std::this_thread::sleep_for (std::chrono::seconds(5));
h.resume();
}
template<typename T>
struct task{
struct promise_type {
T val_;
task get_return_object() { return {.h_ = std::coroutine_handle<promise_type>::from_promise(*this)}; }
std::suspend_never initial_suspend() { return {}; }
std::suspend_never final_suspend() noexcept { return {}; }
void return_value(T val) { val_ = val; }
void unhandled_exception() {}
};
bool await_ready() { return false; }
void await_suspend(std::coroutine_handle<> h)
{
std::thread t(run, h);
t.detach();
}
void await_resume() { }
std::coroutine_handle<promise_type> h_;
};
template<typename T>
task<T> Foo2()
{
std::cout<<std::this_thread::get_id()<<" "<<"in Foo2\n";
task<T> b;
co_await b;
std::cout<<std::this_thread::get_id()<<" resume Foo2\n";
}
template<typename T>
task<T> Foo1()
{
std::cout<<std::this_thread::get_id()<<" "<<"in Foo1\n";
co_await Foo2<T>();
std::cout<<std::this_thread::get_id()<<" resume Foo1\n";
}
int main()
{
Foo1<int>();
std::cout<<std::this_thread::get_id()<<" ""main end\n";
std::this_thread::sleep_for (std::chrono::seconds(30));
}
does it return a promise object immediately
Promise objects are never returned. The promise object is created and stored inside the coroutine frame, and it is destroyed together with the coroutine frame.
What is returned to the caller is the coroutine's "return object". It is created from the object returned by the promise::get_return_object() function when the coroutine first suspends (or completes). Note that promise::get_return_object() is called before the body of the coroutine starts executing.
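To make that ordering observable, here is a minimal, self-contained sketch (independent of your task type; trace_task is a name invented for this illustration): the promise is constructed and get_return_object() runs before the first statement of the coroutine body.
#include <coroutine>
#include <iostream>
struct trace_task {
    struct promise_type {
        trace_task get_return_object() {
            std::cout << "1. promise created, return object built\n";
            return {};
        }
        std::suspend_never initial_suspend() { std::cout << "2. initial_suspend\n"; return {}; }
        std::suspend_never final_suspend() noexcept { return {}; }
        void return_void() {}
        void unhandled_exception() {}
    };
};
trace_task demo() {
    std::cout << "3. coroutine body runs\n";
    co_return;
}
int main() { demo(); } // prints 1, 2, 3 in that order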
At this point I need to suspend...
In order to await completion of another coroutine, the current coroutine needs to be suspended and stored somewhere so that it can be resumed after the awaited coroutine completes.
It can be stored outside, in some context that is responsible for spinning coroutines (something like io_service), or inside the awaited coroutine.
Here is an example of an asynchronous (and single-threaded) task<T> coroutine that is awaitable.
It stores the suspended coroutine inside the promise of the awaited coroutine and resumes it once the awaited coroutine's value has been computed.
#include <coroutine>
#include <optional>
#include <iostream>
#include <thread>
#include <chrono>
#include <queue>
#include <vector>
// basic coroutine single-threaded async task example
template<typename T>
struct task_promise_type;
// simple single-threaded timer for coroutines
void submit_timer_task(std::coroutine_handle<> handle, std::chrono::seconds timeout);
template<typename T>
struct task;
template<typename T>
struct task_promise_type
{
// value to be computed
// when task is not completed (coroutine didn't co_return anything yet) value is empty
std::optional<T> value;
// coroutine that is awaiting this coroutine's value
// we need to store it in order to resume it later, once this coroutine's value has been computed
std::coroutine_handle<> awaiting_coroutine;
// task is async result of our coroutine
// it is created before execution of the coroutine body
// it can be either co_awaited inside another coroutine
// or used via special interface for extracting values (is_ready and get)
task<T> get_return_object();
// there are two kinds of coroutines:
// 1. eager - that start its execution immediately
// 2. lazy - that start its execution only after 'co_await'ing on them
// here I used eager coroutine task
// eager: do not suspend before running coroutine body
std::suspend_never initial_suspend()
{
return {};
}
// store value to be returned to awaiting coroutine or accessed through 'get' function
void return_value(T val)
{
value = std::move(val);
}
void unhandled_exception()
{
// alternatively we can store the current exception in a std::exception_ptr to rethrow it later
std::terminate();
}
// when final suspend is executed 'value' is already set
// we need to suspend this coroutine in order to use value in other coroutine or through 'get' function
// otherwise promise object would be destroyed (together with stored value) and one couldn't access task result
// value
auto final_suspend() noexcept
{
// if there is a coroutine that is awaiting on this coroutine resume it
struct transfer_awaitable
{
std::coroutine_handle<> awaiting_coroutine;
// always stop at final suspend
bool await_ready() noexcept
{
return false;
}
std::coroutine_handle<> await_suspend(std::coroutine_handle<task_promise_type> h) noexcept
{
// resume the awaiting coroutine, or if there is no coroutine to resume, return a special coroutine that does
// nothing
return awaiting_coroutine ? awaiting_coroutine : std::noop_coroutine();
}
void await_resume() noexcept {}
};
return transfer_awaitable{awaiting_coroutine};
}
// there are multiple ways to add co_await into coroutines
// I used `await_transform`
// use `co_await std::chrono::seconds{n}` to wait specified amount of time
auto await_transform(std::chrono::seconds duration)
{
struct timer_awaitable
{
std::chrono::seconds duration;
// always suspend
bool await_ready()
{
return false;
}
// h is a handler for current coroutine which is suspended
void await_suspend(std::coroutine_handle<task_promise_type> h)
{
// submit suspended coroutine to be resumed after timeout
submit_timer_task(h, duration);
}
void await_resume() {}
};
return timer_awaitable{duration};
}
// also we can await other task<T>
template<typename U>
auto await_transform(task<U>& task)
{
if (!task.handle) {
throw std::runtime_error("coroutine without promise awaited");
}
if (task.handle.promise().awaiting_coroutine) {
throw std::runtime_error("coroutine already awaited");
}
struct task_awaitable
{
std::coroutine_handle<task_promise_type<U>> handle;
// check if this task already has value computed
bool await_ready()
{
return handle.promise().value.has_value();
}
// h - is a handle to coroutine that calls co_await
// store coroutine handle to be resumed after computing task value
void await_suspend(std::coroutine_handle<> h)
{
handle.promise().awaiting_coroutine = h;
}
// when ready return value to a consumer
auto await_resume()
{
return std::move(*(handle.promise().value));
}
};
return task_awaitable{task.handle};
}
};
template<typename T>
struct task
{
// declare promise type
using promise_type = task_promise_type<T>;
task(std::coroutine_handle<promise_type> handle) : handle(handle) {}
task(task&& other) : handle(std::exchange(other.handle, nullptr)) {}
task& operator=(task&& other)
{
if (handle) {
handle.destroy();
}
handle = std::exchange(other.handle, nullptr);
return *this; // required: falling off the end of a value-returning function is undefined behaviour
}
~task()
{
if (handle) {
handle.destroy();
}
}
// interface for extracting value without awaiting on it
bool is_ready() const
{
if (handle) {
return handle.promise().value.has_value();
}
return false;
}
T get()
{
if (handle) {
return std::move(*handle.promise().value);
}
throw std::runtime_error("get from task without promise");
}
std::coroutine_handle<promise_type> handle;
};
template<typename T>
task<T> task_promise_type<T>::get_return_object()
{
return {std::coroutine_handle<task_promise_type>::from_promise(*this)};
}
// simple timers
// stored timer tasks
struct timer_task
{
std::chrono::steady_clock::time_point target_time;
std::coroutine_handle<> handle;
};
// comparator
struct timer_task_before_cmp
{
bool operator()(const timer_task& left, const timer_task& right) const
{
return left.target_time > right.target_time;
}
};
std::priority_queue<timer_task, std::vector<timer_task>, timer_task_before_cmp> timers;
void submit_timer_task(std::coroutine_handle<> handle, std::chrono::seconds timeout)
{
timers.push(timer_task{std::chrono::steady_clock::now() + timeout, handle});
}
// timer loop
void loop()
{
while (!timers.empty()) {
auto& timer = timers.top();
// if it is time to run a coroutine
if (timer.target_time < std::chrono::steady_clock::now()) {
auto handle = timer.handle;
timers.pop();
handle.resume();
} else {
std::this_thread::sleep_until(timer.target_time);
}
}
}
// example
using namespace std::chrono_literals;
task<int> wait_n(int n)
{
std::cout << "before wait " << n << '\n';
co_await std::chrono::seconds(n);
std::cout << "after wait " << n << '\n';
co_return n;
}
task<int> test()
{
for (auto c : "hello world\n") {
std::cout << c;
co_await 1s;
}
std::cout << "test step 1\n";
auto w3 = wait_n(3);
std::cout << "test step 2\n";
auto w2 = wait_n(2);
std::cout << "test step 3\n";
auto w1 = wait_n(1);
std::cout << "test step 4\n";
auto r = co_await w2 + co_await w3;
std::cout << "awaiting already computed coroutine\n";
co_return co_await w1 + r;
}
// main can't be a coroutine and usually needs some sort of loop driver (io_service, or the timer loop in this example)
int main()
{
// do something
auto result = test();
// execute deferred coroutines
loop();
std::cout << "result: " << result.get();
}
Output:
hello world
test step 1
before wait 3
test step 2
before wait 2
test step 3
before wait 1
test step 4
after wait 1
after wait 2
after wait 3
awaiting already computed coroutine
result: 6
I am trying to work with Coroutines and multithreading together in C++.
In many coroutine examples, they create a new thread in the await_suspend of the co_await operator for the promise type. I want to submit to a thread pool in this function.
Here I define a co_await for future<int>.
void await_suspend(std::coroutine_handle<> handle) {
this->wait();
handle.resume();
}
I want to change this code to submit a lambda/function pointer to a thread pool. I could potentially build the thread pool myself, using Alexander Krizhanovsky's ring buffer to communicate with it, or use Boost's thread pool.
My problem is NOT the thread pool. My problem is that I don't know how to get a reference to the thread pool inside this co_await operator.
How do I pass data from the environment surrounding the operator into this await_suspend function? Here is an example of what I want to do:
void await_suspend(std::coroutine_handle<> handle) {
// how do I get "pool"? from within this function
auto res = pool.enqueue([](int x) {
this->wait();
handle.resume();
});
}
I am not an expert at C++, so I'm not sure how I would get access to pool in this operator.
Here's the full code, inspired by the GitHub gist "A simple C++ coroutine example".
#include <future>
#include <iostream>
#include <coroutine>
#include <type_traits>
#include <list>
#include <thread>
using namespace std;
template <>
struct std::coroutine_traits<std::future<int>> {
struct promise_type : std::promise<int> {
future<int> get_return_object() { return this->get_future(); }
std::suspend_never initial_suspend() noexcept { return {}; }
std::suspend_never final_suspend() noexcept { return {}; }
void return_value(int value) { this->set_value(value); }
void unhandled_exception() {
this->set_exception(std::current_exception());
}
};
};
template <>
struct std::coroutine_traits<std::future<int>, int> {
struct promise_type : std::promise<int> {
future<int> get_return_object() { return this->get_future(); }
std::suspend_never initial_suspend() noexcept { return {}; }
std::suspend_never final_suspend() noexcept { return {}; }
void return_value(int value) { this->set_value(value); }
void unhandled_exception() {
this->set_exception(std::current_exception());
}
};
};
auto operator co_await(std::future<int> future) {
struct awaiter : std::future<int> {
bool await_ready() { return false; } // suspend always
void await_suspend(std::coroutine_handle<> handle) {
this->wait();
handle.resume();
}
int await_resume() { return this->get(); }
};
return awaiter{std::move(future)};
}
future<int> async_add(int a, int b)
{
auto fut = std::async([=]() {
int c = a + b;
return c;
});
return fut;
}
future<int> async_fib(int n)
{
if (n <= 2)
co_return 1;
int a = 1;
int b = 1;
// iterate computing fib(n)
for (int i = 0; i < n - 2; ++i)
{
int c = co_await async_add(a, b);
a = b;
b = c;
}
co_return b;
}
future<int> test_async_fib()
{
for (int i = 1; i < 10; ++i)
{
int ret = co_await async_fib(i);
cout << "async_fib(" << i << ") returns " << ret << endl;
}
}
int runfib(int arg) {
auto fut = test_async_fib();
fut.wait();
return 0;
}
int run_thread() {
printf("Running thread");
return 0;
}
int main()
{
std::list<shared_ptr<std::thread>> threads = { };
for (int i = 0 ; i < 10; i++) {
printf("Creating thread\n");
std::shared_ptr<std::thread> thread = std::make_shared<std::thread>(runfib, 5);
threads.push_back(thread);
}
std::list<shared_ptr<std::thread>>::iterator it;
for (it = threads.begin(); it != threads.end(); it++) {
(*it).get()->join();
printf("Joining thread");
}
fflush(stdout);
return 0;
}
You could have a thread pool, and let the coroutine promise schedule work on it.
I have this example around that is not exactly simple, but it may do the job:
Make your coroutine return a task<T>.
task<int> async_add(int a, int b) { ... }
Let the task share a state with its coroutine_promise. The state:
is implemented as an executable, resuming the coroutine when executed, and
holds the result of the operation (e.g. a std::promise<T>).
template <typename T>
class task<T>::state : public executable {
public:
void execute() noexcept override {
handle_.resume();
}
...
private:
handle_type handle_;
std::promise<T> result_;
};
The coroutine_promise returns a task_scheduler awaiter at initial_suspend:
template <typename T>
class task<T>::coroutine_promise {
public:
auto initial_suspend() {
return task_scheduler<task<T>>{};
}
The task_scheduler awaiter schedules the state:
template <is_task task_t>
struct task_scheduler : public std::suspend_always {
void await_suspend(task_t::handle_type handle) const noexcept {
thread_pool::get_instance().schedule(handle.promise().get_state());
}
};
Wrapping it all up: calls to a coroutine will make a state be scheduled on a thread, and, whenever a thread executes that state, the coroutine will be resumed. The caller can then wait for the task's result.
auto c{ async_add(a,b) };
b = c.get_result();
[Demo]
That example is from 2018, and was built for the Coroutines TS. So it's missing a lot of stuff from the actual C++20 feature. It also assumes the presence of a lot of things that didn't make it into C++20, the most notable being the idea that std::future is an awaitable type, and that it has continuation support when coupled with std::async.
It's not, and it doesn't. So there's not much you can really learn from this example.
co_await is ultimately built on the ability to suspend execution of a function and schedule its resumption after some value has been successfully computed. The actual C++20 std::future has exactly none of the machinery needed to do that. Nor does std::async give it the ability to do so.
As such, neither is an appropriate tool for this task.
You need to build your own future type (possibly using std::promise/future internally) which has a reference to your thread pool. When you co_await on this future, it is that new future which passes off the coroutine_handle to the thread pool, doing whatever is needed to ensure that this handle does not get executed until its current set of tasks is done.
Your pool (or whatever) needs to have a queue of tasks, so that it can insert new ones to be processed after all of the current ones, and remove tasks once they've finished (as well as starting the next one). And those operations need to be properly synchronized. This queue needs to be accessible by both the future type and your coroutine's promise type.
When a coroutine ends, the promise needs to tell the queue that its current task is over and to move to the next one, or suspend the thread if there is no next one. And the promise's value needs to be forwarded to the next task. When a coroutine co_awaits on a future from your system, it needs to add that handle to the queue of tasks to be performed, possibly starting up the thread again.
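Below is a minimal, self-contained sketch of that idea, not a definitive implementation. The names simple_pool, pool_future and detached_task are invented for this illustration, and the "pool" is reduced to a single worker thread draining a job queue; a real design would still need the multi-task queue and synchronization described above.
#include <chrono>
#include <condition_variable>
#include <coroutine>
#include <deque>
#include <functional>
#include <iostream>
#include <mutex>
#include <thread>

// toy single-worker "pool": one thread draining a job queue
struct simple_pool {
    std::deque<std::function<void()>> jobs;
    std::mutex m;
    std::condition_variable cv;
    bool done = false;
    std::thread worker{[this] {
        for (;;) {
            std::function<void()> job;
            {
                std::unique_lock lk(m);
                cv.wait(lk, [this] { return done || !jobs.empty(); });
                if (jobs.empty()) return; // done and drained
                job = std::move(jobs.front());
                jobs.pop_front();
            }
            job();
        }
    }};
    void enqueue(std::function<void()> j) {
        { std::lock_guard lk(m); jobs.push_back(std::move(j)); }
        cv.notify_one();
    }
    ~simple_pool() {
        { std::lock_guard lk(m); done = true; }
        cv.notify_one();
        worker.join();
    }
};

// awaitable that keeps a reference to the pool it was constructed with;
// await_suspend hands the coroutine handle to that pool
struct pool_future {
    simple_pool& pool;
    std::function<int()> work;
    int result = 0;
    bool await_ready() const noexcept { return false; }
    void await_suspend(std::coroutine_handle<> h) {
        pool.enqueue([this, h] { result = work(); h.resume(); });
    }
    int await_resume() const noexcept { return result; }
};

// fire-and-forget coroutine type, just enough to drive the example
struct detached_task {
    struct promise_type {
        detached_task get_return_object() { return {}; }
        std::suspend_never initial_suspend() noexcept { return {}; }
        std::suspend_never final_suspend() noexcept { return {}; }
        void return_void() {}
        void unhandled_exception() { std::terminate(); }
    };
};

detached_task add_on_pool(simple_pool& pool, int a, int b) {
    int c = co_await pool_future{pool, [=] { return a + b; }};
    std::cout << "sum computed on pool thread: " << c << '\n'; // runs on the worker
}

int main() {
    simple_pool pool;
    add_on_pool(pool, 2, 3);
    std::this_thread::sleep_for(std::chrono::milliseconds(100)); // crude wait, demo only
}
The key point is simply that the awaitable captures a reference to the pool when it is constructed, so await_suspend has everything it needs to hand the coroutine_handle off.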
Happening
I want to store move-only data in the promise_type and get it back inside the coroutine. I tried returning a reference to it from await_resume(). The code compiles, but a segmentation fault occurs during execution.
How can I fix this?
Environment
OS: WSL2 Ubuntu 20.04
Compiler: gcc 10.3
Command: g++ -std=c++20 -fcoroutines main.cpp
Code
main.cpp:
#include <iostream>
#include <coroutine>
#include <memory>
struct task
{
struct promise_type
{
using data_type = std::unique_ptr<int>;
task get_return_object() noexcept
{
return task(std::coroutine_handle<promise_type>::from_promise(*this));
}
std::suspend_never initial_suspend() const noexcept { return {}; }
void unhandled_exception() {}
std::suspend_always final_suspend() const noexcept { return {}; }
data_type data; // my data
};
void set_data(promise_type::data_type&& data) // [2] set my data
{
handle.promise().data = std::forward<promise_type::data_type>(data);
}
void resume()
{
handle.resume();
}
task(std::coroutine_handle<promise_type> handle) : handle(handle) {}
private:
std::coroutine_handle<promise_type> handle;
};
struct awaitable
{
bool await_ready() { return true; }
void await_suspend(std::coroutine_handle<task::promise_type> handle) { this->handle = handle; }
task::promise_type::data_type&& await_resume()
{
return std::move(handle.promise().data); // [3] get my data
}
std::coroutine_handle<task::promise_type> handle;
};
task f()
{
auto p2 = co_await awaitable{}; // [4] Segmentation fault here, after [3]
std::cout << (*p2) << std::endl;
}
int main(int argc, char* argv[])
{
auto task1 = f();
task::promise_type::data_type p1 = std::make_unique<task::promise_type::data_type::element_type>(10);
task1.set_data(std::move(p1)); // [1] set my data
task1.resume();
return 0;
}
In your struct awaitable, you have:
bool await_ready() { return true; }
To clarify, await_ready is a mechanism used to determine whether the result corresponding to an awaitable is ready or not. If it is ready, then there is really no good reason to suspend the calling coroutine (i.e. the one that co_awaits the awaitable) and wait for the result - just call await_resume, which retrieves the result.
It allows you to do something like:
bool await_ready() {
if (will_read_block()) {
return false;
}
read_some_data_from_file();
return true;
}
so that non-blocking reads will complete synchronously.
In your case, you always return true. This tells the runtime: when a coroutine awaits this awaitable, never suspend that coroutine and directly call await_resume, skipping the suspension and await_suspend altogether. Indeed, if you set a breakpoint (or use a print statement, if your debugger does not like coroutines) in await_suspend, you will find that it never gets called. Therefore, you never actually assign anything to the handle member in your awaitable struct, resulting in undefined behaviour when you attempt to get a promise out of it.
The solution: just return false from await_ready.
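Applied to the struct from the question, that one-line change (with everything else unchanged) looks like this:
struct awaitable
{
    bool await_ready() { return false; } // was `return true;`: now the coroutine suspends,
                                         // so await_suspend runs and stores the handle
    void await_suspend(std::coroutine_handle<task::promise_type> handle) { this->handle = handle; }
    task::promise_type::data_type&& await_resume()
    {
        return std::move(handle.promise().data); // handle is valid here now
    }
    std::coroutine_handle<task::promise_type> handle;
};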
I have a function f.
I want to throw an exception 1 s after f starts.
I can't modify f(). Is it possible to do this in C++?
try {
f();
}
catch (TimeoutException& e) {
//timeout
}
You can create a separate thread to run the call itself, and wait on a condition variable back in your main thread which will be signalled by the thread doing the call to f once it returns. The trick is to wait on the condition variable with your 1s timeout, so that if the call takes longer than the timeout you will still wake up, know about it, and be able to throw the exception - all in the main thread. Here is the code (live demo here):
#include <iostream>
#include <chrono>
#include <thread>
#include <mutex>
#include <condition_variable>
using namespace std::chrono_literals;
int f()
{
std::this_thread::sleep_for(10s); //change value here to less than 1 second to see Success
return 1;
}
int f_wrapper()
{
std::mutex m;
std::condition_variable cv;
int retValue;
std::thread t([&cv, &retValue]()
{
retValue = f();
cv.notify_one();
});
t.detach();
{
std::unique_lock<std::mutex> l(m);
if(cv.wait_for(l, 1s) == std::cv_status::timeout)
throw std::runtime_error("Timeout");
}
return retValue;
}
int main()
{
bool timedout = false;
try {
f_wrapper();
}
catch(std::runtime_error& e) {
std::cout << e.what() << std::endl;
timedout = true;
}
if(!timedout)
std::cout << "Success" << std::endl;
return 0;
}
You can also use std::packaged_task to run your function f() in another thread. This solution is more or less similar to this one, only that it uses standard classes to wrap things up.
std::packaged_task<void()> task(f);
auto future = task.get_future();
std::thread thr(std::move(task));
if (future.wait_for(1s) != std::future_status::timeout)
{
thr.join();
future.get(); // this will propagate exception from f() if any
}
else
{
thr.detach(); // we leave the thread still running
throw std::runtime_error("Timeout");
}
You can probably even try to wrap it into a function template, to allow calling arbitrary functions with timeout. Something along the lines of:
template <typename TF, typename TDuration, class... TArgs>
std::result_of_t<TF&&(TArgs&&...)> run_with_timeout(TF&& f, TDuration timeout, TArgs&&... args)
{
using R = std::result_of_t<TF&&(TArgs&&...)>;
std::packaged_task<R(TArgs...)> task(f);
auto future = task.get_future();
std::thread thr(std::move(task), std::forward<TArgs>(args)...);
if (future.wait_for(timeout) != std::future_status::timeout)
{
thr.join();
return future.get(); // this will propagate exception from f() if any
}
else
{
thr.detach(); // we leave the thread still running
throw std::runtime_error("Timeout");
}
}
And then use:
void f1() { ... }
run_with_timeout(f1, 5s);
void f2(int) { ... }
run_with_timeout(f2, 5s, 42);
int f3() { ... }
int result = run_with_timeout(f3, 5s);
This is an online example: http://cpp.sh/7jthw
You can create a new thread and asynchronously wait for 1 s to pass, and then throw an exception. However, exceptions can only be caught in the same thread where they're thrown, so you cannot catch it in the thread where you called f(), like in your example code - but that's not a stated requirement, so it may be OK for you.
Only if f is guaranteed to return in less than 1 s can you do this synchronously:
store the current time
call f()
wait until the stored time + 1 s
But it may be quite difficult to prove that f in fact does return in time.
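For completeness, here is a sketch of that synchronous padding, under the assumption that f() really does finish well inside the one-second budget (the f() body below is only a stand-in for the real, unmodifiable function):
#include <chrono>
#include <thread>
void f() { /* stands in for the real, unmodifiable function */ }
void call_padded_to_1s()
{
    const auto start = std::chrono::steady_clock::now();
    f();                                                            // assumed to return in well under 1 s
    std::this_thread::sleep_until(start + std::chrono::seconds(1)); // pad the call out to 1 s total
}
int main() { call_padded_to_1s(); }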
This builds on Smeehee's example, if you need one that takes a variable number of arguments (see also https://github.com/goblinhack/c-plus-plus-examples/blob/master/std_thread_timeout_template/README.md)
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>
int my_function_that_might_block(int x)
{
std::this_thread::sleep_for(std::chrono::seconds(10));
return 1;
}
template<typename ret, typename T, typename... Rest>
using fn = std::function<ret(T, Rest...)>;
template<typename ret, typename T, typename... Rest>
ret wrap_my_slow_function(fn<ret, T, Rest...> f, T t, Rest... rest)
{
std::mutex my_mutex;
std::condition_variable my_condition_var;
ret result = 0;
std::unique_lock<std::mutex> my_lock(my_mutex);
//
// Spawn a thread to call my_function_that_might_block().
// Pass in the condition variables and result by reference.
//
std::thread my_thread([&]()
{
result = f(t, rest...);
// Unblocks one of the threads currently waiting for this condition.
my_condition_var.notify_one();
});
//
// Detaches the thread represented by the object from the calling
// thread, allowing them to execute independently from each other.
//
my_thread.detach();
if (my_condition_var.wait_for(my_lock, std::chrono::seconds(1)) ==
std::cv_status::timeout) {
//
// Throw an exception so the caller knows we failed
//
throw std::runtime_error("Timeout");
}
return result;
}
int main()
{
// Run a function that might block
try {
auto f1 = fn<int,int>(my_function_that_might_block);
wrap_my_slow_function(f1, 42);
//
// Success, no timeout
//
} catch (std::runtime_error& e) {
//
// Do whatever you need here upon timeout failure
//
return 1;
}
return 0;
}
I'm using the Boost ASIO library as a thread pool, which is widely described. However, I want to interrupt a thread if it has been processing a task for longer than 1 second, and have it move on to the next posted task.
I can easily implement this using a separate deadline_timer, which is reset if the thread finishes before the deadline, or interrupts the thread should the task go on for too long. However, I assumed this would be built into ASIO, since it seems natural to have a task with a timeout for network operations, but I can't see anything in the API for doing that succinctly.
Can anyone tell me if this functionality already exists? Or should I implement it the way I described?
Here's a quick solution I knocked together.
It requires that your submitted function objects accept an argument of type exec_context.
The task running in the io_service can query the .canceled() accessor (which is atomic) to determine whether it should cancel early.
It can then either throw an exception or return whatever value it intended to return.
The caller submits via the submit function. This function wraps the worker function with the context object and marshals its return value and/or exception into a std::future.
The caller can then query or wait on this future (or ignore it) as appropriate.
The caller gets a handle object, which has the method cancel() on it. Using this handle, the caller can either cancel, query or wait on the submitted task.
Hope it helps. It was fun to write.
#include <boost/asio.hpp>
#include <iostream>
#include <atomic>
#include <thread>
#include <chrono>
#include <future>
#include <stdexcept>
#include <exception>
#include <utility>
#include <type_traits>
//
// an object to allow the caller to communicate a cancellation request to the
// submitted task
//
struct exec_controller
{
/// @returns previous cancellation request state
bool notify_cancel()
{
return _should_cancel.exchange(true);
}
bool should_cancel() const {
return _should_cancel;
}
private:
std::atomic<bool> _should_cancel = { false };
};
template<class Ret>
struct exec_state : std::enable_shared_from_this<exec_state<Ret>>
{
using return_type = Ret;
bool notify_cancel() {
return _controller.notify_cancel();
}
std::shared_ptr<exec_controller>
get_controller_ptr() {
return std::shared_ptr<exec_controller>(this->shared_from_this(),
std::addressof(_controller));
}
std::promise<return_type>& promise() { return _promise; }
private:
std::promise<return_type> _promise;
exec_controller _controller;
};
struct applyer;
struct exec_context
{
exec_context(std::shared_ptr<exec_controller> impl)
: _impl(impl)
{}
bool canceled() const {
return _impl->should_cancel();
}
private:
friend applyer;
std::shared_ptr<exec_controller> _impl;
};
struct applyer
{
template<class F, class Ret>
void operator()(F& f, std::shared_ptr<exec_state<Ret>> const& p) const
{
try {
p->promise().set_value(f(exec_context { p->get_controller_ptr() }));
}
catch(...) {
p->promise().set_exception(std::current_exception());
}
}
template<class F>
void operator()(F& f, std::shared_ptr<exec_state<void>> const& p) const
{
try {
f(exec_context { p->get_controller_ptr() });
p->promise().set_value();
}
catch(...) {
p->promise().set_exception(std::current_exception());
}
}
};
template<class Ret>
struct exec_result
{
using return_type = Ret;
exec_result(std::shared_ptr<exec_state<return_type>> p)
: _impl(p)
{}
bool cancel() {
return _impl->notify_cancel();
}
std::future<Ret>& get_future()
{
return _future;
}
private:
std::shared_ptr<exec_state<return_type>> _impl;
std::future<return_type> _future { _impl->promise().get_future() };
};
template<class Executor, class F>
auto submit(Executor& exec, F&& f)
{
using function_type = std::decay_t<F>;
using result_type = std::result_of_t<function_type(exec_context)>;
using state_type = exec_state<result_type>;
auto shared_state = std::make_shared<state_type>();
exec.post([shared_state, f = std::forward<F>(f)]
{
applyer()(f, shared_state);
});
return exec_result<result_type>(std::move(shared_state));
}
int main()
{
using namespace std::literals;
boost::asio::io_service ios;
boost::asio::io_service::strand strand(ios);
boost::asio::io_service::work work(ios);
std::thread runner([&] { ios.run(); });
std::thread runner2([&] { ios.run(); });
auto func = [](auto context)
{
for(int i = 0 ; i < 1000 ; ++i)
{
if (context.canceled())
throw std::runtime_error("canceled");
std::this_thread::sleep_for(100ms);
}
};
auto handle = submit(strand, func);
auto handle2 = submit(ios, [](auto context) { return 2 + 2; });
// cancel the handle, or wait on it as you wish
std::this_thread::sleep_for(1s);
handle.cancel();
handle2.cancel(); // prove that late cancellation is a nop
try {
std::cout << "2 + 2 is " << handle2.get_future().get() << std::endl;
}
catch(std::exception& e)
{
std::cerr << "failed to add 2 + 2 : " << e.what() << std::endl;
}
try {
handle.get_future().get();
std::cout << "task completed" << std::endl;
}
catch(std::exception const& e) {
std::cout << "task threw exception: " << e.what() << std::endl;
}
ios.stop();
runner.join();
runner2.join();
}
update: v2 adds some privacy protection to the classes, demonstrates 2 simultaneous tasks.
expected output:
2 + 2 is 4
task threw exception: canceled