Asio How to write a custom AsyncStream? - c++

I have actually managed to write a working AsyncStream. However, I am not really sure if I did it the way it is supposed to be done.
My main question is: Which executor is the get_executor() function supposed to return?
While implementing it several questions arose. I tagged them with Q<index>:. (I will keep the index stable in case of edits.) I would appreciate answers to them.
I tried to shorten/simplify the example as much as possible. It does compile and execute correctly.
#include <iostream>
#include <syncstream>
#include <thread>
#include <coroutine>
#include <future>
#include <random>
#include <string>
#include <memory>
#include <boost/asio.hpp>
#include <boost/asio/experimental/as_tuple.hpp>
#include <fmt/format.h>
/// Creates a std::osyncstream over std::cout whose output is prefixed with a
/// short thread identifier ("Txxxx ", the top two bytes of the hashed thread
/// id) and, when given, a tag. Returned by value so the caller can keep
/// streaming into it; the osyncstream destructor emits the text atomically.
inline std::osyncstream tout(const std::string & tag = "") {
    const auto tidHash = std::hash<std::thread::id>{}(std::this_thread::get_id());
    // Shift away all but the two most significant bytes to keep the prefix short.
    const auto prefix = fmt::format("T{:04X} ", tidHash >> (sizeof(tidHash) - 2) * 8);
    std::osyncstream out{std::cout};
    out << prefix;
    if (!tag.empty()) {
        out << tag << " ";
    }
    return out;
}
namespace asio = boost::asio;
/// Toy "device" exposed through MyAsyncStream: a detached background
/// coroutine periodically produces random data into bufferOut and consumes
/// data from bufferIn. All buffer access is serialized through `strand`.
template <typename Executor>
requires asio::is_executor<Executor>::value // Q1: Is this the correct way to require that Executor actually is an executor?
// I can't replace typename as there is no concept for Executors.
class Service : public std::enable_shared_from_this<Service<Executor>> {
template<typename CallerExecutor, typename ServiceExecutor>
// requires asio::is_executor<CallerExecutor>::value && asio::is_executor<ServiceExecutor>::value
friend class MyAsyncStream;
/// Data sent to the service
std::string bufferIn;
/// Data produced by the service
std::string bufferOut;
/// The strand used to avoid concurrent execution if the passed executor is backed by multiple threads.
asio::strand<Executor> strand;
/// Used to slow the data consumption and generation
asio::steady_timer timer;
/// Used to generate data
std::mt19937 gen;
/// https://stackoverflow.com/a/69753502/4479969
/// NOTE(review): the array form of a string literal also contains the
/// trailing '\0', and std::sample below iterates cbegin..cend over the whole
/// array — generated strings can therefore contain an embedded NUL; confirm
/// whether that is intended.
constexpr static const char charset[] =
"0123456789"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz";
/// Builds a `length`-character string by sampling (without replacement)
/// from `charset` with the supplied uniform random bit generator.
template<typename URBG>
static std::string gen_string(std::size_t length, URBG &&g) {
std::string result;
result.resize(length);
std::sample(std::cbegin(charset),
std::cend(charset),
std::begin(result),
std::intptr_t(length),
std::forward<URBG>(g));
return result;
}
/// Number of produce/consume iterations before the coroutine finishes.
static const constexpr auto MAX_OPS = 7;
/// Background coroutine: once per second produce 8 random characters into
/// bufferOut and consume up to 4 characters from bufferIn.
/// `captured_self` keeps this Service alive for the coroutine's lifetime.
asio::awaitable<void> main(std::shared_ptr<Service> captured_self) {
const constexpr auto TAG = "SrvCo";
auto exe = co_await asio::this_coro::executor;
auto use_awaitable = asio::bind_executor(exe, asio::use_awaitable);
for (size_t ops = 0; ops < MAX_OPS; ops++) {
timer.expires_after(std::chrono::milliseconds(1000));
co_await timer.async_wait(use_awaitable);
tout(TAG) << "Ops " << ops << std::endl;
bufferOut += gen_string(8, gen);
tout(TAG) << "Produced: " << bufferOut << std::endl;
auto consumed = std::string_view(bufferIn).substr(0, 4);
tout(TAG) << "Consumed: " << consumed << std::endl;
bufferIn.erase(0, consumed.size());
}
tout(TAG) << "Done" << std::endl;
}
/// Guards init() so the coroutine is spawned at most once.
std::once_flag initOnce;
public:
/// Wraps the executor in a strand; the timer is bound to the executor's
/// underlying execution context.
explicit Service(Executor && exe) : strand{asio::make_strand(exe)}, timer{exe.context()} {}
/// Idempotently starts the background coroutine, detached, on the strand.
/// shared_from_this() is handed to the coroutine to keep the object alive.
void init() {
std::call_once(initOnce, [this]() {
asio::co_spawn(strand, main(this->shared_from_this()), asio::detached);
});
}
};
/// https://www.boost.org/doc/libs/1_66_0/doc/html/boost_asio/reference/AsyncReadStream.html
/// Adapter exposing a Service as an Asio AsyncReadStream / AsyncWriteStream:
/// reads drain Service::bufferOut, writes append to Service::bufferIn.
/// Service buffers are only touched after hopping onto the service's strand;
/// the completion handler is invoked back on the caller's side.
template<typename CallerExecutor, typename ServiceExecutor>
// requires asio::is_executor<CallerExecutor>::value && asio::is_executor<ServiceExecutor>::value // Q2: Q1 is working why isn't this working with two Types?
class MyAsyncStream {
/// Completion signature shared by async_read_some / async_write_some.
typedef void async_rw_handler(boost::system::error_code, size_t);
/// Holds the callers executor.
/// Q3: Should this field even exist?
CallerExecutor executor;
/// Use a weak_ptr to behave like a file descriptor.
std::weak_ptr<Service<ServiceExecutor>> serviceRef;
public:
/// Stores the caller's executor and a non-owning reference to the service.
explicit MyAsyncStream(std::shared_ptr<Service<ServiceExecutor>> & service, CallerExecutor & exe) : executor{exe}, serviceRef{service} {}
/// Needed by the stream specification.
typedef CallerExecutor executor_type;
/**
* Q4: Which executor should this function return? The CallerExecutor or the ServiceExecutor or something different.
* In this example it is never called. However it is needed by the stream specification. https://www.boost.org/doc/libs/1_79_0/doc/html/boost_asio/reference/AsyncReadStream.html
* I really don't want to leak the ServiceExecutor to library users.
* #return Returns the executor supplied in the constructor.
*/
auto get_executor() {
tout() << "GETTING EXE" << std::endl;
return executor;
}
/// Copies bytes from Service::bufferOut into `buffer`.
/// Completes with no_buffer_space if `buffer` fills up first, otherwise with
/// stream_errc::eof once bufferOut is drained; `n` is the number of bytes copied.
/// NOTE(review): even a read that successfully drains data reports eof rather
/// than a success code, so asio::async_read stops after one pass — confirm
/// whether that is the intended contract.
template<typename MutableBufferSequence,
asio::completion_token_for<async_rw_handler>
CompletionToken = typename asio::default_completion_token<CallerExecutor>::type>
requires asio::is_mutable_buffer_sequence<MutableBufferSequence>::value
auto async_read_some(const MutableBufferSequence &buffer,
CompletionToken &&token = typename asio::default_completion_token<CallerExecutor>::type()) {
return asio::async_initiate<CompletionToken, async_rw_handler>([&](auto completion_handler) { // Q5: Can I avoid this async_initiate somehow?
BOOST_ASIO_READ_HANDLER_CHECK(CompletionToken, completion_handler) type_check; // I tried using co_spawn directly without success.
asio::co_spawn(
asio::get_associated_executor(completion_handler), // Q6-1: should I use get_executor() here? Currently, I just get the callers executor.
// NOTE(review): `buffer` is a const reference, so `std::move(buffer)` makes a
// copy; the `&` capture default also captures `this`, so the stream object
// must outlive this detached coroutine — confirm lifetimes at call sites.
[&, buffer = std::move(buffer), completion_handler = std::forward<CompletionToken>(completion_handler)]
() mutable -> asio::awaitable<void> {
const constexpr auto TAG = "ARS";
auto callerExe = co_await asio::this_coro::executor;
auto to_caller = asio::bind_executor(callerExe, asio::use_awaitable);
auto service = serviceRef.lock();
if (service == nullptr) {
std::move(completion_handler)(asio::error::bad_descriptor, 0);
co_return;
}
auto to_service = asio::bind_executor(service->strand, asio::use_awaitable);
// Hop onto the service strand before touching its buffers.
co_await asio::post(to_service);
tout(TAG) << "performing read" << std::endl;
auto buf_begin = asio::buffers_begin(buffer);
auto buf_end = asio::buffers_end(buffer);
// Sentinel value only; every path below overwrites `err` before completion.
boost::system::error_code err = asio::error::fault;
size_t it = 0;
while (!service->bufferOut.empty()) {
if (buf_begin == buf_end) {
// error the buffer is smaller than the request read amount
err = asio::error::no_buffer_space;
goto completion;
}
*buf_begin++ = service->bufferOut.at(0);
service->bufferOut.erase(0, 1);
it++;
}
err = asio::stream_errc::eof;
completion:
co_await asio::post(to_caller); // without this call the function returns on the wrong thread
tout(TAG) << "read done returned" << std::endl;
std::move(completion_handler)(err, it);
}, asio::detached);
}, token);
}
/// Appends the entire contents of `buffer` to Service::bufferIn.
/// NOTE(review): completes with stream_errc::eof even though the write
/// succeeded (n bytes written, ec == eof) — confirm whether a success code
/// was intended. The `completion:` label below has no matching goto and will
/// trigger an unused-label warning.
template<typename ConstBufferSequence,
asio::completion_token_for <async_rw_handler>
CompletionToken = typename asio::default_completion_token<CallerExecutor>::type>
requires asio::is_const_buffer_sequence<ConstBufferSequence>::value
auto async_write_some(const ConstBufferSequence &buffer,
CompletionToken &&token = typename asio::default_completion_token<CallerExecutor>::type()) {
return asio::async_initiate<CompletionToken, async_rw_handler>([&](auto completion_handler) {
BOOST_ASIO_WRITE_HANDLER_CHECK(CompletionToken, completion_handler) type_check;
asio::co_spawn(
asio::get_associated_executor(completion_handler), // Q6-2: should I use get_executor() here? Currently, I just get the callers executor.
[&, buffer = std::move(buffer), completion_handler = std::forward<CompletionToken>(completion_handler)]
() mutable -> asio::awaitable<void> {
const constexpr auto TAG = "AWS";
auto callerExe = co_await asio::this_coro::executor;
auto to_caller = asio::bind_executor(callerExe, asio::use_awaitable);
auto service = serviceRef.lock();
if (service == nullptr) {
std::move(completion_handler)(asio::error::bad_descriptor, 0);
co_return;
}
auto to_service = asio::bind_executor(service->strand, asio::use_awaitable);
// Hop onto the service strand before touching its buffers.
co_await asio::post(to_service);
tout(TAG) << "performing write" << std::endl;
auto buf_begin = asio::buffers_begin(buffer);
auto buf_end = asio::buffers_end(buffer);
boost::system::error_code err = asio::error::fault;
size_t it = 0;
while (buf_begin != buf_end) {
service->bufferIn.push_back(static_cast<char>(*buf_begin++));
it++;
}
err = asio::stream_errc::eof;
completion:
co_await asio::post(to_caller); // without this call the function returns on the wrong thread
tout(TAG) << "write done returned" << std::endl;
std::move(completion_handler)(err, it);
}, asio::detached);
}, token);
}
};
/// Application coroutine: builds a single-threaded service pool, creates the
/// Service and a MyAsyncStream over it, then alternates composed reads and
/// writes against the stream, pausing 2.5s between iterations.
asio::awaitable<int> mainCo() {
const constexpr auto TAG = "MainCo";
auto exe = co_await asio::this_coro::executor;
auto use_awaitable = asio::bind_executor(exe, asio::use_awaitable);
auto as_tuple = asio::experimental::as_tuple(use_awaitable);
// NOTE(review): `use_future` is never used below — dead local.
auto use_future = asio::use_future;
auto timer = asio::steady_timer(exe);
asio::thread_pool servicePool{1};
// Hop onto the service pool once, just to log its thread id...
co_await asio::post(asio::bind_executor(servicePool, asio::use_awaitable));
tout() << "ServiceThread run start" << std::endl;
// ...then hop back to this coroutine's own executor.
co_await asio::post(use_awaitable);
auto service = std::make_shared<Service<boost::asio::thread_pool::basic_executor_type<std::allocator<void>, 0> >>(servicePool.get_executor());
service->init();
auto stream = MyAsyncStream{service, exe};
for (size_t it = 0; it < 4; it++) {
{
std::vector<char> dataBackend;
auto dynBuffer = asio::dynamic_buffer(dataBackend, 50);
auto [ec, n] = co_await asio::async_read(stream, dynBuffer, as_tuple); // Q7-1: Can I avoid using as_tuple here?
tout(TAG) << "read done: " << std::endl
<< "n: " << n << std::endl
<< "msg: " << std::string{dataBackend.begin(), dataBackend.end()} << std::endl
<< "ec: " << ec.message()
<< std::endl;
}
{
auto const constexpr str = std::string_view{"HelloW"};
std::vector<char> dataBackend{str.begin(), str.end()};
auto dynBuffer = asio::dynamic_buffer(dataBackend, 50);
auto [ec, n] = co_await asio::async_write(stream, dynBuffer, as_tuple); // Q7-2: Can I avoid using as_tuple here?
tout(TAG) << "write done: " << std::endl
<< "n: " << n << std::endl
<< "msg: " << str << std::endl
<< "ec: " << ec.message()
<< std::endl;
}
timer.expires_after(std::chrono::milliseconds(2500));
co_await timer.async_wait(use_awaitable);
}
// NOTE(review): join() blocks the current thread inside a coroutine. The
// service coroutine has finished by this point in the demo, but this would
// stall if the pool still had work depending on this thread — confirm.
servicePool.join();
tout(TAG) << "Normal exit" << std::endl;
co_return 0;
}
/// Entry point: spawns mainCo() onto a strand of the application context,
/// pumps the context to completion, then propagates the coroutine's result.
int main() {
    asio::io_context appCtx;
    // use_future lets us fetch mainCo()'s co_return value once run() finishes.
    auto resultFuture = asio::co_spawn(asio::make_strand(appCtx), mainCo(), asio::use_future);
    tout() << "MainThread run start" << std::endl;
    appCtx.run();
    tout() << "MainThread run done" << std::endl;
    return resultFuture.get();
}

Q1
Looks fine I guess. But, see Q2.
Q2
Looks like it kills CTAD for AsyncStream. If I had to guess it's because ServiceExecutor is in non-deduced context. Helping it manually might help, but note how the second static assert here fails:
using ServiceExecutor = asio::thread_pool::executor_type;
using CallerExecutor = asio::any_io_executor;
static_assert(asio::is_executor<ServiceExecutor>::value);
static_assert(asio::is_executor<CallerExecutor>::value);
That's because co_await this_coro::executor returns any_io_executor, which is a different "brand" of executor. You need to check with execution::is_executor<T>::value instead. In fact, you might want to throw in a compatibility check as happens in Asio implementation functions:
(is_executor<Executor>::value || execution::is_executor<Executor>::value)
&& is_convertible<Executor, AwaitableExecutor>::value
PS:
It dawned on me that the non-deduced context is a symptom of
overly-specific template arguments. Just make AsyncStream<Executor, Service> (why bother with the specific type arguments that are
implementation details of Service?). That fixes the
CTAD (Live On Compiler Explorer)
template <typename CallerExecutor, typename Service>
requires my_is_executor<CallerExecutor>::value //
class MyAsyncStream {
Q3: Should this field even exist?
CallerExecutor executor;
Yes, that's how the IO object remembers its bound executor.
Q4: that's the spot where you return that caller executor.
It's not called in your application, but it might be. If you call any composed operation (like asio::async_read_until) against your IO Object (MyAsyncStream) it will - by default - run any handlers on the associated executor. This may add behaviours (like handler serialization, work tracking etc) that are required for correctness.
Like ever, the handler can be bound to another executor to override this.
Q5 I don't think so, unless you want to mandate use_awaitable (or compatible) completion tokens. The fact that you run a coro inside should be an implementation detail for the caller.
Q6: Yes, but not instead of it. I'd assume you need to use the IO object's executor as the fallback:
asio::get_associated_executor(
completion_handler, this->get_executor())
Q7-1: Can I avoid using as_tuple here?
auto [ec, n] = co_await asio::async_read(stream, dynBuffer, as_tuple);
I suppose if you can "just" handle system_error exceptions:
auto n = co_await asio::async_read(stream, dynBuffer, use_awaitable);
Alternatively, I believe maybe redirect_error is applicable?

Related

What are Body named requirements of functional nodes in OneTBB.FlowGraph?

It's not clear to me what are the Body named requirements of functional nodes. Does it need to be CopyContructible, MoveContructible? Obviously Callable.
A simple program shows copies of the Body, apparently unrelated to the concurrency number:
#include <iostream>
#include <tbb/flow_graph.h>
using namespace tbb::flow;
// Instrumented functor used to observe when TBB constructs, copies, moves
// and destroys the node body. The call operator is the actual "work".
struct function_body
{
    function_body() { std::cout << "body constructed" << std::endl; }
    ~function_body() { std::cout << "body destructed" << std::endl; }
    function_body(function_body const&) { std::cout << "body copied" << std::endl; }
    function_body(function_body&&) { std::cout << "body moved" << std::endl; }
    // Increment the input by one.
    int operator()(int value) const { return value + 1; }
};
int main()
{
const int max_iteration = 10000;
const int concurrency = 5;
graph g;
// NOTE(review): this predicate looks inverted — fc.stop() fires while
// counter < max_iteration, i.e. on the very first invocation, so the source
// emits nothing. Presumably `counter >= max_iteration` was intended;
// confirm against the observed output in the question.
auto src = input_node<int>(g, [counter = 0, max_iteration](tbb::flow_control& fc) mutable {
if (counter < max_iteration) {
fc.stop();
return counter;
}
return ++counter;
});
// Up to `concurrency` parallel invocations of the body held by the node.
auto fun = function_node<int, int>(g, concurrency, function_body{});
make_edge(src, fun);
src.activate();
g.wait_for_all();
return 0;
}
results in
body constructed
body copied
body copied
body destructed
body destructed
body destructed
In which context do these copies happen? Does spawning a task involve copying the body?
Most of the examples I have looked at define a lambda body that captures resources by reference, so the resources are shared between body instances and the body stays cheap to copy.

How to initialise variables of returning tuple in c++?

I have a function xyz() which returns a tuple<std::string, double>.
When I call this function individually, I can do the following:
auto [tresh, loss] = xyz(i);
This results in no error, but if I want to use the xyz() function in an if else block, then I can not use the tresh and loss variables anymore in the following code. Example:
if (boolX) {
auto [tresh, loss] = xyz(i);
} else {
auto [tresh, loss] = xyz(j);
}
std::cout << tresh << std::endl;
I also tried to initialise the tresh and loss variable before the if else block and remove the auto, but this gives the following error:
Expected body of lambda expression
How to resolve this?
Depending on the exact code, you could replace with a ternary
auto [tresh, loss] = boolX ? xyz(a) : xyz(b);
std::cout << tresh << std::endl;
Before structured bindings were created, std::tie was the solution for unpacking tuples.
std::string tresh;
double loss;
if (boolX) {
std::tie(tresh, loss) = xyz(i);
} else {
std::tie(tresh, loss) = xyz(i);
}
std::cout << tresh << std::endl;
It was considered clunky, because you have to declare variables before initialization, but that's exactly what you want here.
You can use a lambda for it:
auto [tresh, loss] = [&boolX, &i, &j](){
if (boolX) {
return xyz(i);
} else {
return xyz(j);
}
}();
std::cout << tresh << std::endl;
You need to declare the two variables outside of the individual cases to access them outside. You can use std::tie to "tie" the two together:
#include <iostream>
#include <tuple>
#include <string>
// Demo producer: always yields the fixed pair ("foo", 1.0).
std::tuple<std::string,double> xyz() {
    return std::make_tuple(std::string{"foo"}, 1.0);
}
// Demonstrates declaring the outputs before the branch and filling them with
// std::tie inside whichever branch runs, so both remain visible afterwards.
int main() {
    std::string name;
    double value;
    const bool condition = false;
    if (condition) {
        std::tie(name, value) = xyz();
    } else {
        std::tie(name, value) = xyz();
    }
    std::cout << name << " " << value;
}
You might restructure your code so that you do the job within each branch:
if (boolX) {
auto [tresh, std::ignore] = xyz(i);
std::cout << tresh << std::endl;
} else {
auto [tresh, std::ignore] = xyz(j);
std::cout << tresh << std::endl;
}
You might argue that the code is then longer, so make an extra function or lambda:
const auto do_job = [](auto&& p)
{
auto [tresh, std::ignore] = p;
std::cout << tresh << std::endl;
}
if (boolX) {
do_job(xyz(i));
} else {
do_job(xyz(j));
}
You could do something like this:
std::tuple<std::string, double> results;
if (boolX) {
results = xyz(i);
}
else {
results = xyz(j);
}
auto [tresh, loss] = results;
std::cout << tresh << std::endl;

Asynchronous HTTP requests with co_await

I have multiple std::functions that are called on the main thread (not on different threads) in my app (as the result of an asynchronous HTTP requests), for example:
namespace model { struct Order{}; struct Trade{};}
std::function<void (std::string)> func1 = [](std::string http_answer)
{
std::vector<model::Order> orders = ParseOrders(http_answer);
std::cout << "Fetched " << orders.size() << " open/closed orders.");
}
std::function<void (std::string)> func2 = [](std::string http_answer)
{
std::vector<model::Trade> trades = ParseTrades(http_answer);
std::cout << "Fetched " << trades.size() << " trades.");
}
How to call process_result when the both func1 and func2 have parsed HTTP answers?
auto process_result = [](std::vector<model::Order> orders, std::vector<model::Trades> trades)
{
std::cout << "Matching orders and trades.";
};
Is there some solution with co_await or something like this?
You need some kind of synchronization point. Have not used co_await so far so this might not be what you are looking for, however in c++17 I'd go for a std::promise / std::future, maybe like this:
#include <iostream>
#include <functional>
#include <future>
// One promise per callback; each callback fulfils its promise exactly once.
// NOTE(review): std::promise is single-shot — invoking func1 (or func2) a
// second time would throw std::future_error (promise_already_satisfied).
std::promise<std::string> p1;
std::function<void (std::string)> func1 = [](std::string http_answer)
{
// std::vector<model::Order> orders = ParseOrders(http_answer);
// std::cout << "Fetched " << orders.size() << " open/closed orders.");
p1.set_value(http_answer);
};
std::promise<std::string> p2;
std::function<void (std::string)> func2 = [](std::string http_answer)
{
// std::vector<model::Trade> trades = ParseTrades(http_answer);
// std::cout << "Fetched " << trades.size() << " trades.");
p2.set_value(http_answer);
};
int main () {
// whenever that happens...
func1("foo");
func2("bar");
// synchronize on func1 and func2 finished
// NOTE(review): future::get() blocks until the promise is fulfilled; it
// returns immediately here only because func1/func2 already ran
// synchronously above. In a real async setting the callbacks must be
// invoked from elsewhere or these calls will block forever.
auto answer1 = p1.get_future().get();
auto answer2 = p2.get_future().get();
auto process_result = [&](/* std::vector<model::Order> orders, std::vector<model::Trades> trades */)
{
std::cout << "Matching orders and trades... " << answer1 << answer2;
};
process_result();
return 0;
}
http://coliru.stacked-crooked.com/a/3c74f00125999fb6
https://en.cppreference.com/w/cpp/thread/future

[Boost]::DI creating unique shared_ptr objects from injector

Taking this sample code I'd like to have as a result that
button1 and button2 are two separate objects.
#include <iostream>
#include <memory>
#include "di.hpp"
namespace di = boost::di;
/// Queue dependency injected into each Control; bound as a singleton below.
struct CommandQueue {
    void addCommand() {}  // no-op placeholder
};
/// A UI control that shares the injected CommandQueue and carries a
/// process-wide serial id so distinct instances can be told apart.
struct Control {
    Control(CommandQueue &cq) : cq(cq) {
        // Shared counter: each constructed Control takes the next id.
        static int next_id{};
        id = ++next_id;
    }
    CommandQueue& cq;  // injected dependency (singleton in this sample)
    int id{};          // 1-based construction index
};
/// Binds CommandQueue as a singleton and creates two Controls via the
/// injector. Per the question, both buttons print id = 1 — the injector
/// caches the created shared_ptr, so create() returns the same object twice
/// (see the answer below for the factory-based fix).
int main() {
auto injector = di::make_injector( di::bind<CommandQueue>().in(di::singleton) );
auto button1 = injector.create<std::shared_ptr<Control>>();
auto button2 = injector.create<std::shared_ptr<Control>>();
std::cout << "button1->id = " << button1->id << std::endl;
std::cout << "button2->id = " << button2->id << std::endl;
return 0;
}
The current output is:
button1->id = 1
button2->id = 1
Instead of the intended:
button1->id = 1
button2->id = 2
Removing the di::singleton lifetime scope from CommandQueue singleton also doesn't fix it.
I know the lifetime scope for a shared_ptr is a singleton by default, but I thought that referred to the injected dependency, not to the actual object created with create.
Indeed the simplest thing could be
auto button1 = injector.create<Control>();
auto button2 = injector.create<Control>();
std::cout << "button1.id = " << button1.id() << std::endl;
std::cout << "button2.id = " << button2.id() << std::endl;
Prints
button1.id = 1
button2.id = 2
If you must have shared-pointers, the next simplest thing would be
auto button1 = std::make_shared<Control>(injector.create<Control>());
auto button2 = std::make_shared<Control>(injector.create<Control>());
std::cout << "button1->id = " << button1->id() << std::endl;
std::cout << "button2->id = " << button2->id() << std::endl;
Conceptually you want a control-factory, not a control. So, you should consider creating a factory from the dependency container:
#include <boost/di.hpp>
#include <iostream>
#include <memory>
namespace di = boost::di;
/// Queue dependency shared by every Control the factory produces.
struct CommandQueue {
    void addCommand() {}  // no-op placeholder
};
/// A control with a serial id; Control::Factory mints shared_ptr instances
/// while holding on to the injected CommandQueue reference.
struct Control {
    Control(CommandQueue &cq) : queue_(cq), id_(next_id()) { }
    /// 1-based construction index.
    int id() const { return id_; }
    /// Factory functor: created once from the injector, then invoked to
    /// produce as many distinct Controls as needed.
    struct Factory {
        Factory(CommandQueue& cq) : _cq(cq) {}
        CommandQueue& _cq;
        template <typename... Args>
        auto operator()(Args&&... args) const {
            return std::make_shared<Control>(_cq, std::forward<Args>(args)...);
        }
    };
private:
    /// Hands out the next id from a process-wide counter.
    static int next_id() { static int counter{}; return ++counter; }
    CommandQueue &queue_;
    int id_{};
};
/// Creates the factory once (with the singleton CommandQueue injected into
/// it) and then mints two distinct Controls from it — ids 1 and 2.
int main() {
auto injector = di::make_injector(di::bind<CommandQueue>().in(di::singleton));
auto factory = injector.create<Control::Factory>();
auto button1 = factory();
auto button2 = factory();
std::cout << "button1->id = " << button1->id() << std::endl;
std::cout << "button2->id = " << button2->id() << std::endl;
}

Dynamically creating a C++ function argument list at runtime

I'm trying to generate an argument list for a function call during runtime, but I can't think of a way to accomplish this in c++.
This is for a helper library I'm writing. I'm taking input data from the client over a network and using that data to make a call to a function pointer that the user has set previously. The function takes a string(of tokens, akin to printf), and a varying amount of arguments. What I need is a way to add more arguments depending on what data has been received from the client.
I'm storing the functions in a map of function pointers
typedef void (*varying_args_fp)(string,...);
map<string,varying_args_fp> func_map;
An example usage would be
void printall(string tokens, ...)
{
va_list a_list;
va_start(a_list, tokens);
for each(auto x in tokens)
{
if (x == 'i')
{
cout << "Int: " << va_arg(a_list, int) << ' ';
}
else if(x == 'c')
{
cout << "Char: " << va_arg(a_list, char) << ' ';
}
}
va_end(a_list);
}
func_map["printall"] = printall;
func_map["printall"]("iic",5,10,'x');
// prints "Int: 5 Int: 10 Char: x"
This works nicely when hardcoding the function call and it's arguments, but if I've received the data "CreateX 10 20", the program needs to be able to make the argument call itself. eg
// func_name = "CreateX", tokens = 'ii', first_arg = 10, second_arg = 20
func_map[func_name](tokens,first_arg,second_arg);
I can't predict how users are going to lay out the functions and code this beforehand.
If anyone has suggestions on accomplishing this task another way, feel free to suggest. I need the user to be able to "bind" a function to the library, and for the library to call it later after it has received data from a networked client, a callback in essence.
Here is a C++11 solution. It does not support varargs functions like printall or printf, this is impossible with this technique and IMO impossible at all, or at the very least extremely tricky. Such function are difficult to use safely in an environment like yours anyway, since any bad request from any client could crash the server, with absolutely no recourse whatsoever. You probably should move to container-based interface for better safety and stability.
On the other hand, this method supports all (?) other functions uniformly.
#include <vector>
#include <iostream>
#include <functional>
#include <stdexcept>
#include <string>
#include <boost/any.hpp>
/// Recursively "peels" arguments off `anyargs`, converting each boost::any
/// to the matching parameter type of `func`, until the nullary base case
/// below can invoke the fully-bound function.
template <typename Ret, typename... Args>
Ret callfunc (std::function<Ret(Args...)> func, std::vector<boost::any> anyargs);
/// Base case: no parameters left — the argument vector must now be empty.
template <typename Ret>
Ret callfunc (std::function<Ret()> func, std::vector<boost::any> anyargs)
{
if (anyargs.size() > 0)
throw std::runtime_error("oops, argument list too long");
return func();
}
/// Recursive case: cast the first boost::any to Arg0 (boost::any_cast throws
/// boost::bad_any_cast on a type mismatch), bind it into a lambda taking one
/// fewer parameter, and recurse on the remaining arguments.
/// NOTE(review): erase(begin()) plus passing the vector by value makes this
/// quadratic in the argument count — fine for short argument lists.
template <typename Ret, typename Arg0, typename... Args>
Ret callfunc (std::function<Ret(Arg0, Args...)> func, std::vector<boost::any> anyargs)
{
if (anyargs.size() == 0)
throw std::runtime_error("oops, argument list too short");
Arg0 arg0 = boost::any_cast<Arg0>(anyargs[0]);
anyargs.erase(anyargs.begin());
std::function<Ret(Args... args)> lambda =
([=](Args... args) -> Ret {
return func(arg0, args...);
});
return callfunc (lambda, anyargs);
}
/// Adapts a free function of arbitrary (non-variadic) signature into a
/// uniform std::function taking a vector of boost::any and returning a
/// boost::any. Argument count and types are checked at call time, not here.
/// NOTE(review): functions returning void are not supported, since
/// boost::any cannot hold void (see the accompanying text).
template <typename Ret, typename... Args>
std::function<boost::any(std::vector<boost::any>)> adaptfunc (Ret (*func)(Args...)) {
std::function<Ret(Args...)> stdfunc = func;
std::function<boost::any(std::vector<boost::any>)> result =
([=](std::vector<boost::any> anyargs) -> boost::any {
return boost::any(callfunc(stdfunc, anyargs));
});
return result;
}
Basically you call adaptfunc(your_function), where your_function is a function of any type (except varargs). In return you get an std::function object that accepts a vector of boost::any and returns a boost::any. You put this object in your func_map, or do whatever else you want with them.
Types of the arguments and their number are checked at the time of actual call.
Functions returning void are not supported out of the box, because boost::any<void> is not supported. This can be dealt with easily by wrapping the return type in a simple template and specializing for void. I've left it out for clarity.
Here's a test driver:
// Demo target: prints the call and returns a fixed value.
int func1 (int x)
{
    std::cout << "func1(" << x << ") = ";
    return 33;
}
// Demo target taking (double, string); prints the call, returns 7.
int func2 (double d, std::string s)
{
    std::cout << "func2(" << d << ",\"" << s << "\") = ";
    return 7;
}
// Demo target taking (string, double); prints the call, returns 7.
// Fixed: the quotes now wrap the string argument `a` rather than the double
// `b` — the original had the quoting copy-pasted from func2's
// (double, string) parameter order.
int func3 (std::string a, double b)
{
    std::cout << "func3(\"" << a << "\"," << b << ") = ";
    return 7;
}
// Demo target: prints the call and returns the sum of its two arguments.
int func4 (int lhs, int rhs)
{
    std::cout << "func4(" << lhs << "," << rhs << ") = ";
    return lhs + rhs;
}
/// Test driver: adapts the four demo functions, calls each with matching
/// arguments (all succeed), then with deliberately mismatched arguments
/// (all throw and are caught).
int main ()
{
std::vector<std::function<boost::any(std::vector<boost::any>)>> fcs = {
adaptfunc(func1), adaptfunc(func2), adaptfunc(func3), adaptfunc(func4) };
// Argument sets lined up index-for-index with fcs above.
std::vector<std::vector<boost::any>> args =
{{777}, {66.6, std::string("yeah right")}, {std::string("whatever"), 0.123}, {3, 2}};
// correct calls will succeed
for (int i = 0; i < fcs.size(); ++i)
std::cout << boost::any_cast<int>(fcs[i](args[i])) << std::endl;
// incorrect calls will throw
// (argument sets taken in reverse order so counts/types no longer match)
for (int i = 0; i < fcs.size(); ++i)
try {
std::cout << boost::any_cast<int>(fcs[i](args[fcs.size()-1-i])) << std::endl;
} catch (std::exception& e) {
std::cout << "Could not call, got exception: " << e.what() << std::endl;
}
}
As already mentioned by #TonyTheLion, you can use boost::variant or boost::any to select between types at runtime:
typedef std::function<void(const std::string&, const std::vector<boost::variant<char, int>>&)> varying_args_fn;
std::map<std::string, varying_args_fn> func_map;
The you can e.g. use a static visitor to distinguish between the types. Here is a full example, note that the tokens parameter is actually no longer necessary since boost::variant knows at runtime what type is stored in it:
#include <map>
#include <vector>
#include <string>
#include <functional>
#include <iostream>
#include <boost/variant.hpp>
#include <boost/any.hpp>
typedef std::function<void(const std::string&, const std::vector<boost::variant<char, int>>&)> varying_args_fn;
/// Type-safe replacement for the varargs printall: each argument carries its
/// own type inside a boost::variant and is dispatched via a static visitor.
/// NOTE: `tokens` is intentionally unused — the variant already knows which
/// alternative it holds (see the surrounding answer text).
void printall(const std::string& tokens, const std::vector<boost::variant<char, int>>& args) {
for (const auto& x : args) {
// Anonymous visitor type: one overload per alternative in the variant.
struct : boost::static_visitor<> {
void operator()(int i) {
std::cout << "Int: " << i << ' ';
}
void operator()(char c) {
std::cout << "Char: " << c << ' ';
}
} visitor;
boost::apply_visitor(visitor, x);
}
}
/// Registers printall under a name and invokes it with a mixed int/char
/// argument list built from a braced initializer.
int main() {
std::map<std::string, varying_args_fn> func_map;
func_map["printall"] = printall;
func_map["printall"]("iic", {5, 10, 'x'});
}