I want to write a C++ program that can solve systems of equations which are generally nonlinear.
I am already able to solve a system of m equations with n variables using GSL when m = n by following its documentation.
However, often I have fewer equations than variables. Can I use the same functionality of GSL, or do I have to opt for a different approach or another library? Why is there not a section about solving rectangular systems in the documentation?
This is what I've tried:
#include <cmath>
#include <cstdio>
#include <iostream>
#include <gsl/gsl_errno.h>
#include <gsl/gsl_vector.h>
#include <gsl/gsl_multiroots.h>
void print_vector(gsl_vector *gsl) {
    printf("[");
    for (int r = 0; r < gsl->size; r++) {
        if (r != gsl->size - 1) {
            printf("%+.5f; ", gsl->data[r]);
        } else {
            printf("%+.5f", gsl->data[r]);
        }
    }
    printf("]\n");
}
int evaluate(const gsl_vector *x, void *params, gsl_vector *f) {
    const double x0 = gsl_vector_get(x, 0);
    const double x1 = gsl_vector_get(x, 1);
    const double x2 = gsl_vector_get(x, 2);
    const double y0 = 0.1 * pow(x0, 2) + 0.1 * pow(x1, 2) + 2 - x2;
    // alone not working
    gsl_vector_set(f, 0, y0);
    // with this not working
    // gsl_vector_set(f, 1, 0);
    // gsl_vector_set(f, 2, 0);
    // with this not working
    // gsl_vector_set(f, 1, y0);
    // gsl_vector_set(f, 2, y0);
    return GSL_SUCCESS;
}
int main() {
    const size_t n = 3;
    const gsl_multiroot_fsolver_type *T;
    T = gsl_multiroot_fsolver_hybrids;    // "iteration is not making progress towards solution"
    // T = gsl_multiroot_fsolver_hybrid;  // "iteration is not making progress towards solution"
    // T = gsl_multiroot_fsolver_broyden; // "gsl: lu.c:266: ERROR: matrix is singular"
    // T = gsl_multiroot_fsolver_dnewton; // "gsl: lu.c:147: ERROR: matrix is singular"
    gsl_multiroot_fsolver *s = gsl_multiroot_fsolver_alloc(T, n);
    gsl_multiroot_function f_multiroot_function = {evaluate, n, nullptr};
    gsl_vector *x_gsl = gsl_vector_alloc(n);
    gsl_vector_set(x_gsl, 0, 3.4);
    gsl_vector_set(x_gsl, 1, 2.7);
    gsl_vector_set(x_gsl, 2, 3.8);
    std::cout << "x_gsl: ";
    print_vector(x_gsl);
    gsl_multiroot_fsolver_set(s, &f_multiroot_function, x_gsl);
    int status;
    size_t iter = 0;
    do {
        iter++;
        status = gsl_multiroot_fsolver_iterate(s);
        std::cout << "s->x=" << std::endl;
        print_vector(s->x);
        if (status) {
            std::cout << "STOPPING: " << gsl_strerror(status) << "." << std::endl;
            break;
        }
        status = gsl_multiroot_test_residual(s->f, 1e-5);
    } while (status == GSL_CONTINUE && iter < 20);
    return 0;
}
This is what I get:
x_gsl: [+3.40000; +2.70000; +3.80000]
s->x=
[+3.40000; +2.70000; +3.80000]
s->x=
[+3.40000; +2.70000; +3.80000]
s->x=
[+3.40000; +2.70000; +3.80000]
s->x=
[+3.40000; +2.70000; +3.80000]
s->x=
[+3.40000; +2.70000; +3.80000]
s->x=
[+3.40000; +2.70000; +3.80000]
s->x=
[+3.40000; +2.70000; +3.80000]
s->x=
[+3.40000; +2.70000; +3.80000]
s->x=
[+3.40000; +2.70000; +3.80000]
s->x=
[+3.40000; +2.70000; +3.80000]
STOPPING: iteration is not making progress towards solution.
Process finished with exit code 0
or this:
x_gsl: [+3.40000; +2.70000; +3.80000]
gsl: lu.c:147: ERROR: matrix is singular
Default GSL error handler invoked.
Process finished with exit code 134 (interrupted by signal 6: SIGABRT)
I am trying to write a 2D cross-correlation in SYCL and oneAPI.
The idea is to write a kind of Map skeleton that wraps oneAPI calls, hiding hardware-targeting issues through a parameter specifying the kind of target (CPU or GPU/accelerator).
This is my Map class:
//Definition of Map Skeleton
template<class Tin, class Tout, class Function>
class Map {
private:
Function fun;
public:
Map() {
}
Map(Function f) :
fun(f) {
}
//Overriding () operator
std::vector<std::vector<Tout>> operator()(bool use_tbb,
std::vector<std::vector<Tin>> &img,
std::vector<std::vector<Tin>> &ker) {
int img_row = img.size();
int img_col = img[0].size();
int filt_row = ker.size();
int filt_col = ker[0].size();
int out_row = img_row - filt_row;
int out_col = img_col - filt_col;
std::vector<std::vector<Tout>> out;
if (use_tbb) {
uTimer *timer = new uTimer("Executing Code On CPU");
tbb::parallel_for(
tbb::blocked_range2d<int, int>(0, out_row, 0, out_col),
[&](tbb::blocked_range2d<int, int> &t) {
for (int n = t.rows().begin(); n < t.rows().end();
++n) {
for (int m = t.cols().begin(); m < t.cols().end();
++m) {
out[n][m] = fun(
slice_matrix(img, n, m, filt_row,
filt_col), ker);
}
}
});
timer->~uTimer();
return out;
} else {
/*change 2D Matrices to the 1D linear arrays,
*
*and operate on them as contiguous blocks */
size_t M = img_row + img_col;
size_t N = filt_row + filt_col;
//size_t O = out_row + out_col;
size_t O_row = out_row;
size_t O_col = out_col;
std::vector<Tin> img_host;
std::vector<Tin> ker_host;
std::vector<Tout> out_gpu;
/* A 2D std::vector<std::vector<T>>
* does not have elements stored contiguously in the memory.
* Thus I define a vector<T> and operate on them as contiguous blocks.*/
//Define Buffer for
sycl::buffer<Tin, 1> img_buffer(img_host.data(), M);
sycl::buffer<Tin, 1> ker_buffer(ker_host.data(), N);
sycl::buffer<Tin, 2> out_buffer(out_gpu.data(), sycl::range<2> {
O_row, O_col });
//Profiling GPU
// Initialize property list with profiling information
sycl::property_list propList {
sycl::property::queue::enable_profiling() };
// Build the command queue (constructed to handle event profling)
sycl::queue gpuQueue = cl::sycl::queue(sycl::gpu_selector(),
propList);
// print out the device information used for the kernel code
std::cout << "Device: "
<< gpuQueue.get_device().get_info<sycl::info::device::name>()
<< std::endl;
std::cout << "Compute Units: "
<< gpuQueue.get_device().get_info<
sycl::info::device::max_compute_units>()
<< std::endl;
auto start_overall = std::chrono::system_clock::now();
auto event = gpuQueue.submit(
[&](sycl::handler &h) {
//local copy of fun
auto f = fun;
sycl::accessor img_accessor(img_buffer, h,
sycl::read_only);
sycl::accessor ker_accessor(ker_buffer, h,
sycl::read_only);
sycl::accessor out_accessor(out_buffer, h,
sycl::write_only);
h.parallel_for(sycl::range<2> { O_row, O_col },
[=](sycl::id<2> index) {
int row = index[0];
int col = index[1];
out_accessor[row][col] = f(
slice_matrix(img_accessor, O_row,
O_col, filt_row, filt_col),
ker_accessor);
});
});
event.wait();
auto end_overall = std::chrono::system_clock::now();
cl_ulong submit_time = event.template get_profiling_info<
cl::sycl::info::event_profiling::command_submit>();
cl_ulong start_time = event.template get_profiling_info<
cl::sycl::info::event_profiling::command_start>();
cl_ulong end_time = event.template get_profiling_info<
cl::sycl::info::event_profiling::command_end>();
auto submission_time = (start_time - submit_time) / 1000000.0f;
std::cout << "Submit Time: " << submission_time << " ms"
<< std::endl;
auto execution_time = (end_time - start_time) / 1000000.0f;
std::cout << "Execution Time: " << execution_time << " ms"
<< std::endl;
auto execution_overall = std::chrono::duration_cast<
std::chrono::milliseconds>(end_overall - start_overall);
std::cout << "Overall Execution Time: " << execution_overall.count()
<< " ms" << std::endl;
}
;
return out;
}
};
And this is my slice_matrix:
// Function which slices a specific part of my matrix
template<class T>
std::vector<std::vector<T>> slice_matrix(std::vector<std::vector<T>> mat, int i,
                                         int j, int r, int c) {
    std::vector<std::vector<T>> out(r, std::vector<T>(c, 0));
    for (int k = 0; k < r; k++) {
        std::vector<T> temp(mat[i + k].begin() + j, mat[i + k].begin() + j + c);
        out[k] = temp;
    }
    return out;
}
The problem is that, in the SYCL part inside the parallel_for,
out_accessor[row][col] = f(
slice_matrix(img_accessor, O_row,
O_col, filt_row, filt_col),
ker_accessor);
});
the compiler gives me an error:
no matching function for call to 'slice_matrix'
I tried to put my slice_matrix inside the Map class, but nothing changed. I also thought about the SYCL limitation that
"SYCL device code, as defined by this specification, does not support virtual function calls", so I defined a local copy of slice_matrix, but again I got an error.
I cannot understand how to resolve this error.
You are passing a sycl::accessor type to slice_matrix, but the signature of slice_matrix is:
// Function which slices a specific part of my matrix
template<class T>
std::vector<std::vector<T>> slice_matrix(std::vector<std::vector<T>> mat, int i, int j, int r, int c)
So the signature does not match...
You would need a version of slice_matrix that takes an accessor object instead of your vector.
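A minimal sketch of what such an overload could look like (the name slice_matrix_flat and the flat-output parameter are assumptions of mine, not code from the question): it reads from a 1D read-only accessor over the row-major flattened image and writes into a caller-provided fixed-size buffer, since device code cannot allocate a std::vector:
// Hedged sketch: "Acc" is assumed to be a 1D sycl::accessor (or anything with
// operator[](size_t)) over the row-major flattened image; img_cols is the
// column count of the original 2D image; "out" must point to r*c elements of
// device-visible storage (e.g. a private array declared inside the kernel).
template <class T, class Acc>
void slice_matrix_flat(const Acc &acc, int img_cols,
                       int i, int j, int r, int c, T *out) {
    for (int k = 0; k < r; ++k)
        for (int l = 0; l < c; ++l)
            out[k * c + l] = acc[(i + k) * img_cols + (j + l)];
}
The functor fun would then need a matching overload that consumes the flat slice. Note also that the kernel currently passes O_row and O_col to slice_matrix where the TBB path passes the loop indices n and m; presumably row and col are what is intended there.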
I am currently porting an algorithm from boost::ublas to Eigen:
Code 1 with boost::ublas
#ifndef KHACH_H
#define KHACH_H
#include <set>
#include <boost/numeric/ublas/vector.hpp>
#include <boost/numeric/ublas/matrix.hpp>
#include <boost/numeric/ublas/triangular.hpp>
#include <boost/numeric/ublas/banded.hpp>
#include <boost/numeric/ublas/lu.hpp>
#include <iostream>
#include <boost/numeric/ublas/io.hpp>
//namespace Minim {
namespace ublas=boost::numeric::ublas;
template<class T>
bool InvertMatrix(const ublas::matrix<T> &input,
ublas::matrix<T> &inverse)
{
using namespace boost::numeric::ublas;
typedef permutation_matrix<std::size_t> pmatrix;
matrix<T> A(input);
pmatrix pm(A.size1());
int res = lu_factorize(A,pm);
if( res != 0 ) return false;
inverse.assign(ublas::identity_matrix<T>(A.size1()));
lu_substitute(A, pm, inverse);
return true;
}
inline void Lift(const ublas::matrix<double> &A,
ublas::matrix<double> &Ap)
{
Ap.resize(A.size1()+1,
A.size2());
ublas::matrix_range<ublas::matrix<double> >
sub(Ap,
ublas::range(0, A.size1()),
ublas::range(0, A.size2()));
sub.assign(A);
ublas::row(Ap, Ap.size1()-1)=ublas::scalar_vector<double>(A.size2(),1.0);
}
#endif
//}
Code 2 with Eigen:
#ifndef KHACH_H
#define KHACH_H
#include <set>
#include <iostream>
#include <Eigen/Eigen>
//namespace Minim {
template <class NT>
using MT = Eigen::Matrix<NT, Eigen::Dynamic, Eigen::Dynamic>;
template <class NT>
using VT = Eigen::Matrix<NT, Eigen::Dynamic, 1>;
template<typename Derived>
inline bool is_nan(const Eigen::MatrixBase<Derived>& x)
{
return ((x.array() == x.array())).all();
}
template<class T>
bool InvertMatrix(const MT<T> &input,
MT<T> &inverse)
{
inverse.setIdentity(input.rows(), input.cols());
inverse = input.inverse();
return !is_nan(inverse);
}
inline void Lift(const MT<double> &A, MT<double> &Ap)
{
Ap.resize(A.rows()+1, A.cols());
Ap.topLeftCorner(A.rows(), A.cols()) = A;
Ap.row(Ap.rows()-1).setConstant(1.0);
}
#endif
//}
These functions are part of a bigger codebase, but I think these two functions are the ones creating the difference. The Eigen versions give a different output for some large matrices compared to the output of the boost code, and I am not able to find the bug.
Any help would be appreciated.
You didn't specify any inputs or what discrepancy you're finding.
This led me to build simple testers, in which I find that an obvious source of "differences" is the inaccuracy of [binary] floating-point representations.
You can easily confirm it with a small test input, e.g. the 3×3 matrix {{1,2,3},{3,2,1},{2,1,3}} used in the tester below, and its inverse:
Live On Compiler Explorer
#include <boost/numeric/ublas/matrix.hpp>
#include <boost/numeric/ublas/vector.hpp>
#include <set>
#include <boost/numeric/ublas/banded.hpp>
#include <boost/numeric/ublas/lu.hpp>
#include <boost/numeric/ublas/triangular.hpp>
#include <boost/numeric/ublas/io.hpp>
#include <iostream>
namespace Minim1 {
namespace ublas = boost::numeric::ublas;
template <class T> using MT = ublas::matrix<T>;
template <class T> bool InvertMatrix(const MT<T>& input, MT<T>& inverse)
{
using namespace boost::numeric::ublas;
typedef permutation_matrix<std::size_t> pmatrix;
matrix<T> A(input);
pmatrix pm(A.size1());
int res = lu_factorize(A, pm);
if (res != 0)
return false;
inverse.assign(ublas::identity_matrix<T>(A.size1()));
lu_substitute(A, pm, inverse);
return true;
}
template <class T>
inline void Lift(const ublas::matrix<T>& A, ublas::matrix<T>& Ap)
{
Ap.resize(A.size1() + 1, A.size2());
ublas::matrix_range<ublas::matrix<T>> sub(
Ap, ublas::range(0, A.size1()), ublas::range(0, A.size2()));
sub.assign(A);
ublas::row(Ap, Ap.size1() - 1) = ublas::scalar_vector<T>(A.size2(), 1.0);
}
}
#include <Eigen/Eigen>
#include <cmath>
#include <iostream>
#include <limits>
#include <set>
#include <sstream>
namespace Minim2 {
template <class T>
using MT = Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>;
static_assert(Eigen::RowMajor == 1);
template <class T>
using VT = Eigen::Matrix<T, Eigen::Dynamic, Eigen::RowMajor>;
template <typename Derived>
inline bool is_nan(const Eigen::MatrixBase<Derived>& x)
{
return ((x.array() == x.array())).all();
}
template <class T> bool InvertMatrix(const MT<T>& input, MT<T>& inverse)
{
inverse.setIdentity(input.rows(), input.cols());
inverse = input.inverse();
return !is_nan(inverse);
}
template <typename T>
inline void Lift(const MT<T>& A, MT<T>& Ap)
{
Ap.resize(A.rows() + 1, A.cols());
Ap.topLeftCorner(A.rows(), A.cols()) = A;
Ap.row(Ap.rows() - 1).setConstant(1.0);
}
}
template <typename T>
static inline std::string compare(Minim1::MT<T> const& a, Minim2::MT<T> const& b) {
if (a.size1() != static_cast<size_t>(b.rows())) return "rows do not match";
if (a.size2() != static_cast<size_t>(b.cols())) return "cols do not match";
for (size_t r = 0; r < a.size1(); r++) {
for (size_t c = 0; c < a.size2(); c++) {
auto va = a(r,c);
auto vb = b(r,c);
auto delta = std::abs(va-vb);
if (va != vb) {
std::ostringstream oss;
oss
<< "mismatch at (" << r << ", " << c << "): "
<< va << " != " << vb
<< " delta:" << std::abs(va-vb)
<< " significant? " << std::boolalpha
<< (std::numeric_limits<T>::epsilon() < delta) << "\n";
return oss.str();
}
}
}
return "equivalent";
}
template <typename T>
auto convert(Minim1::MT<T> const& a) {
Minim2::MT<T> b(a.size1(), a.size2());
for (size_t r = 0; r < a.size1(); r++) {
for (size_t c = 0; c < a.size2(); c++) {
b(r, c) = a(r, c);
} }
return b;
}
int main() {
using T = double;
using M1 = Minim1::MT<T>;
using M2 = Minim2::MT<T>;
auto report = [](auto const& a, auto const& b) {
std::cout << "\na: ------\n" << a;
std::cout << "\nb: ------\n" << b;
std::cout << "\n" << compare(a, b) << "\n";
};
M1 a(3, 3);
a(0, 0) = 1; a(0, 1) = 2; a(0, 2) = 3;
a(1, 0) = 3; a(1, 1) = 2; a(1, 2) = 1;
a(2, 0) = 2; a(2, 1) = 1; a(2, 2) = 3;
M2 b(3, 3);
b << 1, 2, 3,
3, 2, 1,
2, 1, 3;
report(a, b);
std::cout << "\nINVERSIONS";
M1 ai(a.size1(), a.size2());
M2 bi(b.rows(), b.cols());
Minim1::InvertMatrix(a, ai);
Minim2::InvertMatrix(b, bi);
report(ai, bi);
M2 deltas = (convert(ai) - bi).cwiseAbs();
constexpr auto eps = std::numeric_limits<T>::epsilon();
std::cout << "deltas:\n" << deltas << "\n";
for (int r = 0; r < deltas.rows(); r++) {
for (int c = 0; c < deltas.cols(); c++) {
auto d = deltas(r,c);
if (d > eps) {
std::cout << "The delta at (" << r << ", " << c << ") (" << d << " is > ε (" << eps << ")\n";
}
} }
}
Prints
a: ------
[3,3]((1,2,3),(3,2,1),(2,1,3))
b: ------
1 2 3
3 2 1
2 1 3
equivalent
INVERSIONS
a: ------
[3,3]((-0.416667,0.25,0.333333),(0.583333,0.25,-0.666667),(0.0833333,-0.25,0.333333))
b: ------
-0.416667 0.25 0.333333
0.583333 0.25 -0.666667
0.0833333 -0.25 0.333333
mismatch at (0, 0): -0.416667 != -0.416667 delta:5.55112e-17 significant? false
deltas:
5.55112e-17 0 0
0 2.77556e-17 0
0 2.77556e-17 0
This confirms that all differences are around (or even below) the machine epsilon for the chosen data type. If you replace that type:
using T = long double;
You get the following deltas (Compiler Explorer):
mismatch at (0, 0): -0.416667 != -0.416667 delta:2.71051e-20 significant? false
deltas:
2.71051e-20 1.35525e-20 0
5.42101e-20 0 0
6.77626e-21 0 0
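When deltas at this level are expected, it can help to compare with a relative tolerance rather than exact equality; a minimal sketch using Eigen's isApprox (my addition, reusing the same 3×3 test matrix):
#include <Eigen/Dense>
#include <iostream>
int main() {
    Eigen::Matrix3d a;
    a << 1, 2, 3,
         3, 2, 1,
         2, 1, 3;
    Eigen::Matrix3d ainv      = a.inverse();
    Eigen::Matrix3d roundtrip = ainv.inverse(); // may differ from a in the last bits
    std::cout << std::boolalpha;
    std::cout << "exact:  " << (a.array() == roundtrip.array()).all() << "\n";
    std::cout << "approx: " << a.isApprox(roundtrip) << "\n"; // tolerant comparison
}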
Where To Go From Here
Find out whether this is your problem by plugging in your inputs. You might stumble on other things that escaped your attention before. If not, at least you now have the tools to make a new, more focused question.
If you want to learn more about floating point inaccuracy:
Why are floating point numbers inaccurate?
Is floating point math broken?
How can I pass some constants in boost::math::tools::brent_find_minima() from main()?
struct func
{
    template <class T>
    T operator()(T const& x)
    {
        T Wnew = 20.0/9.0*720.0; // Goal is to pass it through main()
        T W = 2500;              // Goal is to pass it through main()
        return abs(Wnew/2/x - atan(W/2/x));
    }
};
int main(int argc, char **argv)
{
// How can I pass Wnew and W values while calling boost::math::tools::brent_find_minima() from main()
std::pair<double, double> r = boost::math::tools::brent_find_minima(func(), 1.0, 2000.0, std::numeric_limits<double>::digits);
std::cout.precision(std::numeric_limits<double>::digits10);
std::cout << "x at minimum = " << r.first << ", f(" << r.first << ") = " << r.second << std::endl;
}
I suppose what you want is to create different instances of func with different values for W and Wnew. You are almost there, just give your func some state:
#include <cmath>
struct func
{
    double Wnew;
    double W;
    func(double Wnew, double W) : Wnew(Wnew), W(W) {}
    double operator()(double const& x)
    {
        return std::abs(Wnew/2/x - std::atan(W/2/x)); // std::abs avoids the int overload of ::abs
    }
};
And then create an instance like this:
double Wnew = 1.0;
double W = 2.0;
auto r = boost::math::tools::brent_find_minima(func(Wnew,W), 1.0, 2000.0, std::numeric_limits<double>::digits);
// ^^
I was a bit puzzled by your operator() being a template and changed that. If you had good reasons for it, simply make it a template again.
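If the template was intentional (for example, to call the functor with types other than double later), here is a sketch of keeping the templated operator() while still carrying the state:
#include <cmath>
struct func
{
    double Wnew;
    double W;
    func(double Wnew, double W) : Wnew(Wnew), W(W) {}
    template <class T>
    T operator()(T const& x) const
    {
        using std::abs;   // unqualified calls so user-defined types can still be found via ADL
        using std::atan;
        return abs(Wnew/2/x - atan(W/2/x));
    }
};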
PS: Since C++11, there are lambda expressions that allow a much terser syntax for functors.
I like the lambda approach the best:
#include <cmath>
#include <iostream>
#include <limits>
#include <boost/math/tools/minima.hpp>
int main(int argc, char **argv)
{
    double W = 2500;
    double Wnew = 20.0/9.0*720.0;
    if (argc == 3) {
        // assign to the existing variables (no new declarations),
        // otherwise the command-line values would be ignored
        W = std::stod(argv[1]);
        Wnew = std::stod(argv[2]);
    }
    auto f = [&W, &Wnew](double x)->double {
        return std::abs(Wnew/2/x - atan(W/2/x));
    };
    std::pair<double, double> r = boost::math::tools::brent_find_minima(f, 1.0, 2000.0, std::numeric_limits<double>::digits);
    std::cout.precision(std::numeric_limits<double>::digits10);
    std::cout << "x at minimum = " << r.first << ", f(" << r.first << ") = " << r.second << std::endl;
}
The documentation for brent_find_minima was written pre-C++11 and uses what should now be regarded as overly verbose syntax. The post-C++11 boost.math documentation examples (like the quadrature routines) all use lambdas.
The functionality that I want is like:
std::vector<float> GetFuncVec(int N, FuncType type)
{
std::vector<float> fn(N);
float tmp = (N - 1) / 2.0;
switch (type) {
case SIN:
for (int i=0; i<N; ++i)
fn[i] = sin(M_PI * i / tmp);
break;
case SINC:
for (int i=0; i<N; ++i)
fn[i] = sin(M_PI * i / tmp) / (M_PI * i / tmp);
break;
...
}
return fn;
}
I find this unsatisfactory because there is a lot of code duplication. Looking around, I found the STL algorithm std::generate(), which can fill a vector using a functor that can have an increment member to play the role of i.
I see two potential routes. The first is to use a factory to initialize the functor. The problem with this method is code separation (above, the different cases are kept nicely together) and the increased overhead of the multiple new classes needed.
The second is to use lambda functions (which I have very little experience with). This is nice because I can define each function in a single line in the switch statement. But I don't see how I can avoid a scoping problem (the lambda function is not accessible outside the scope of the switch statement).
Is there a solution using lambda functions? What is the best option, from an efficiency viewpoint and from a readability viewpoint?
Maybe you want something like this...? (see it run here)
#include <iostream>
#include <vector>
#include <cmath>
#include <functional>
enum Func { Sin, Sinc };
std::vector<float> f(int n, Func func)
{
std::vector<float> results(n);
float tmp = (n - 1) / 2.0;
int i;
std::function<float()> fns[] = {
[&] { return sin(M_PI * i / tmp); },
[&] { return sin(M_PI * i / tmp) / (M_PI * i / tmp); }
};
auto& fn = fns[func];
for (i=0; i<n; ++i)
results[i] = fn();
return results;
}
int main()
{
std::vector<float> x = f(10, Sin);
for (auto& v : x) std::cout << v << ' '; std::cout << '\n';
std::vector<float> y = f(10, Sinc);
for (auto& v : y) std::cout << v << ' '; std::cout << '\n';
}
Output:
0 0.642788 0.984808 0.866025 0.34202 -0.34202 -0.866025 -0.984808 -0.642788 -2.44929e-16
-nan 0.920725 0.705317 0.413497 0.122477 -0.0979816 -0.206748 -0.201519 -0.115091 -3.89817e-17
One option that may not be fast (there is indirection on each function call) but that would be a bit more flexible would be to create an std::map<FuncType, std::function<float(int,float)>>. You can't use std::generate() because you need the argument i to calculate the result, but writing your own is not that hard:
template <typename Iterator, typename Generator, typename Index, typename... Args>
void generate_i(Iterator first, Iterator last, Generator gen, Index i, Args... args)
{
while (first != last) {
*first = gen(i, args...);
++i;
++first;
}
}
Now that we have this, we need to populate a map of functors:
using FuncTypeFunction = std::function<float(int,float)>;
using FuncTypeFunctionMap = std::map<FuncType, FuncTypeFunction>;
FuncTypeFunctionMap create_functype_map()
{
FuncTypeFunctionMap functions;
functions[SIN] = [] (int i, float tmp) {
return sin(M_PI * i / tmp);
};
functions[SINC] = [] (int i, float tmp) {
return sin(M_PI * i / tmp) / (M_PI * i / tmp);
};
// ...
return functions;
}
FuncTypeFunctionMap const FuncTypeFunctions = create_functype_map();
(If you prefer you can use boost.assign to improve readability of this bit.)
And finally, we can use this map:
std::vector<float> GetFuncVec(int N, FuncType type)
{
std::vector<float> fn(N);
float tmp = (N - 1) / 2.0;
auto func = FuncTypeFunctions.find(type);
if (func != FuncTypeFunctions.end()) {
generate_i(fn.begin(), fn.end(), func->second, 0, tmp);
}
return fn;
}
Adding new functions only requires populating the map in create_functype_map(). Note that each iteration in the generate_i() loop is going to invoke the operator() on std::function, which will require a level of indirection to resolve the call, similar to the overhead of a virtual method invocation. This will cost a bit in terms of performance but may not be an issue for you.
(See a demo)
You may write a general class to be used with the standard algorithm std::iota.
For example
#include <iostream>
#include <functional>
#include <vector>
#include <numeric>
class Value
{
public:
Value() : i( 0 ), fn( []( size_t i ) { return ( float )i; } ) {}
Value & operator ++() { ++i; return *this; }
operator float () const { return fn( i ); }
Value & operator =( std::function<float( size_t )> fn )
{
this->fn = fn;
return *this;
}
private:
size_t i;
std::function<float( size_t )> fn;
};
enum E { First, Second };
std::vector<float> f( size_t N, E e )
{
Value value;
float tmp = N / 2.0f;
switch( e )
{
case First:
value = [tmp] ( size_t i ) { return i * tmp; };
break;
case Second:
value = [tmp] ( size_t i ) { return i * tmp + tmp; };
break;
}
std::vector<float> v( N );
std::iota( v.begin(), v.end(), value );
return v;
}
int main()
{
for ( float x : f( 10, First ) ) std::cout << x << ' ';
std::cout << std::endl;
for ( float x : f( 10, Second ) ) std::cout << x << ' ';
std::cout << std::endl;
return 0;
}
The output is
0 5 10 15 20 25 30 35 40 45
5 10 15 20 25 30 35 40 45 50
Of course you may use your own lambda expressions that include mathematical functions like sin.
I'm trying to re-write some MatLab code in C++ and I've come across this:
currentsign = sign(vector(i));
I have looked on the internet and found this link: http://www.mathworks.co.uk/help/techdoc/ref/sign.html
I'm just wondering if there's a sign function in C++. If not, can anyone suggest any tutorials on creating one?
Thank you :)
template <typename T>
int sign (const T &val) { return (val > 0) - (val < 0); }
Credit due to Ambroz Bizjak.
template <typename T>
std::vector<int> sign (const std::vector<T> &v) {
std::vector<int> r(v.size());
std::transform(v.begin(), v.end(), r.begin(), (int(*)(const T&))sign);
return r;
}
Full example on ideone.
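Since the ideone listing isn't reproduced here, a small self-contained usage sketch of the two overloads above (the example values are mine):
#include <algorithm>
#include <iostream>
#include <vector>

template <typename T>
int sign(const T &val) { return (val > 0) - (val < 0); }

template <typename T>
std::vector<int> sign(const std::vector<T> &v) {
    std::vector<int> r(v.size());
    std::transform(v.begin(), v.end(), r.begin(), (int (*)(const T &))sign);
    return r;
}

int main() {
    std::vector<double> v{-2.5, 0.0, 3.75};
    for (int s : sign(v))
        std::cout << s << ' ';   // prints: -1 0 1
    std::cout << '\n';
}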
I would suggest:
First, write a function or functor that takes a single element and returns 1, -1, or 0 depending on the element's value.
Second, use std::transform together with this function/functor to take an input container and fill a second container with the desired values.
template <typename T>
int signum(const T& val) {
    return (val > 0) - (val < 0); // signum logic: yields 1, -1 or 0
}
#include <vector>
#include <algorithm>
int main() {
std::vector<int> data = ....;
std::vector<int> signs(data.size());
std::transform(data.begin(), data.end(), signs.begin(), signum<int>);
}
#include <algorithm>
#include <iostream>
#include <iomanip>
#include <vector>
inline int get_signum(int val) {
return val < 0 ? -1
: val == 0 ? 0
: 1;
}
int main() {
std::vector<int> values;
for (int i = -5; i < 6; ++i)
values.push_back(i);
std::vector<int> signum(values.size());
std::transform(values.begin(), values.end(), signum.begin(), get_signum);
for (int i = 0; i < values.size(); ++i) {
std::cout << std::setw(2) << values[i] << ' ' << signum[i] << std::endl;
}
return 0;
}
Well, you can do it at compile time using template specialization.
You can use sign<n>::Positive, sign<n>::Negetive and sign<n>::Zero; you can also use sign<n>::Sign, which is 1|0|-1, the same as MATLAB's sign.
#include <iostream>
template<int n>
struct sign{
enum{
Positive = (n > 0),
Negetive = (n < 0),
Zero = 0,
Sign = ((n > 0) ? 1 : -1)
};
};
template<>
struct sign<0>{
enum{
Positive = 0,
Negetive = 0,
Zero = 1,
Sign = 0
};
};
int main(){
std::cout << sign<0>::Positive << sign<0>::Negetive << sign<0>::Zero << sign<0>::Sign << std::endl;
std::cout << sign<1>::Positive << sign<1>::Negetive << sign<1>::Zero << sign<1>::Sign << std::endl;
std::cout << sign<-1>::Positive << sign<-1>::Negetive << sign<-1>::Zero << sign<-1>::Sign << std::endl;
return 0;
}
Where you used to write sign(n) in MATLAB, here you write sign<n>::Sign.
C99 has signbit() and copysign(), which seem to be implemented in glibc on Linux. You didn't specify what platform you're on though, so I'm not sure that helps...
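For reference, a minimal sketch of building a MATLAB-style sign() on top of those <cmath> facilities (note that copysign alone maps 0.0 to +1, so zero needs a special case):
#include <cmath>
#include <iostream>

// MATLAB-like sign() using std::copysign; zero is special-cased because
// std::copysign(1.0, 0.0) returns +1 and std::copysign(1.0, -0.0) returns -1.
double matlab_sign(double x) {
    if (x == 0.0) return 0.0;
    return std::copysign(1.0, x);
}

int main() {
    std::cout << matlab_sign(-3.5) << ' '    // -1
              << matlab_sign(0.0)  << ' '    //  0
              << matlab_sign(2.0)  << '\n';  //  1
    std::cout << std::boolalpha << std::signbit(-0.0) << '\n'; // true: signbit sees the sign of -0.0
}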