Using boost wave - c++

I'm trying to get my head around boost wave, and so far, I'm not having much luck.
I tried the sample code from the site. It is below:
#include <iostream>
#include <iomanip>
#include <fstream>
#include <string>
#include <vector>
///////////////////////////////////////////////////////////////////////////////
// Include Wave itself
#include <boost/wave.hpp>
///////////////////////////////////////////////////////////////////////////////
// Include the lexer stuff
#include <boost/wave/cpplexer/cpp_lex_token.hpp> // token class
#include <boost/wave/cpplexer/cpp_lex_iterator.hpp> // lexer class
int main () {
// The following preprocesses a given input file.
// Open the file and read it into a string variable
std::ifstream instream("lex_infile");
std::string input(
std::istreambuf_iterator<char>(instream.rdbuf()),
std::istreambuf_iterator<char>());
// The template boost::wave::cpplexer::lex_token<> is the
// token type to be used by the Wave library.
// This token type is one of the central types throughout
// the library, because it is a template parameter to some
// of the public classes and templates and it is returned
// from the iterators.
// The template boost::wave::cpplexer::lex_iterator<> is
// the lexer iterator to use as the token source for the
// preprocessing engine. In this case this is parametrized
// with the token type.
typedef boost::wave::cpplexer::lex_iterator<
boost::wave::cpplexer::lex_token<> >
lex_iterator_type;
typedef boost::wave::context<
std::string::iterator, lex_iterator_type>
context_type;
context_type ctx(input.begin(), input.end(), "lex_infile");
// At this point you may want to set the parameters of the
// preprocessing as include paths and/or predefined macros.
//ctx.add_include_path("...");
//ctx.add_macro_definition(...);
// Get the preprocessor iterators and use them to generate
// the token sequence.
context_type::iterator_type first = ctx.begin();
context_type::iterator_type last = ctx.end();
std::cout << "HERE" << std::endl;
// The input stream is preprocessed for you during iteration
// over [first, last)
while (first != last) {
std::cout << (*first).get_value() << std::endl;
++first;
}
}
It compiles ok, but when I feed a file into it, I get the following error:
terminate called after throwing an instance of 'boost::exception_detail::clone_impl<boost::exception_detail::error_info_injector<boost::wave::preprocess_exception> >'
what(): boost::wave::preprocess_exception
Aborted
The code I'm trying to 'preprocess' is in a file called lex_infile, with the following contents:
#include <oglre>
#include <light>
#include <material>
in vec3 in_Position;
in vec2 in_Texture;
in vec3 in_Normal;
out vec2 textureCoord;
out vec4 pass_Color;
void main() {
gl_Position = pvmMatrix * vec4(in_Position, 1.0);
textureCoord = in_Texture;
vec3 normalDirection = normalize(normalMatrix * in_Normal);
vec3 lightDirection = normalize(vec3(lightSources[0].direction));
vec3 diffuseReflection = vec3(lightSources[0].diffuse) * vec3(mymaterial.diffuse) * max(0.0, dot(normalDirection, lightDirection));
/*
float bug = 0.0;
bvec3 result = equal( diffuseReflection, vec3(0.0, 0.0, 0.0) );
if(result[0]) bug = 1.0;
diffuseReflection.x += bug;
*/
pass_Color = vec4(diffuseReflection, 1.0);
}
I imagine I need to define the include locations... how would I even do that?
Sorry if this is simple stuff; I'm just a little lost.

Figured it out.
I needed to derive a class from wave::context_policies::default_preprocessing_hooks and override the found_unknown_directive method.
Once that was done, I needed to pass my new preprocessing-hooks class to the boost::wave::context typedef as a template parameter.
It looks like this:
typedef boost::wave::context<
std::string::iterator,
lex_iterator_type,
load_file_to_string,
custom_directives_hooks
> context_type;
and
class custom_directives_hooks
: public wave::context_policies::default_preprocessing_hooks
{
public:
template <typename ContextT, typename ContainerT>
bool
found_unknown_directive(ContextT const& ctx, ContainerT const& line,
ContainerT& pending)
{
typedef typename ContainerT::const_iterator iterator_type;
iterator_type it = line.begin();
wave::token_id id = wave::util::impl::skip_whitespace(it, line.end());
if (id != wave::T_IDENTIFIER)
return false; // nothing we could do
if (it->get_value() == "version" || it->get_value() == "extension") {
// Handle #version and #extension directives
std::copy(line.begin(), line.end(), std::back_inserter(pending));
return true;
}
if (it->get_value() == "type") {
// Handle type directive
return true;
}
// Unknown directive
return false;
}
};
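As for the include locations asked about above, the wave context exposes them directly before iteration begins. A minimal sketch (the paths and macro below are placeholders, not from the real project), assuming the same context_type typedef:
context_type ctx(input.begin(), input.end(), "lex_infile");
// Search path for quoted includes, e.g. #include "..."
ctx.add_include_path(".");
// Search path for angle-bracket includes, e.g. #include <oglre>
ctx.add_sysinclude_path("./shaders");
// Equivalent to passing -DMY_DEFINE=1 to a compiler (placeholder macro)
ctx.add_macro_definition("MY_DEFINE=1");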
Hope that helps anyone else having this problem.

Related

Restore order of polygons after CGAL::Polyline_simplification_2

I followed this example from the CGAL documentation in order to simplify a number of polygons, which are initially stored in a vector called polys. Afterwards I collect the simplified polygons into a vector called simple_polys. However, the order of the polygons gets mixed up during the simplification, so simple_polys[i] is not necessarily the simplified version of polys[i]. I would be thankful for any ideas on how to adapt my code (see below) so that the order of the polygons is maintained or restored.
Note: the obvious solution of simplifying each polygon individually is not an option for me, since I need to preserve the common edges of the polygons (if there are any), i.e., the common edges are allowed to be simplified, but they must remain common edges.
Here is the current version of my code:
#include <vector>
#include <fstream>
#include <CGAL/Exact_predicates_exact_constructions_kernel.h>
#include <CGAL/Polygon_2.h>
#include <CGAL/Constrained_Delaunay_triangulation_2.h>
#include <CGAL/Constrained_triangulation_plus_2.h>
#include <CGAL/Polyline_simplification_2/simplify.h>
#include <CGAL/Polyline_simplification_2/Squared_distance_cost.h>
#include <CGAL/IO/WKT.h>
// CGAL typedefs
typedef CGAL::Exact_predicates_exact_constructions_kernel Kernel;
typedef Kernel::Point_2 Point_2;
typedef CGAL::Polygon_2<Kernel> Polygon_2;
// CGAL classes needed for polyline simplification
namespace PS = CGAL::Polyline_simplification_2;
typedef PS::Vertex_base_2<Kernel> Vb;
typedef CGAL::Constrained_triangulation_face_base_2<Kernel> Fb;
typedef CGAL::Triangulation_data_structure_2<Vb, Fb> TDS;
typedef CGAL::Constrained_Delaunay_triangulation_2<Kernel, TDS, CGAL::Exact_predicates_tag> CDT;
typedef CGAL::Constrained_triangulation_plus_2<CDT> CT;
typedef PS::Stop_above_cost_threshold Stop;
typedef PS::Squared_distance_cost Cost;
int main()
{
// Read the polygons from the WKT file
std::ifstream is("polys.wkt");
std::vector<Polygon_2> polys;
do {
Polygon_2 p;
CGAL::IO::read_polygon_WKT(is, p);
if(!p.is_empty())
polys.push_back(p);
} while(is.good() && !is.eof());
// Insert the polygons into a Constrained_triangulation_plus_2
CT ct;
for ( auto poly : polys ) ct.insert_constraint(poly);
// Simplify the polygons
PS::simplify(ct, Cost(), Stop(2.0));
// Convert the result back into normal polygons
std::vector<Polygon_2> simple_polys;
for( CT::Constraint_iterator cit = ct.constraints_begin(); cit != ct.constraints_end(); ++cit)
{
Polygon_2 poly;
for ( CT::Points_in_constraint_iterator vit = ct.points_in_constraint_begin(*cit);
vit != ct.points_in_constraint_end(*cit); ++vit) poly.push_back(*vit);
simple_polys.push_back(poly);
}
}
Here is an example of a WKT input file one can use to run the above code (name it polys.wkt):
POLYGON((46.465 -37.3521,35.5358 -20.6702,48.8869 -11.958,51.9866 -16.8288,50.0332 -18.1056,54.9318 -25.6487,55.8444 -25.0541,55.5429 -22.8841,73.1793 -11.5299,77.2208 -17.435))
POLYGON((150.934 48.3496,137.482 47.0145,136.581 54.533,134.581 54.3679,134.206 58.2453,136.107 58.4855,135.177 66.2472,148.772 67.1611))
POLYGON((113.969 -24.4624,103.013 -25.5321,102.007 -15.8837,101.437 -15.8568,101.143 -12.0671,101.652 -12.0653,100.581 -1.99834,107.794 -1.3459,107.853 -2.01614,111.495 -1.66637,111.568 -1.96882,111.947 -1.88637))
POLYGON((35.8102 -21.0888,27.2558 -8.05192,38.1005 -0.96724,44.4211 -10.2175,45.1996 -9.66818,45.021 -9.3973,45.5261 -7.35956,50.2289 -4.30611,50.4096 -3.43368,64.9678 6.02664,66.7379 -0.809735))
POLYGON((-1.28342 -115.889,22.725 -111.81,25.6615 -128.839,16.2488 -130.272,16.4807 -131.888,12.9582 -132.401,12.7173 -130.807,12.4127 -130.833,16.6721 -156.064,0.111882 -158.497,-1.28339 -150.737))
POLYGON((133.248 -68.93,100.825 -72.0011,99.8362 -61.4584,107.418 -60.8479,107.399 -60.397,110.68 -60.2175,110.691 -60.6869,120.743 -59.7579,120.696 -59.3589,124.041 -59.0963,124.058 -59.5348,132.275 -58.7058))
POLYGON((118.371 29.3127,111.786 28.6569,110.332 42.1233,117.682 42.8478,117.895 41.5415,118.397 41.5653,118.858 38.6083,118.3 38.521,118.704 35.4032,117.714 35.3187))
POLYGON((146.407 -67.6797,132.75 -68.9772,131.777 -58.752,135.766 -58.415,135.713 -57.87,138.974 -57.6063,139.028 -58.0592,145.445 -57.5652))
POLYGON((38.7765 -180.005,42.57 -177.511,39.8274 -173.17,45.4545 -169.387,34.4094 -152.557,47.1264 -144.271,47.0159 -144.108,51.1497 -141.344,53.8363 -145.461,55.5009 -144.64,58.878 -149.842,57.4133 -151.006,61.0026 -156.489,62.2453 -155.685,67.94 -140.543,85.4228 -166.825,64.2719 -180.516,60.9827 -175.481,56.3313 -178.594,57.1839 -179.899,44.1068 -188.413))
POLYGON((117.162 35.7683,136.609 37.8265,137.825 36.8336,138.697 36.9208,137.923 42.338,137.507 42.2789,137.23 44.2193,137.646 44.2784,137.59 44.664,137.165 44.6034,136.887 46.5429,137.314 46.6042,137.188 47.4854,150.877 48.8464,152.721 32.789,117.873 29.2627))
POLYGON((115.832 -45.2902,104.975 -46.2859,104.142 -37.6361,103.622 -37.7286,103.332 -34.3286,103.777 -34.0371,102.964 -25.0344,113.924 -23.9644))
POLYGON((66.5341 -28.0144,70.402 -33.9903,70.291 -34.3719,60.4063 -40.7947,59.9046 -40.7393,56.0238 -34.773,56.1665 -34.3699,56.3403 -34.274,66.1632 -27.8986))
POLYGON((105.673 -127.635,90.8711 -104.911,92.492 -102.406,115.3 -100.248,116.48 -111.052,112.586 -111.576,112.605 -111.81,111.09 -112.735,110.572 -112.55,110.365 -112.893,115.687 -121.196))
POLYGON((152.194 5.74036,111.195 -2.55811,111.11 -2.20564,107.405 -2.56147,106.262 7.74335,143.459 15.1823,142.642 22.9693,142.005 22.9033,141.67 26.0846,142.307 26.1507,141.68 32.1367,152.665 33.2868,154.612 15.5801,151.26 15.2101,151.847 9.88508,151.738 9.87268))
POLYGON((22.9004 58.0935,-1.28339 56.2459,-1.28339 62.8131,-0.817275 62.867,-0.1184 63.6614,5.80649 64.1534,6.12005 64.7858,8.92235 65.0356,9.85597 64.4913,21.9914 65.718))
I was able to figure it out myself and post the solution here in case it is useful to somebody else in the future. The solution is to make a connection between the indices of the polygons in the vector polys and the IDs of the constraints in the Constrained_triangulation_plus_2 object. Below is the adapted code, which includes the solution to the problem.
#include <vector>
#include <algorithm>
#include <fstream>
#include <CGAL/Exact_predicates_exact_constructions_kernel.h>
#include <CGAL/Polygon_2.h>
#include <CGAL/Constrained_Delaunay_triangulation_2.h>
#include <CGAL/Constrained_triangulation_plus_2.h>
#include <CGAL/Polyline_simplification_2/simplify.h>
#include <CGAL/Polyline_simplification_2/Squared_distance_cost.h>
#include <CGAL/IO/WKT.h>
// CGAL typedefs
typedef CGAL::Exact_predicates_exact_constructions_kernel Kernel;
typedef Kernel::Point_2 Point_2;
typedef CGAL::Polygon_2<Kernel> Polygon_2;
// CGAL classes needed for polyline simplification
namespace PS = CGAL::Polyline_simplification_2;
typedef PS::Vertex_base_2<Kernel> Vb;
typedef CGAL::Constrained_triangulation_face_base_2<Kernel> Fb;
typedef CGAL::Triangulation_data_structure_2<Vb, Fb> TDS;
typedef CGAL::Constrained_Delaunay_triangulation_2<Kernel, TDS, CGAL::Exact_predicates_tag> CDT;
typedef CGAL::Constrained_triangulation_plus_2<CDT> CT;
typedef PS::Stop_above_cost_threshold Stop;
typedef PS::Squared_distance_cost Cost;
int main()
{
// Read the polygons from the WKT file
std::ifstream is("polys.wkt");
std::vector<Polygon_2> polys;
do {
Polygon_2 p;
CGAL::IO::read_polygon_WKT(is, p);
if(!p.is_empty())
polys.push_back(p);
} while(is.good() && !is.eof());
// Insert the polygons into a Constrained_triangulation_plus_2 and keep
// track of the constraint IDs
CT ct;
std::vector<CT::Constraint_id> constraint_IDs;
for ( auto poly : polys )
{
CT::Constraint_id ID = ct.insert_constraint(poly);
constraint_IDs.push_back(ID);
}
// Simplify the polygons
PS::simplify(ct, Cost(), Stop(2.0));
// Convert the result back into normal polygons
std::vector<Polygon_2> simple_polys(polys.size());
for( CT::Constraint_iterator cit = ct.constraints_begin(); cit != ct.constraints_end(); ++cit)
{
// Find the index of the constraint ID in the constraint_IDs vector. This is the index
// of the original polygon the current simplified polygon corresponds to
auto it = std::find(constraint_IDs.begin(), constraint_IDs.end(), *cit);
size_t idx = it - constraint_IDs.begin();
// Obtain the simplified polygon
Polygon_2 poly;
for ( CT::Points_in_constraint_iterator vit = ct.points_in_constraint_begin(*cit);
vit != ct.points_in_constraint_end(*cit); ++vit) poly.push_back(*vit);
// Write the simplified polygon to the correct location
simple_polys[idx] = poly;
}
}

C++ Single Layer Multi Output Perceptron Weird Behaviour

Some background:
I wrote a single layer multi output perceptron class in C++. It uses the typical WX + b discriminant function and allows for user-defined activation functions. I have tested everything pretty thoroughly and it all seems to be working as I expect. I noticed a small logical error in my code, and when I attempted to fix it, the network performed much worse than before. The error is as follows:
I evaluate the value at each output neuron using the following code:
output[i] =
activate_(std::inner_product(weights_[i].begin(), weights_[i].end(),
features.begin(), -1 * biases_[i]));
Here I treat the bias input as a fixed -1, but when I apply the learning rule to each bias, I treat the input as +1.
// Bias can be treated as a weight with a constant feature value of 1.
biases_[i] = weight_update(1, error, learning_rate_, biases_[i]);
So I attempted to fix my mistake by changing the call to weight_update to be consistent with the output evaluation:
biases_[i] = weight_update(-1, error, learning_rate_, biases_[i]);
But doing so results in a 20% drop in accuracy!
I have been pulling my hair out for the past few days trying to find some other logical error in my code which might explain this strange behaviour, but have come up empty handed. Can anyone with more knowledge than I have provide any insight into this? I have provided the entire class below for reference. Thank you in advance.
#ifndef SINGLE_LAYER_PERCEPTRON_H
#define SINGLE_LAYER_PERCEPTRON_H
#include <algorithm>
#include <cassert>
#include <functional>
#include <numeric>
#include <vector>
#include "functional.h"
#include "random.h"
namespace qp {
namespace rf {
namespace {
template <typename Feature>
double weight_update(const Feature& feature, const double error,
const double learning_rate, const double current_weight) {
return current_weight + (learning_rate * error * feature);
}
template <typename T>
using Matrix = std::vector<std::vector<T>>;
} // namespace
template <typename Feature, typename Label, typename ActivationFn>
class SingleLayerPerceptron {
public:
// For testing only.
SingleLayerPerceptron(const Matrix<double>& weights,
const std::vector<double>& biases, double learning_rate)
: weights_(weights),
biases_(biases),
n_inputs_(weights.front().size()),
n_outputs_(biases.size()),
learning_rate_(learning_rate) {}
// Initialize the layer with random weights and biases in [-1, 1].
SingleLayerPerceptron(std::size_t n_inputs, std::size_t n_outputs,
double learning_rate)
: n_inputs_(n_inputs),
n_outputs_(n_outputs),
learning_rate_(learning_rate) {
weights_.resize(n_outputs_);
std::for_each(
weights_.begin(), weights_.end(), [this](std::vector<double>& wv) {
generate_back_n(wv, n_inputs_,
std::bind(random_real_range<double>, -1, 1));
});
generate_back_n(biases_, n_outputs_,
std::bind(random_real_range<double>, -1, 1));
}
std::vector<double> predict(const std::vector<Feature>& features) const {
std::vector<double> output(n_outputs_);
for (auto i = 0ul; i < n_outputs_; ++i) {
output[i] =
activate_(std::inner_product(weights_[i].begin(), weights_[i].end(),
features.begin(), -1 * biases_[i]));
}
return output;
}
void learn(const std::vector<Feature>& features,
const std::vector<double>& true_output) {
const auto actual_output = predict(features);
for (auto i = 0ul; i < n_outputs_; ++i) {
const auto error = true_output[i] - actual_output[i];
for (auto weight = 0ul; weight < n_inputs_; ++weight) {
weights_[i][weight] = weight_update(
features[weight], error, learning_rate_, weights_[i][weight]);
}
// Bias can be treated as a weight with a constant feature value of 1.
biases_[i] = weight_update(1, error, learning_rate_, biases_[i]);
}
}
private:
Matrix<double> weights_; // n_outputs x n_inputs
std::vector<double> biases_; // 1 x n_outputs
std::size_t n_inputs_;
std::size_t n_outputs_;
ActivationFn activate_;
double learning_rate_;
};
struct StepActivation {
double operator()(const double x) const { return x > 0 ? 1 : -1; }
};
} // namespace rf
} // namespace qp
#endif /* SINGLE_LAYER_PERCEPTRON_H */
I ended up figuring it out...
My fix was indeed correct and the loss of accuracy was just a consequence of having a lucky (or unlucky) dataset.
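For anyone comparing the two bias conventions, here is a small self-contained sketch (not part of the original class; the numbers are arbitrary) showing that either sign works as long as prediction and update agree, which is the consistency the fix above restores:
#include <iostream>
// Same update rule as in the class above.
double weight_update(double feature, double error, double lr, double w) {
    return w + lr * error * feature;
}
int main() {
    double x = 0.7, w = 0.3, b = 0.1, target = 1.0, lr = 0.05;
    // Convention A: net = w*x + b, so the bias is a weight on a constant +1 input.
    double error_a = target - (w * x + b);
    double b_a = weight_update(+1.0, error_a, lr, b);
    // Convention B: net = w*x - b, so the bias is a weight on a constant -1 input.
    double error_b = target - (w * x - b);
    double b_b = weight_update(-1.0, error_b, lr, b);
    // The original bug mixed the two: predicting with -b but updating with +1.
    std::cout << "b (convention A): " << b_a << "\n"
              << "b (convention B): " << b_b << "\n";
}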

Using CURAND inside a Thrust functor

Is it possible to use CURAND together with Thrust inside a device functor? A minimum code example can be:
#include <thrust/device_vector.h>
#include <iostream>
#include <iterator>
struct Move
{
Move() {}
using Position = thrust::tuple<double, double>;
__host__ __device__
Position operator()(Position p)
{
thrust::get<0>(p) += 1.0; // use CURAND to add a random N(0,1)
thrust::get<1>(p) += 1.0; // use CURAND to add a random N(0,1)
return p;
}
};
int main()
{
// Create vectors on device
thrust::device_vector<double> p1(10, 0.0);
thrust::device_vector<double> p2(10, 0.0);
// Create zip iterators
auto pBeg = thrust::make_zip_iterator(thrust::make_tuple(p1.begin(), p2.begin()));
auto pEnd = thrust::make_zip_iterator(thrust::make_tuple(p1.end(), p2.end() ));
// Move points in the vectors
thrust::transform(pBeg, pEnd, pBeg, Move());
// Print result (just for debug)
thrust::copy(p1.begin(), p1.end(), std::ostream_iterator<double>(std::cout, "\n"));
thrust::copy(p2.begin(), p2.end(), std::ostream_iterator<double>(std::cout, "\n"));
return 0;
}
What is the right way to create random numbers inside the operator function?
Is it possible to use CURAND together with Thrust inside a device functor?
Yes, it's possible. As indicated by @m.s., most of what you need from curand can be obtained from the curand device API example in the curand documentation. (In fact, there is even a full thrust/curand sample code in the documentation here.)
We can mimic the behavior of the setup kernel indicated there with a thrust algorithm call, e.g. thrust::for_each_n, to set up the initial curand state variables for each device vector element.
After that, it is only necessary to pass the initialized curand state to your Move functor via an additional iterator in your zip iterators, and then call curand_uniform (for example) within the functor.
Here is a fully worked example, based on your code:
$ cat t20.cu
#include <thrust/device_vector.h>
#include <curand_kernel.h>
#include <iostream>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform.h>
#include <thrust/for_each.h>
const int seed = 1234;
const int ds = 10;
const int offset = 0;
struct Move
{
Move() {}
using Position = thrust::tuple<double, double, curandState>;
__device__
Position operator()(Position p)
{
curandState s = thrust::get<2>(p);
thrust::get<0>(p) += curand_uniform(&s); // use CURAND to add a random N(0,1)
thrust::get<1>(p) += curand_uniform(&s); // use CURAND to add a random N(0,1)
thrust::get<2>(p) = s;
return p;
}
};
struct curand_setup
{
using init_tuple = thrust::tuple<int, curandState &>;
__device__
void operator()(init_tuple t){
curandState s;
int id = thrust::get<0>(t);
curand_init(seed, id, offset, &s);
thrust::get<1>(t) = s;
}
};
int main()
{
// Create vectors on device
thrust::device_vector<double> p1(ds, 0.0);
thrust::device_vector<double> p2(ds, 0.0);
thrust::device_vector<curandState> s1(ds);
// Create zip iterators
auto pBeg = thrust::make_zip_iterator(thrust::make_tuple(p1.begin(), p2.begin(), s1.begin()));
auto pEnd = thrust::make_zip_iterator(thrust::make_tuple(p1.end(), p2.end(), s1.end() ));
auto pInit = thrust::make_zip_iterator(thrust::make_tuple(thrust::counting_iterator<int>(0), s1.begin()));
// initialize random generator
thrust::for_each_n(pInit, ds, curand_setup());
// Move points in the vectors
thrust::transform(pBeg, pEnd, pBeg, Move());
// Print result (just for debug)
thrust::copy(p1.begin(), p1.end(), std::ostream_iterator<double>(std::cout, "\n"));
thrust::copy(p2.begin(), p2.end(), std::ostream_iterator<double>(std::cout, "\n"));
return 0;
}
$ nvcc -arch=sm_61 -std=c++11 t20.cu -o t20 -lcurand
$ ./t20
0.145468
0.820181
0.550399
0.29483
0.914733
0.868979
0.321921
0.782857
0.0113023
0.28545
0.434899
0.926417
0.811845
0.308556
0.557235
0.501246
0.206681
0.123377
0.539587
0.198575
$
Regarding this question:
What is the right way to create random numbers inside the operator function?
There is no problem with using curand in thrust, but you might also want to be aware that thrust has a built-in RNG facility, and there is a fully worked usage example here.
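As a rough illustration of that built-in facility (this is only a sketch, not the linked example; it seeds one thrust::default_random_engine per element from a counting iterator, and a real program would want to decorrelate the seeds, e.g. by hashing or discarding, as the thrust examples do), the same point-moving idea might look like this:
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>
#include <thrust/transform.h>
#include <thrust/random.h>
#include <thrust/copy.h>
#include <iostream>
#include <iterator>
struct MoveBuiltin
{
    using Input  = thrust::tuple<double, double, unsigned int>;
    using Output = thrust::tuple<double, double>;
    __host__ __device__
    Output operator()(Input t) const
    {
        // One engine per element, seeded from the element index.
        thrust::default_random_engine rng(thrust::get<2>(t));
        thrust::uniform_real_distribution<double> dist(0.0, 1.0);
        double x = thrust::get<0>(t) + dist(rng);
        double y = thrust::get<1>(t) + dist(rng);
        return thrust::make_tuple(x, y);
    }
};
int main()
{
    const int n = 10;
    thrust::device_vector<double> p1(n, 0.0), p2(n, 0.0);
    // Input carries the point plus its index; output is just the point.
    auto inBeg  = thrust::make_zip_iterator(thrust::make_tuple(
        p1.begin(), p2.begin(), thrust::counting_iterator<unsigned int>(0)));
    auto outBeg = thrust::make_zip_iterator(thrust::make_tuple(p1.begin(), p2.begin()));
    thrust::transform(inBeg, inBeg + n, outBeg, MoveBuiltin());
    thrust::copy(p1.begin(), p1.end(), std::ostream_iterator<double>(std::cout, "\n"));
    return 0;
}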

boost::spirit::karma alternative selection based on properties of the input

I'm trying to write a boost::spirit::karma generator where some of the output depends on non-trivial properties of the input values.
The actual problem is part of a larger grammar, but this example has the same properties as several of the other troublesome rules and is actually one of the grammar rules that is causing me trouble.
I'll start with a minimal example that is almost what I want, and then work from there.
#include <boost/spirit/include/karma.hpp>
#include <boost/spirit/home/phoenix.hpp>
#include <boost/fusion/adapted.hpp>
#include <string>
#include <vector>
#include <iostream>
#include <limits>
template<typename OutputIterator_T>
struct Test_Grammar :
boost::spirit::karma::grammar<OutputIterator_T, std::vector<double>()>
{
Test_Grammar() : Test_Grammar::base_type(start), start(), value()
{
namespace karma = boost::spirit::karma;
start
= *(value % karma::lit(", "))
;
value
= (karma::double_)
;
}
boost::spirit::karma::rule<OutputIterator_T, std::vector<double>()> start;
boost::spirit::karma::rule<OutputIterator_T, double()> value;
};
template <typename OutputIterator_T>
bool generate_output(OutputIterator_T& sink, std::vector<double> const& data)
{
Test_Grammar<OutputIterator_T> grammar;
return (boost::spirit::karma::generate(sink, grammar, data));
}
int main (int, char**)
{
std::string generated;
std::back_insert_iterator<std::string> sink(generated);
std::vector<double> data{1.5, 0.0, -2.5,
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::infinity()};
generate_output(sink, data);
std::cout << generated << std::endl;
return 0;
}
The above code defines a grammar that, when fed with the test data, produces the output
1.5, 0.0, -2.5, nan, inf
However, the output that I want is
1.5, 0.0, -2.5, special, special
If I replace the value part of the grammar with
value
= (&karma::double_(std::numeric_limits<double>::quiet_NaN()) <<
karma::lit("special"))
| (&karma::double_(std::numeric_limits<double>::infinity()) <<
karma::lit("special"))
| (karma::double_)
;
I get the desired behavior for infinity. However, I do not get the desired result for NaN since NaN has the property that (NaN != NaN) in comparisons. So I need a way to use the fpclassify macros/functions such as isfinite().
I should be able to get what I want by replacing the value part of the grammar with
value
= (karma::eps(...) << karma::lit("special"))
| (karma::double_)
;
However, every combination of function calls, function pointers, and bind incantations that I've tried for the ... part has resulted in compiler errors.
Any help would be much appreciated.
UPDATE:
Sehe provided an excellent general solution (which I have accepted). Thank you!
For my particular use case, I was able to further simplify sehe's answer and wanted to document that here for others.
After changing all of the includes from <boost/spirit/home/*> to <boost/spirit/include/*> and defining BOOST_SPIRIT_USE_PHOENIX_V3 before those includes, I added the following line
BOOST_PHOENIX_ADAPT_FUNCTION(bool, isfinite_, std::isfinite, 1)
and changed the value part of the grammar to this
value
%= karma::double_[karma::_pass = isfinite_(karma::_1)]
| karma::lit("special")
;
I'd use the semantic action to dynamically "fail" the double_ generator:
value
%= karma::double_ [ karma::_pass = !(isnan_(karma::_1) || isinf_(karma::_1)) ]
| karma::lit("special")
;
Now, how do we get isnan_ and isinf_ implemented? I prefer to use Phoenix V3 (which will be the default in all coming releases of Boost):
BOOST_PHOENIX_ADAPT_FUNCTION(bool, isnan_, std::isnan, 1)
BOOST_PHOENIX_ADAPT_FUNCTION(bool, isinf_, std::isinf, 1)
That's all. See it Live On Coliru
Notes
use %= to get automatic attribute propagation even though there is a semantic action
include include/*.hpp instead of home/*.hpp
Full Listing:
#define BOOST_SPIRIT_USE_PHOENIX_V3
#include <boost/spirit/include/karma.hpp>
#include <boost/spirit/include/phoenix.hpp>
#include <boost/spirit/include/phoenix_function.hpp>
#include <boost/fusion/adapted.hpp>
#include <string>
#include <vector>
#include <iostream>
#include <limits>
#include <cmath>
BOOST_PHOENIX_ADAPT_FUNCTION(bool, isnan_, std::isnan, 1)
BOOST_PHOENIX_ADAPT_FUNCTION(bool, isinf_, std::isinf, 1)
template<typename OutputIterator_T>
struct Test_Grammar :
boost::spirit::karma::grammar<OutputIterator_T, std::vector<double>()>
{
Test_Grammar() : Test_Grammar::base_type(start), start(), value()
{
namespace karma = boost::spirit::karma;
namespace phx = boost::phoenix;
start
= *(value % karma::lit(", "))
;
value
%= karma::double_ [ karma::_pass = !(isnan_(karma::_1) || isinf_(karma::_1)) ]
| karma::lit("special")
;
}
boost::spirit::karma::rule<OutputIterator_T, std::vector<double>()> start;
boost::spirit::karma::rule<OutputIterator_T, double()> value;
};
template <typename OutputIterator_T>
bool generate_output(OutputIterator_T& sink, std::vector<double> const& data)
{
Test_Grammar<OutputIterator_T> grammar;
return (boost::spirit::karma::generate(sink, grammar, data));
}
int main (int, char**)
{
std::string generated;
std::back_insert_iterator<std::string> sink(generated);
std::vector<double> data{1.5, 0.0, -2.5,
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::infinity()};
generate_output(sink, data);
std::cout << generated << std::endl;
return 0;
}
Output
1.5, 0.0, -2.5, special, special

Retrieve Object Position for Projection Shadow Calculation

I am working on a class project and I need to implement shadows into our project, using OpenGL-esque coding. Our specific project is a spider running around a house. I need to make shadows appear on the floor below the spider (I don't care about shadows for anything else).
The main question I need answered is this: when calculating shadows, how do I retrieve a point on the spider? In our notes, it says we have three givens in order to begin the initial calculations: 1) the plane (we know A, B, C, D), 2) the light source at (Lx, Ly, Lz), and 3) a point q in space: the shadow caster.
I do not understand how to retrieve "q". We load the spider mesh using "Mesh" and "SuperMesh" classes, and pass the mesh onto the vertex shader, but I do not understand how to reference points on the spider while on the CPU side.
I'm hesitant to post all of my code, mainly because this project uses a special library "llgl" that's based on OpenGL that our teacher made explicitly for this class (makes debugging super easy, right?), and I'm not entirely sure what all would be needed in order to have my question properly answered. I'll include the Mesh and SuperMesh classes, as well as the declarations for loading the spider and passing it to the vertex shader as a matrix (which is more relevant). If you need more code, please request it. Hopefully it's more of a "theory" response than a syntax response, so I don't frustrate anyone trying to understand our code, haha.
Thank you for your time.
Main: (literally all of the excess code is cut out for simplicity)
...
SuperMesh* spider = new SuperMesh("spider/SmoothSpider.spec.mesh");
mat4 spiderR = rotationY(M_PI); // Rotation matrix for spider
vec3 spiderT(0,-2.5,-8.0); // Translation for spider
...
... (in draw)
//prog1 simply defines the txt files for the vertex and fragment shader
prog1->setUniform("worldMatrix", // sets world matrix for spider
spiderR * translation(vec4(spiderT.x,
spiderT.y,
spiderT.z,
0.0)));
spider->draw(prog1);
...
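Purely as a sketch of the idea (this does not use llgl; the little vector/matrix types below are stand-ins for its mat4/vec4): the point q is just any object-space point on the spider, e.g. a mesh vertex or the model origin, pushed through the same world transform that is sent to the shader, computed on the CPU side:
#include <array>
#include <cstdio>
// Stand-in math types (row-major); in the project llgl's mat4/vec4 would be used.
using Vec4 = std::array<float, 4>;
using Mat4 = std::array<std::array<float, 4>, 4>;
Vec4 mul(const Mat4& m, const Vec4& v)
{
    Vec4 r{0.f, 0.f, 0.f, 0.f};
    for (int i = 0; i < 4; ++i)
        for (int j = 0; j < 4; ++j)
            r[i] += m[i][j] * v[j];
    return r;
}
int main()
{
    // Stand-in for spiderR * translation(spiderT): here just the translation part.
    Mat4 world{{{1.f, 0.f, 0.f,  0.f},
                {0.f, 1.f, 0.f, -2.5f},
                {0.f, 0.f, 1.f, -8.0f},
                {0.f, 0.f, 0.f,  1.f}}};
    // An object-space point on the spider (w = 1 marks a position, not a direction).
    Vec4 objectPoint{0.f, 0.f, 0.f, 1.f};
    // q: the same point in world space, usable as the shadow caster in the
    // plane/light-source projection described in the notes.
    Vec4 q = mul(world, objectPoint);
    std::printf("q = (%g, %g, %g)\n", q[0], q[1], q[2]);
    return 0;
}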
Mesh: (Hopefully not needed)
#ifndef MESH_H
#define MESH_H
#include "llgl.h"
#include <string>
#include <vector>
using namespace std;
using namespace llgl;
class Mesh{
public:
string fname;
bool initialized;
vector<uint8_t> vdata;
vector<uint8_t> idata;
vec4 color;
vec4 scolor;
int isize;
GLenum ienum;
unsigned vbuff,ibuff;
uint32_t ni;
TextureX* diffusetex;
TextureX* ramp;//used for cell shading, this is the "gradient"
Mesh(string fname);
void setup();
void draw(Program& prog);
void draw(Program* prog){ draw(*prog); }
};
#endif
SuperMesh:
#pragma once
#include "Mesh.h"
#include <iostream>
#include <vector>
#include <string>
#include <fstream>
using namespace std;
struct SuperMesh
{
vector<Mesh*> m;
string fname;
bool loaded;
SuperMesh(string specfile)
{
fname = specfile;
loaded = false;
}
void load()
{
if(loaded)
return;
ifstream in(fname.c_str());
if(!in.good())
throw runtime_error("Cannot load file " + fname);
string pfx;
unsigned slashidx;
slashidx = fname.find("/");
if(slashidx == string::npos)
slashidx = fname.find("\\");
if(slashidx != string::npos)
pfx = fname.substr(0, slashidx) + "/";
while(1)
{
string s;
getline(in, s);
if(in.fail())
break;
if(s.find("binarymesh ") == 0)
{
string fname = s.substr(11);
m.push_back(new Mesh(pfx + fname));
m.back()->setup();
}
}
loaded = true;
}
void draw(Program* p)
{
if(!loaded)
load();
for(unsigned i = 0; i < m.size(); ++i)
{
m[i]->draw(p);
}
}
};