A little bit of background: I have some strangely nested loops which I converted to a flat work queue (basically collapsing the single-index loops into one multi-index loop). Right now each loop is hand-coded.
I am trying to generalize the approach to work with any bounds using lambda expressions:
For example:
// RANGE(i,I,N) is basically a macro to generate `int i = I; i < N; ++i `
// for (RANGE(lb, N)) {
// for (RANGE(jb, N)) {
// for (RANGE(kb, max(lb, jb), N)) {
// for (RANGE(ib, jb, kb+1)) {
// is equivalent to something like (overload , to produce range)
flat<1, 3, 2, 4>((_2, _3+1), (max(_4,_3), N), N, N)
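To make the background concrete, this is roughly what one of those hand-coded flattened loops looks like for the bounds above (a sketch with an index layout chosen for illustration; the real hand-coded loops are not shown):
#include <algorithm>
#include <boost/array.hpp>

// Flattened equivalent of the four nested loops above, advancing a single
// multi-index with "carry" propagation. Layout (an assumption for this
// sketch): idx[0]=ib (innermost), idx[1]=kb, idx[2]=jb, idx[3]=lb.
void visit_all(int N) {
    boost::array<int, 4> idx = {{0, 0, 0, 0}};
    idx[1] = std::max(idx[3], idx[2]); // kb starts at max(lb, jb)
    idx[0] = idx[2];                   // ib starts at jb
    while (idx[3] < N) {
        // ... do work with idx ...
        ++idx[0];                                          // advance ib
        if (idx[0] >= idx[1] + 1) {                        // carry into kb
            ++idx[1];
            if (idx[1] >= N) {                             // carry into jb
                ++idx[2];
                if (idx[2] >= N) { ++idx[3]; idx[2] = 0; } // carry into lb
                idx[1] = std::max(idx[3], idx[2]);
            }
            idx[0] = idx[2];
        }
    }
}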
the internals of flat are something like:
template<size_t I1, size_t I2, ...,
         class L1_, class L2_, ...>
boost::array<int,4> flat(L1_ L1, L2_ L2, ...) {
//boost::array<int,4> current; class or static variable
// basically, the code below this is going to be done using recursion templates
// but to do that I need to apply lambda expression to current array
// to get runtime bounds
bool advance;
L2_ l2 = L2.bind(current); // bind current value to lambda
{
L1_ l1 = L1.bind(current); //bind current value to innermost lambda
l1.next();
advance = !(l1 < l1.upper()); // some internal logic
if (advance) {
l2.next();
current[0] = l1.lower();
}
}
//...,
}
My question is: can you give me some ideas on how to write a lambda (derived from Boost) which can be bound to an index array reference to return upper and lower bounds according to the lambda expression?
Thank you very much.
Bummer, Boost.Lambda only supports three placeholders.
Well, this is the prototype so far:
#include <boost/array.hpp>
#include <boost/lambda/lambda.hpp>

namespace generator {

// there is no _1 because it's innermost
// and lambda only has three placeholders
boost::lambda::placeholder1_type _2;
boost::lambda::placeholder2_type _3;
boost::lambda::placeholder3_type _4;

template<class L, class U>
struct range_ {
    typedef boost::array<int,4> index_type;
    range_(L lower, U upper) : lower_(lower), upper_(upper) {}
    size_t lower(const index_type &index) {
        return lower_(index[1], index[2], index[3]);
    }
    size_t upper(const index_type &index) {
        return upper_(index[1], index[2], index[3]);
    }
    L lower_; U upper_;
};

template<class L, class U>
range_<L,U> range(L lower, U upper) {
    return range_<L,U>(lower, upper);
}

template<class R1, class R2, class R3, class R4>
struct for_ {
    typedef boost::array<int,4> index_type;
    index_type index;
    R1 r1_; R2 r2_; R3 r3_; R4 r4_;
    for_(R1 r1, R2 r2, R3 r3, R4 r4)
        : r1_(r1), r2_(r2), r3_(r3), r4_(r4) {}
    index_type next() {
        index_type next = index;

        bool advance = false;
        index[0] += 1;

        advance = !(index[0] < r1_.upper(index));
        if (advance) {
            index[1] += 1;
            index[0] = r1_.lower(index);
        }

        advance = advance && !(index[1] < r2_.upper(index));
        if (advance) {
            index[2] += 1;
            index[1] = r2_.lower(index);
            index[0] = r1_.lower(index);
        }

        advance = advance && !(index[2] < r3_.upper(index));
        if (advance) {
            index[3] += 1;
            index[2] = r3_.lower(index);
            index[1] = r2_.lower(index);
            index[0] = r1_.lower(index);
        }

        //std::cout << next << std::endl;
        return next;
    }
};

template<class R1, class R2, class R3, class R4>
for_<R1, R2, R3, R4> For(R1 r1, R2 r2, R3 r3, R4 r4) {
    return for_<R1, R2, R3, R4>(r1, r2, r3, r4);
}

}
Example (probably broken, needs a few more functions):
using namespace generator;
For(range(_2, _3), range(std::max(_3, _4), N), range(N), range(N));
I am continuing my motion-planning algorithm with a recursive Node structure and a Boost.Geometry rtree. However, I'm running into a couple of issues where the value at a Node pointer keeps changing. Here is an MWE:
#include <algorithm>
#include <iostream>
#include <cmath>
#include <stdlib.h>
#include <stdio.h>
#include <string>
#include <vector>
#include <set>
#include <boost/geometry.hpp>
#include <boost/geometry/geometry.hpp>
#include <boost/geometry/geometries/point.hpp>
#include <boost/geometry/geometries/box.hpp>
#include <boost/geometry/geometries/register/point.hpp>
#include <boost/geometry/index/rtree.hpp>
using namespace std;
namespace bg = boost::geometry;
namespace bgi = boost::geometry::index;
typedef bg::model::point<double, 2, bg::cs::cartesian> point;
struct Node
{
struct Node *parent;
point pos;
int id;
};
typedef pair<Node, unsigned int> ptval;
BOOST_GEOMETRY_REGISTER_POINT_2D_GET_SET(Node, double, bg::cs::cartesian, pos.get<0>, pos.get<1>, pos.set<0>, pos.set<1>);
double px(point& p) {
return p.get<0>();
}
double nx(Node *n) {
return px(n->pos);
}
double py(point& p)
{
return p.get<1>();
}
double ny(Node *n)
{
return py(n->pos);
}
int main() {
point start(0, 0), end(200, 200);
Node *root = new Node;
root->parent = NULL;
root->pos = start;
root->id = 0;
Node *last = root;
bgi::rtree<ptval, bgi::quadratic<16>> pts;
pts.insert(make_pair(*root, 0));
int c = 1; // running count
srand(time(nullptr)); // seed rng
vector<int> pars; // actual parent ids
pars.push_back(-1);
Node *save;
int marker = 500;
while (c<2000)
{
// generate random pos
double xr = ((double)rand() / RAND_MAX) * 200;
double yr = ((double)rand() / RAND_MAX) * 200;
point random(xr, yr);
vector<ptval> res;
pts.query(bgi::nearest(random, 1), back_inserter(res)); // get nearest point in rtree
Node *pnearest = &(res[0].first);
Node *pnew = new Node;
pnew->pos = random;
pnew->parent = pnearest; pnew->id = c; if (c == marker) { save = pnew; }
pars.push_back(pnearest->id);
if (c > marker && c < marker+50 && save->parent->id != pars[marker]) { cout << "CHANGED " << c << " " << pars[marker] << " " << save->parent->id << " " << save->parent << " " << nx(save->parent) << " " << ny(save->parent) << endl; }
pts.insert(make_pair(*pnew, c));
last = pnew;
c++;
}
cout << "ULTIMATE " << pars[marker] << " " << save->parent->id << endl;
return 0;
}
An example output is
CHANGED 501 416 118 0x257c1b0 134.416 103.623
CHANGED 502 416 412 0x257c1b0 187.164 150.841
CHANGED 503 416 190 0x257c1b0 176.128 162.548
CHANGED 504 416 212 0x257c1b0 68.16 167.425
CHANGED 505 416 487 0x257c1b0 0.701926 114.237
CHANGED 506 416 61 0x257c1b0 16.8645 91.7386
CHANGED 507 416 221 0x257c1b0 160.991 62.9841
CHANGED 508 416 439 0x257c1b0 65.627 130.284
CHANGED 509 416 203 0x257c1b0 146.312 189.367
CHANGED 510 416 140 0x257c1b0 164.946 30.2683
CHANGED 511 416 76 0x257c1b0 193.194 146.336
CHANGED 512 416 286 0x257c1b0 29.8898 124.509
CHANGED 513 416 14 0x257c1b0 88.4732 88.3816
CHANGED 514 416 340 0x257c1b0 179.907 93.4538
CHANGED 515 416 409 0x257c1b0 26.5389 94.4609
CHANGED 516 416 488 0x257c1b0 98.8983 12.36
CHANGED 517 416 256 0x257c1b0 141.984 180.651
CHANGED 518 416 256 0x257c1b0 141.984 180.651
CHANGED 519 416 256 0x257c1b0 141.984 180.651
CHANGED 520 416 256 0x257c1b0 141.984 180.651
CHANGED 521 416 256 0x257c1b0 141.984 180.651
CHANGED 522 416 256 0x257c1b0 141.984 180.651
CHANGED 523 416 256 0x257c1b0 141.984 180.651
CHANGED 524 416 256 0x257c1b0 141.984 180.651
CHANGED 525 416 256 0x257c1b0 141.984 180.651
CHANGED 526 416 256 0x257c1b0 141.984 180.651
CHANGED 527 416 256 0x257c1b0 141.984 180.651
CHANGED 528 416 256 0x257c1b0 141.984 180.651
CHANGED 529 416 256 0x257c1b0 141.984 180.651
CHANGED 530 416 256 0x257c1b0 141.984 180.651
CHANGED 531 416 256 0x257c1b0 141.984 180.651
CHANGED 532 416 256 0x257c1b0 141.984 180.651
CHANGED 533 416 256 0x257c1b0 141.984 180.651
CHANGED 534 416 256 0x257c1b0 141.984 180.651
CHANGED 535 416 256 0x257c1b0 141.984 180.651
CHANGED 536 416 256 0x257c1b0 141.984 180.651
CHANGED 537 416 256 0x257c1b0 141.984 180.651
CHANGED 538 416 256 0x257c1b0 141.984 180.651
CHANGED 539 416 256 0x257c1b0 141.984 180.651
CHANGED 540 416 256 0x257c1b0 141.984 180.651
CHANGED 541 416 256 0x257c1b0 141.984 180.651
CHANGED 542 416 256 0x257c1b0 141.984 180.651
CHANGED 543 416 256 0x257c1b0 141.984 180.651
CHANGED 544 416 256 0x257c1b0 141.984 180.651
CHANGED 545 416 256 0x257c1b0 141.984 180.651
CHANGED 546 416 256 0x257c1b0 141.984 180.651
CHANGED 547 416 256 0x257c1b0 141.984 180.651
CHANGED 548 416 256 0x257c1b0 141.984 180.651
CHANGED 549 416 256 0x257c1b0 141.984 180.651
ULTIMATE 416 599
As you can see, for all of these iterations, save->parent->id differs from the actual id of the parent (416) recorded when save was processed on its iteration, and for some reason save->parent->id fluctuates over the first few of these iterations. However, the pointer address remains the same. What is happening in heap memory that causes these values to fluctuate and then stop, while still being wrong, even though the pointer address stays the same? This is very weird. I suspect it may have something to do with the Boost rtree, as I store a direct reference to each Node in it and Boost may be doing some underlying operations, but that is just a theory. I may also just be a bit misguided about how pointers work.
Does anyone know how to fix the issue?
I'm assuming the missing functions are:
double px(point& p) { return p.get<0>(); }
double py(point& p) { return p.get<1>(); }
double nx(Node* n) { return px(n->pos); }
double ny(Node* n) { return py(n->pos); }
The first real issue I see is:
pts.insert(make_pair(*root, 0));
That inserts a copy of the object pointed to by root. Any references will not point to the copy. root is always leaked for no reason.
This is a better start that might actually work until the tree invalidates references:
pts.insert(std::make_pair(Node{nullptr, start, 0}, 0));
Node const* last = &pts.begin()->first;
Same here:
pts.insert(std::make_pair(*pnew, id));
last = pnew;
*pnew is always leaked, and you didn't want last to point to it. To "fix" it naively, I'd use a helper function:
auto insert = [&pts](Node n) -> Node const* {
pts.insert(std::make_pair(n, n.id));
auto it = std::find_if(pts.begin(), pts.end(),
[&n](ptval const& p) { return p.second == n.id; });
return &it->first;
};
Node const* last = insert(Node{nullptr, start, 0});
As others have noted, this is definitely wrong:
ptval res;
pts.query(bgi::nearest(newpos, 1), &res);
Node* pnearest = &(res.first);
This by definition takes the address of the local copy of a ptval's Node element. Instead, let's use the same naive (probably enormously inefficient) approach:
auto find = [&pts](int id) -> Node const* {
auto it = std::find_if(pts.begin(), pts.end(),
[id](ptval const& p) { return p.second == id; });
return &it->first;
};
auto insert = [&, find](Node n) -> Node const* {
pts.insert(std::make_pair(n, n.id));
return find(n.id);
};
Now you can have:
Node const* last = insert(Node{nullptr, start, 0}); // insert root
As well as:
ptval res;
pts.query(bgi::nearest(newpos, 1), &res);
Node new_node{find(res.second), newpos, id};
if (id == marker) {
marker_parent = new_node.parent;
}
parents.push_back(new_node.parent->id);
Next up
srand(time(nullptr)); // seed rng
// generate random pos
double xr = ((double)rand() / RAND_MAX) * 200;
double yr = ((double)rand() / RAND_MAX) * 200;
Prefer standard library:
std::mt19937 prng{std::random_device{}()};
std::uniform_real_distribution<double> dist(0, 200);
// generate random pos
double xr = dist(prng);
double yr = dist(prng);
Next up
Node* save;
Prefer to initialize:
Node* save = nullptr;
Know your loops.
int c = 1; // running count
// ...
while (c < 2000) {
//...
c++;
}
Should just be
for (int id = 1; id<2000; ++id) {
//...
}
Note the naming as well.
Don't use using namespace std; (Why is "using namespace std;" considered bad practice?)
Applying all this leads me to:
Live On Compiler Explorer
//#include <algorithm>
#include <iostream>
//#include <cmath>
//#include <stdlib.h>
//#include <stdio.h>
//#include <string>
//#include <vector>
//#include <set>
#include <boost/geometry.hpp>
#include <boost/geometry/geometries/box.hpp>
#include <boost/geometry/geometries/point.hpp>
#include <boost/geometry/geometries/register/point.hpp>
#include <boost/geometry/geometry.hpp>
#include <boost/geometry/index/rtree.hpp>
#include <random>
namespace bg = boost::geometry;
namespace bgi = boost::geometry::index;
typedef bg::model::point<double, 2, bg::cs::cartesian> point;
struct Node {
Node const* parent = nullptr;
point pos = {};
int id = -1;
};
typedef std::pair<Node, int> ptval;
BOOST_GEOMETRY_REGISTER_POINT_2D_GET_SET(Node, double, bg::cs::cartesian,
pos.get<0>, pos.get<1>, pos.set<0>,
pos.set<1>)
double px(point const& p) { return p.get<0>(); }
double py(point const& p) { return p.get<1>(); }
double nx(Node const* n) { return px(n->pos); }
double ny(Node const* n) { return py(n->pos); }
int main(int argc, char** argv) {
point start(0, 0) /*, end(200, 200)*/;
bgi::rtree<ptval, bgi::quadratic<16>> pts;
auto find = [&pts](int id) -> Node const* {
auto it = std::find_if(pts.begin(), pts.end(),
[id](ptval const& p) { return p.second == id; });
return &it->first;
};
auto insert = [&, find](Node n) -> Node const* {
pts.insert(std::make_pair(n, n.id));
return find(n.id);
};
Node const* last = insert(Node{nullptr, start, 0}); // insert root
std::mt19937 prng{argc > 1 ? atoi(argv[1]) : std::random_device{}()};
std::uniform_real_distribution<double> dist(0, 200);
std::vector<int> parents{-1}; // actual parent ids
Node const* marker_parent = nullptr;
int marker = 500;
for (int id = 1; id < 2000; ++id) {
// generate random pos
point newpos(dist(prng), dist(prng));
ptval res;
pts.query(bgi::nearest(newpos, 1), &res);
Node new_node{find(res.second), newpos, id};
if (id == marker) {
marker_parent = new_node.parent;
}
parents.push_back(new_node.parent->id);
if (id > marker && id < marker + 50 &&
marker_parent->id != parents[marker]) {
std::cout << "CHANGED " << id << " " << parents[marker] << " "
<< marker_parent->id << " " << marker_parent
<< " " << nx(marker_parent) << " "
<< ny(marker_parent) << std::endl;
}
last = insert(new_node);
}
std::cout << "ULTIMATE " << parents[marker] << " ";
if (marker_parent)
std::cout << marker_parent->id << std::endl;
else
std::cout << std::endl;
}
Prints (with the seed fixed at 42 for online demo):
ULTIMATE 307 1622
The funny thing is, as far as I can see last is never used. So you can probably make do with something much simpler and more efficient:
auto insert = [&](Node n) { pts.insert(std::make_pair(n, n.id)); };
Isolating The Bug - Root Cause
Note above when I said "might actually work until the tree invalidates references". Sadly I was unable to find any documentation of reference stability/invalidation guarantees for rtree¹.
With different seeds we still get output like e.g. for seed 47:
CHANGED 501 189 38 0x617000004e30 26.0044 140.042
...
CHANGED 549 189 38 0x617000004e30 26.0044 140.042
ULTIMATE 189 474
Obviously, at least all the lines are identical (except for the running counter 501..549). Comparing seed 40:
CHANGED 517 108 500 0x560957ad1638 109.71 53.341
...
CHANGED 549 108 500 0x560957ad1638 109.71 53.341
ULTIMATE 108 590
This illustrates that sometimes the marker "randomly" changes during the run. The problem then, clearly, is that the rtree can reallocate, invalidating Node references.
The number 517 is interesting to me for its relation to 16 in bgi::quadratic<16>. So, I thought to make a much more granular test instead of randomly monitoring the 500th "marker". Why not monitor all parents?
constexpr auto N = 2000;
std::vector<Node const*> parents(N, nullptr); // parents refs
std::vector<int> parent_ids(N, -1); // parent ids
for (int id = 1; id < N; ++id) {
// generate random pos
point newpos(dist(prng), dist(prng));
ptval res;
pts.query(bgi::nearest(newpos, 1), &res);
Node new_node{find(res.second), newpos, id};
parents[id] = new_node.parent;
parent_ids[id] = new_node.parent->id;
bool invalidated = !std::equal(
parents.begin(), parents.end(), parent_ids.begin(),
[](Node const* p, int id) { return !p || p->id == id; });
std::cout << "invalidated: " << std::boolalpha << invalidated << "\n";
//assert(!invalidated);
insert(new_node);
}
Now running this a few hundred times with random seeds shows a pattern of always invalidating after 16 nodes:
$ for a in {1..100}; do ./build/sotest; done | uniq -c | sort | uniq
16 invalidated: false
1983 invalidated: true
1 ULTIMATE 101 1393
1 ULTIMATE 103 262
1 ULTIMATE 103 5
1 ULTIMATE 104 536
1 ULTIMATE 108 1406
1 ULTIMATE 111 111
This confirms beyond any statistical doubt that reallocation is causing the references (i.e. the pointers) to become invalidated.
FIXING
I suggest keeping the parent references by id only. Interestingly, since id is already part of Node you can do without the duplication in std::pair by using a custom `indexable<>` or custom `IndexableGetter` template argument.
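For completeness, a minimal sketch of the "parent by id" alternative (purely illustrative, not the approach taken below; it assumes ids are dense, nodes are only ever appended, and it reuses the point typedef from above):
#include <vector>

struct NodeById {
    int parent_id = -1; // -1 marks the root
    point pos = {};
    int id = -1;        // equal to its position in `nodes`
};

std::vector<NodeById> nodes; // append-only storage; ids never move

const NodeById& parent_of(const NodeById& n) {
    return nodes.at(n.parent_id); // lookup by id instead of by pointer
}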
Let's store references to Nodes in the tree, instead, using a custom indexable:
using NodeRef = std::reference_wrapper<Node const>;
struct MyIndexable {
using value_type = NodeRef;
using result_type = Node const&;
result_type operator()(NodeRef r) const { return r; }
};
Now we can trivially build a tree from those:
bgi::rtree<NodeRef, bgi::quadratic<16>, MyIndexable> pts;
All we need is some stable storage for the referenced nodes:
std::deque<Node> storage; // reference stability adding/removing at either end
//std::list<Node> storage; // iterator and reference stability (except removed)
auto insert = [&](Node n) {
storage.push_back(std::move(n));
pts.insert(NodeRef(storage.back()));
};
The rest of the program can remain pretty much the same - except for a few respellings to accommodate NodeRef:
Live On Coliru
#undef NDEBUG
#include <iostream>
#include <boost/geometry.hpp>
#include <boost/geometry/geometries/box.hpp>
#include <boost/geometry/geometries/point.hpp>
#include <boost/geometry/geometries/register/point.hpp>
#include <boost/geometry/geometry.hpp>
#include <boost/geometry/index/rtree.hpp>
#include <random>
namespace bg = boost::geometry;
namespace bgi = boost::geometry::index;
typedef bg::model::point<double, 2, bg::cs::cartesian> point;
struct Node {
Node const* parent = nullptr;
point pos = {};
int id = -1;
};
BOOST_GEOMETRY_REGISTER_POINT_2D_GET_SET(Node, double, bg::cs::cartesian,
pos.get<0>, pos.get<1>, pos.set<0>,
pos.set<1>)
double px(point const& p) { return p.get<0>(); }
double py(point const& p) { return p.get<1>(); }
double nx(Node const* n) { return px(n->pos); }
double ny(Node const* n) { return py(n->pos); }
using NodeRef = std::reference_wrapper<Node const>;
struct MyIndexable {
using value_type = NodeRef;
using result_type = Node const&;
result_type operator()(NodeRef r) const { return r; }
};
int main(int argc, char** argv) {
bgi::rtree<NodeRef, bgi::quadratic<16>, MyIndexable> pts;
std::deque<Node> storage; // reference stability adding/removing at either end
//std::list<Node> storage; // iterator and reference stability (except removed)
auto insert = [&](Node n) {
storage.push_back(std::move(n));
pts.insert(NodeRef(storage.back()));
};
insert({nullptr, point(0, 0), 0}); // insert root
std::mt19937 prng{argc > 1 ? atoi(argv[1]) : std::random_device{}()};
std::uniform_real_distribution<double> dist(0, 200);
constexpr auto N = 2000;
std::vector<NodeRef> parents; // parents refs
std::vector<int> parent_ids; // parent ids
for (int id = 1; id < N; ++id) {
// generate random pos
point newpos(dist(prng), dist(prng));
std::vector<NodeRef> res;
pts.query(bgi::nearest(newpos, 1), back_inserter(res));
Node new_node{&res.front().get(), newpos, id};
parents.push_back(*new_node.parent);
parent_ids.push_back(new_node.parent->id);
bool invalidated =
!std::equal(parents.begin(), parents.end(), parent_ids.begin(),
[](Node const& p, int id) { return p.id == id; });
assert(!invalidated);
insert(std::move(new_node));
}
std::cout << "ULTIMATE " << parent_ids.at(500) << " "
<< parents.at(500).get().id << "\n";
}
Which never trips the assert, and produces stable output like:
for a in {1..50}; do ./a.out; done
ULTIMATE 41 41
ULTIMATE 365 365
ULTIMATE 219 219
ULTIMATE 193 193
ULTIMATE 448 448
ULTIMATE 331 331
ULTIMATE 227 227
ULTIMATE 234 234
ULTIMATE 300 300
ULTIMATE 227 227
ULTIMATE 243 243
ULTIMATE 248 248
ULTIMATE 233 233
ULTIMATE 143 143
ULTIMATE 39 39
ULTIMATE 488 488
ULTIMATE 493 493
ULTIMATE 9 9
ULTIMATE 212 212
ULTIMATE 338 338
ULTIMATE 141 141
ULTIMATE 356 356
ULTIMATE 147 147
ULTIMATE 376 376
ULTIMATE 76 76
ULTIMATE 450 450
ULTIMATE 272 272
ULTIMATE 34 34
ULTIMATE 492 492
ULTIMATE 478 478
ULTIMATE 84 84
ULTIMATE 416 416
ULTIMATE 222 222
ULTIMATE 457 457
ULTIMATE 95 95
ULTIMATE 446 446
ULTIMATE 233 233
ULTIMATE 480 480
ULTIMATE 265 265
ULTIMATE 415 415
ULTIMATE 289 289
ULTIMATE 121 121
ULTIMATE 344 344
ULTIMATE 110 110
ULTIMATE 429 429
ULTIMATE 31 31
ULTIMATE 344 344
ULTIMATE 172 172
ULTIMATE 20 20
ULTIMATE 394 394
Bonus
A version without the now-redundant sanity checks and unused code: Live On Coliru
¹ although there are some notes about iterator invalidation
I write a grid-stride loop for high-performance calculations, where N is large, for example long long N = 1LL << 36, or even more. From the total grid I need only some indices, namely those that satisfy a defined condition.
__global__ void Indexes(int *array, int N) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
while( index<N)
{
if (condition)
{....//do something to save index in array}
index += blockDim.x * gridDim.x;
}
}
Of course, it is possible to use Thrust, which allows having both host and device arrays. But in this case the calculation will obviously be extremely inefficient, because it first needs to create a lot of unneeded elements and then delete them.
What is the most effective way to save the indices directly into a device array that can then be passed to the CPU?
If your output is relatively dense (i.e. a lot of indices and relatively few zeros), then the stream compaction approach suggested in comments is a good solution. There are a lot of ready-to-go stream compaction implementations which you can probably adapt to your purposes.
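For instance, a minimal sketch of the dense case using Thrust's built-in stream compaction (thrust::copy_if over a counting iterator); the predicate here, divisibility by 10000, is just a stand-in for your real condition:
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/sequence.h>

struct Matches {
    const int* data;
    __host__ __device__ bool operator()(int idx) const { return data[idx] % 10000 == 0; }
};

int main() {
    const int N = 1 << 20;
    thrust::device_vector<int> input(N);
    thrust::sequence(input.begin(), input.end(), 1);   // fill with 1, 2, 3, ...
    thrust::device_vector<int> out(N);                 // worst case: every index matches
    auto end = thrust::copy_if(thrust::counting_iterator<int>(0),
                               thrust::counting_iterator<int>(N),
                               out.begin(),
                               Matches{thrust::raw_pointer_cast(input.data())});
    out.resize(end - out.begin());                     // keep only the matching indices
    return 0;
}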
If your output is sparse, so that you need to save relatively few indices for a lot of inputs, then stream compaction isn't such a great solution because it will waste a lot of GPU memory. In that case (assuming you can roughly estimate an upper bound on the number of output indices) something like this:
template <typename T>
struct Array
{
T* p;
int Nmax;
int* next;
Array() = default;
__host__ __device__
Array(T* _p, int _Nmax, int* _next) : p(_p), Nmax(_Nmax), next(_next) {};
__device__
int append(T& val)
{
int pos = atomicAdd(next, 1);
if (pos > Nmax) {
atomicExch(next, Nmax);
return -1;
} else {
p[pos] = val;
return pos;
}
};
};
is probably more appropriate. Here, the idea is to use an atomically incremented position in the output array to keep track of where a thread should store its index. The code will signal if you fill the index array, and there will be information from which you can work out a restart strategy to stop the current kernel and then start from the last known index which you were able to store.
A complete example:
$ cat append.cu
#include <iostream>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/copy.h>
namespace AppendArray
{
template <typename T>
struct Array
{
T* p;
int Nmax;
int* next;
Array() = default;
__host__ __device__
Array(T* _p, int _Nmax, int* _next) : p(_p), Nmax(_Nmax), next(_next) {};
__device__
int append(T& val)
{
int pos = atomicAdd(next, 1);
if (pos > Nmax) {
atomicExch(next, Nmax);
return -1;
} else {
p[pos] = val;
return pos;
}
};
};
}
__global__
void kernelfind(int* input, int N, AppendArray::Array<int> indices)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
for(; idx < N; idx += gridDim.x*blockDim.x) {
if (input[idx] % 10000 == 0) {
if (indices.append(idx) < 0) return;
}
}
}
int main()
{
const int Ninputs = 1 << 20;
thrust::device_vector<int> inputs(Ninputs);
thrust::counting_iterator<int> vals(1);
thrust::copy(vals, vals + Ninputs, inputs.begin());
int* d_input = thrust::raw_pointer_cast(inputs.data());
int Nindices = Ninputs >> 12;
thrust::device_vector<int> indices(Nindices);
int* d_indices = thrust::raw_pointer_cast(indices.data());
int* pos; cudaMallocManaged(&pos, sizeof(int)); *pos = 0;
AppendArray::Array<int> index(d_indices, Nindices-1, pos);
int gridsize, blocksize;
cudaOccupancyMaxPotentialBlockSize(&gridsize, &blocksize, kernelfind, 0, 0);
kernelfind<<<gridsize, blocksize>>>(d_input, Ninputs, index);
cudaDeviceSynchronize();
for(int i = 0; i < *pos; ++i) {
int idx = indices[i];
std::cout << i << " " << idx << " " << inputs[idx] << std::endl;
}
return 0;
}
$ nvcc -std=c++11 -arch=sm_52 -o append append.cu
$ ./append
0 9999 10000
1 19999 20000
2 29999 30000
3 39999 40000
4 49999 50000
5 69999 70000
6 79999 80000
7 59999 60000
8 89999 90000
9 109999 110000
10 99999 100000
11 119999 120000
12 139999 140000
13 129999 130000
14 149999 150000
15 159999 160000
16 169999 170000
17 189999 190000
18 179999 180000
19 199999 200000
20 209999 210000
21 219999 220000
22 239999 240000
23 249999 250000
24 229999 230000
25 279999 280000
26 269999 270000
27 259999 260000
28 319999 320000
29 329999 330000
30 289999 290000
31 299999 300000
32 339999 340000
33 349999 350000
34 309999 310000
35 359999 360000
36 379999 380000
37 399999 400000
38 409999 410000
39 369999 370000
40 429999 430000
41 419999 420000
42 389999 390000
43 439999 440000
44 459999 460000
45 489999 490000
46 479999 480000
47 449999 450000
48 509999 510000
49 539999 540000
50 469999 470000
51 499999 500000
52 569999 570000
53 549999 550000
54 519999 520000
55 589999 590000
56 529999 530000
57 559999 560000
58 619999 620000
59 579999 580000
60 629999 630000
61 669999 670000
62 599999 600000
63 609999 610000
64 699999 700000
65 639999 640000
66 649999 650000
67 719999 720000
68 659999 660000
69 679999 680000
70 749999 750000
71 709999 710000
72 689999 690000
73 729999 730000
74 779999 780000
75 799999 800000
76 809999 810000
77 739999 740000
78 849999 850000
79 759999 760000
80 829999 830000
81 789999 790000
82 769999 770000
83 859999 860000
84 889999 890000
85 879999 880000
86 819999 820000
87 929999 930000
88 869999 870000
89 839999 840000
90 909999 910000
91 939999 940000
92 969999 970000
93 899999 900000
94 979999 980000
95 959999 960000
96 949999 950000
97 1019999 1020000
98 1009999 1010000
99 989999 990000
100 1029999 1030000
101 919999 920000
102 1039999 1040000
103 999999 1000000
I have the following code:
#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <iostream>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

#pragma pack(4)
struct RECORD_HEADER {
uint64_t msgType;
uint64_t rdtsc;
};
struct BODY {
char content[488];
};
#pragma pack()
class SerializedRDTSC {
public:
typedef unsigned long long timeunit_t;
static timeunit_t start(void) {
unsigned cycles_high, cycles_low;
__asm__ __volatile__ ( "CPUID\n\t"
"RDTSC\n\t"
"mov %%edx, %0\n\t"
"mov %%eax, %1\n\t": "=r" (cycles_high), "=r" (cycles_low)::
"%rax", "%rbx", "%rcx", "%rdx");
return ( (unsigned long long)cycles_low)|( ((unsigned long long)cycles_high)<<32 );
}
static timeunit_t end(void) {
unsigned cycles_high, cycles_low;
__asm__ __volatile__( "RDTSCP\n\t"
"mov %%edx, %0\n\t"
"mov %%eax, %1\n\t"
"CPUID\n\t": "=r" (cycles_high), "=r" (cycles_low):: "%rax",
"%rbx", "%rcx", "%rdx");
return ( (unsigned long long)cycles_low)|( ((unsigned long long)cycles_high)<<32 );
}
};
char* createSHM() noexcept {
const auto sharedMemHandle = shm_open("testing", O_RDWR | O_CREAT, 0666);
if (-1 == sharedMemHandle) {
std::cout << "failed to open named shared memory: " << std::endl;
return nullptr;
}
constexpr int32_t size = (1 << 26);
ftruncate(sharedMemHandle, size);
char* ptr = (char*) mmap(nullptr, size, PROT_READ | PROT_WRITE,
MAP_SHARED, sharedMemHandle, 0);
if (MAP_FAILED == ptr) {
std::cout << errno << std::endl;
return nullptr;
}
const auto rc = fchmod(sharedMemHandle, 0666);
if (rc == -1) {
fprintf(stderr,
"Can't change permissions to 0666 on shared mem segment: %m\n");
fflush(stderr);
}
return ptr;
}
int main() {
BODY update;
srand(time(nullptr));
char* ptr = createSHM();
constexpr uint64_t n = 700;
constexpr uint64_t n2 = 10;
uint64_t m_data[n * n2];
memset(m_data, 0, sizeof(m_data));
uint64_t r = 0;
for (uint64_t i = 0; i < n; i++) {
for (uint64_t k = 0; k < n2; k++) {
// populate the header
const auto msgType = rand();
const auto rdtsc = rand();
// populate the struct randomly
uint32_t* tmp = reinterpret_cast<uint32_t*>(&update);
for (uint32_t j = 0; j < sizeof(BODY) / sizeof(uint32_t); j++) {
const uint32_t v = rand() % 32767;
tmp[j] = v;
}
// write the struct
const auto s = SerializedRDTSC::start();
memcpy(ptr, (char*)&msgType, sizeof(uint64_t));
ptr+= sizeof(uint64_t);
memcpy(ptr, (char*)&rdtsc, sizeof(uint64_t));
ptr+= sizeof(uint64_t);
memcpy(ptr, &update, sizeof(BODY));
ptr+= sizeof(BODY);
const auto e = SerializedRDTSC::end();
m_data[r++] = e - s;
}
usleep(249998);
}
for (uint32_t i = 0; i < r; i++) {
std::cout << i << "," << m_data[i] << std::endl;
}
}
And for some reason, there are periodic latency spikes according to the output:
0 9408
1 210
2 162
3 176
4 172
5 164
6 172
7 8338
8 174
9 186
10 612
11 380
12 380
13 374
14 358
15 13610
16 190
17 186
18 164
19 168
20 246
21 196
22 170
23 5066
24 176
25 176
26 168
27 174
28 166
29 440
30 232
31 214
32 5128
33 180
34 178
35 172
36 174
37 184
38 170
39 162
40 5964
41 182
42 174
43 164
44 180
45 180
46 162
47 172
I already isolated the core and double-checked with htop to make sure no other processes were using the core.
My machine has an i7 CPU (nothing fancy).
And then I tried with a Xeon CPU. The pattern is about the same -- every 7-11 writes, there was a spike.
With i7 CPU, I compiled with GCC 7.2 with c++17 and ran it on CentOS 7.3.
With Xeon CPU, I compiled with GCC 4.6 with c++0x and ran it on CentOS 6.5.
My questions are:
1. Why were there periodic latency spikes? (I checked with strace, and I don't see any weird system calls involved.)
2. Any suggestions on how to investigate/understand the spikes? More for my learning.
Thanks in advance!
P.S. Yes, some people object to using rdtsc to measure latency because temperature affects the TSC. Though I don't see any better option, as I don't have PTP, and clock_gettime() sometimes has latency spikes too. If you have any suggestions, they are more than welcome :)
A memory page is 4K bytes. Every time you start writing on a new page, that page needs to be mapped into the process address space. Since the data you're writing every loop iteration is 8 + 8 + 488 = 504 bytes, and 4096 / 504 is about 8.1, you'll get a spike every 8 or 9 times through the loop.
Since the CPU can speculatively prefetch data from memory, the page fault for the 2nd page (which should occur on the 8th loop) occurs one loop earlier than expected, when the hardware prefetcher tries to access the page.
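If the goal is to keep those faults out of the timed region, one option is to fault the whole mapping in up front. A sketch (MAP_POPULATE is Linux-specific, and size/sharedMemHandle are the variables from createSHM() above):
// Pre-fault the whole mapping at setup time instead of paying for it mid-loop.
char* ptr = (char*) mmap(nullptr, size, PROT_READ | PROT_WRITE,
                         MAP_SHARED | MAP_POPULATE, sharedMemHandle, 0);

// Or, portably, touch each 4K page once before the measurement starts:
for (int32_t off = 0; off < size; off += 4096)
    ptr[off] = 0;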
I am currently trying to understand how the following code (http://pastebin.com/zTHUrmyx) works; my approach is to compile the software in debug mode and use gdb to step through the code.
However, I'm running into the problem that 'step' does not always tell me what is going on. Particularly unclear to me is the EXECUTE {...} block, which I cannot step into.
How do I go about learning what the code is doing?
/*
Copyright 2008 Brain Research Institute, Melbourne, Australia

Written by J-Donald Tournier, 27/06/08.

This file is part of MRtrix.

MRtrix is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

MRtrix is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with MRtrix. If not, see <http://www.gnu.org/licenses/>.


15-10-2008 J-Donald Tournier <d.tournier#brain.org.au>
* fix -prs option handling
* remove MR::DICOM_DW_gradients_PRS flag

15-10-2008 J-Donald Tournier <d.tournier#brain.org.au>
* add -layout option to manipulate data ordering within the image file

14-02-2010 J-Donald Tournier <d.tournier#brain.org.au>
* fix -coord option so that the "end" keyword can be used


*/

#include "app.h"
#include "image/position.h"
#include "image/axis.h"
#include "math/linalg.h"

using namespace std;
using namespace MR;

SET_VERSION_DEFAULT;

DESCRIPTION = {
"perform conversion between different file types and optionally extract a subset of the input image.",
"If used correctly, this program can be a very useful workhorse. In addition to converting images between different formats, it can be used to extract specific studies from a data set, extract a specific region of interest, flip the images, or to scale the intensity of the images.",
NULL
};

ARGUMENTS = {
Argument ("input", "input image", "the input image.").type_image_in (),
Argument ("ouput", "output image", "the output image.").type_image_out (),
Argument::End
};


const gchar* type_choices[] = { "REAL", "IMAG", "MAG", "PHASE", "COMPLEX", NULL };
const gchar* data_type_choices[] = { "FLOAT32", "FLOAT32LE", "FLOAT32BE", "FLOAT64", "FLOAT64LE", "FLOAT64BE",
"INT32", "UINT32", "INT32LE", "UINT32LE", "INT32BE", "UINT32BE",
"INT16", "UINT16", "INT16LE", "UINT16LE", "INT16BE", "UINT16BE",
"CFLOAT32", "CFLOAT32LE", "CFLOAT32BE", "CFLOAT64", "CFLOAT64LE", "CFLOAT64BE",
"INT8", "UINT8", "BIT", NULL };

OPTIONS = {
Option ("coord", "select coordinates", "extract data only at the coordinates specified.", false, true)
.append (Argument ("axis", "axis", "the axis of interest").type_integer (0, INT_MAX, 0))
.append (Argument ("coord", "coordinates", "the coordinates of interest").type_sequence_int()),

Option ("vox", "voxel size", "change the voxel dimensions.")
.append (Argument ("sizes", "new dimensions", "A comma-separated list of values. Only those values specified will be changed. For example: 1,,3.5 will change the voxel size along the x & z axes, and leave the y-axis voxel size unchanged.")
.type_sequence_float ()),

Option ("datatype", "data type", "specify output image data type.")
.append (Argument ("spec", "specifier", "the data type specifier.").type_choice (data_type_choices)),

Option ("scale", "scaling factor", "apply scaling to the intensity values.")
.append (Argument ("factor", "factor", "the factor by which to multiply the intensities.").type_float (NAN, NAN, 1.0)),

Option ("offset", "offset", "apply offset to the intensity values.")
.append (Argument ("bias", "bias", "the value of the offset.").type_float (NAN, NAN, 0.0)),

Option ("zero", "replace NaN by zero", "replace all NaN values with zero."),

Option ("output", "output type", "specify type of output")
.append (Argument ("type", "type", "type of output.")
.type_choice (type_choices)),

Option ("layout", "data layout", "specify the layout of the data in memory. The actual layout produced will depend on whether the output image format can support it.")
.append (Argument ("spec", "specifier", "the data layout specifier.").type_string ()),

Option ("prs", "DW gradient specified as PRS", "assume that the DW gradients are specified in the PRS frame (Siemens DICOM only)."),

Option::End
};



inline bool next (Image::Position& ref, Image::Position& other, const std::vector<int>* pos)
{
int axis = 0;
do {
ref.inc (axis);
if (ref[axis] < ref.dim(axis)) {
other.set (axis, pos[axis][ref[axis]]);
return (true);
}
ref.set (axis, 0);
other.set (axis, pos[axis][0]);
axis++;
} while (axis < ref.ndim());
return (false);
}




EXECUTE {
std::vector<OptBase> opt = get_options (1); // vox
std::vector<float> vox;
if (opt.size())
vox = parse_floats (opt[0][0].get_string());


opt = get_options (3); // scale
float scale = 1.0;
if (opt.size()) scale = opt[0][0].get_float();

opt = get_options (4); // offset
float offset = 0.0;
if (opt.size()) offset = opt[0][0].get_float();

opt = get_options (5); // zero
bool replace_NaN = opt.size();

opt = get_options (6); // output
Image::OutputType output_type = Image::Default;
if (opt.size()) {
switch (opt[0][0].get_int()) {
case 0: output_type = Image::Real; break;
case 1: output_type = Image::Imaginary; break;
case 2: output_type = Image::Magnitude; break;
case 3: output_type = Image::Phase; break;
case 4: output_type = Image::RealImag; break;
}
}




Image::Object &in_obj (*argument[0].get_image());

Image::Header header (in_obj);

if (output_type == 0) {
if (in_obj.is_complex()) output_type = Image::RealImag;
else output_type = Image::Default;
}

if (output_type == Image::RealImag) header.data_type = DataType::CFloat32;
else if (output_type == Image::Phase) header.data_type = DataType::Float32;
else header.data_type.unset_flag (DataType::ComplexNumber);


opt = get_options (2); // datatype
if (opt.size()) header.data_type.parse (data_type_choices[opt[0][0].get_int()]);

for (guint n = 0; n < vox.size(); n++)
if (isfinite (vox[n])) header.axes.vox[n] = vox[n];

opt = get_options (7); // layout
if (opt.size()) {
std::vector<Image::Axis> ax = parse_axes_specifier (header.axes, opt[0][0].get_string());
if (ax.size() != (guint) header.axes.ndim())
throw Exception (String("specified layout \"") + opt[0][0].get_string() + "\" does not match image dimensions");

for (guint i = 0; i < ax.size(); i++) {
header.axes.axis[i] = ax[i].axis;
header.axes.forward[i] = ax[i].forward;
}
}


opt = get_options (8); // prs
if (opt.size() && header.DW_scheme.rows() && header.DW_scheme.columns()) {
for (guint row = 0; row < header.DW_scheme.rows(); row++) {
double tmp = header.DW_scheme(row, 0);
header.DW_scheme(row, 0) = header.DW_scheme(row, 1);
header.DW_scheme(row, 1) = tmp;
header.DW_scheme(row, 2) = -header.DW_scheme(row, 2);
}
}

std::vector<int> pos[in_obj.ndim()];

opt = get_options (0); // coord
for (guint n = 0; n < opt.size(); n++) {
int axis = opt[n][0].get_int();
if (pos[axis].size()) throw Exception ("\"coord\" option specified twice for axis " + str (axis));
pos[axis] = parse_ints (opt[n][1].get_string(), header.dim(axis)-1);
header.axes.dim[axis] = pos[axis].size();
}

for (int n = 0; n < in_obj.ndim(); n++) {
if (pos[n].empty()) {
pos[n].resize (in_obj.dim(n));
for (guint i = 0; i < pos[n].size(); i++) pos[n][i] = i;
}
}


in_obj.apply_scaling (scale, offset);






Image::Position in (in_obj);
Image::Position out (*argument[1].get_image (header));

for (int n = 0; n < in.ndim(); n++) in.set (n, pos[n][0]);

ProgressBar::init (out.voxel_count(), "copying data...");

do {

float re, im = 0.0;
in.get (output_type, re, im);
if (replace_NaN) if (gsl_isnan (re)) re = 0.0;
out.re (re);

if (output_type == Image::RealImag) {
if (replace_NaN) if (gsl_isnan (im)) im = 0.0;
out.im (im);
}

ProgressBar::inc();
} while (next (out, in, pos));

ProgressBar::done();
}
As was noted in the comments, EXECUTE seems to be a macro; from the context it apparently expands to a function header (and maybe a bit more, e.g. some global variables and functions), so the part in curly braces is the function body.
To get to the definition of EXECUTE, you will have to examine the headers.
However, if you can reach some part of the code during debugging, you could insert a string or char[] at that point, giving it the stringified version of EXECUTE, so you get whatever the preprocessor will emit for EXECUTE at that position in the code.
#define STR(x) #x
#define STRINGIFY(x) STR(x)
char c[] = STRINGIFY(EXECUTE);
The two macros are a well-known little trick to get the content of any macro as a string literal. Try it out and inspect the char array in your debugger to get the content of EXECUTE.
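Purely for illustration, assuming EXECUTE expanded to a plain function header, the stringification would give you something like this:
#define EXECUTE void execute()   // hypothetical stand-in for the real definition
#define STR(x) #x
#define STRINGIFY(x) STR(x)

char c[] = STRINGIFY(EXECUTE);   // c now holds "void execute()"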
My wild guess here: EXECUTE is the main function or a replacement for it, and the OPTIONS and ARGUMENTS blocks describe what arguments the program expects and what command line options you can pass to it. Those macros and some of the functions and variables used (get_options, argument) are part of a little framework that handles the parsing, evaluation, and documentation of command line options.
As I understand it, FindAtom returns 0 if the requested atom is not found. But even in this small code, without any AddAtom at all, it seems to always return something. Why is that? Doesn't this make FindAtom with integer atoms useless?
#include <windows.h>
#include <iostream>
using std::cout;

int main( ) {
    cout << FindAtom(MAKEINTATOM(12345)); // Output: 12345
    cout << FindAtom(MAKEINTATOM(2011));  // Output: 2011
    return 0;
}
Bye
Signature of the FindAtom function:
ATOM WINAPI FindAtom(
__in LPCTSTR lpString
);
I found the following code in the Wine SVN repository. It represents the basic logic:
ATOM WINAPI FindAtom16( LPCSTR str )
{
    ATOMTABLE * table;
    WORD hash,iatom;
    HANDLE16 entry;
    int len;

    TRACE("%s\n",debugstr_a(str));

    if (ATOM_IsIntAtomA( str, &iatom )) return iatom;
    if ((len = strlen( str )) > 255) len = 255;
    if (!(table = ATOM_GetTable( FALSE ))) return 0;
    hash = ATOM_Hash( table->size, str, len );
    entry = table->entries[hash];
    while (entry)
    {
        ATOMENTRY * entryPtr = ATOM_MakePtr( entry );
        if ((entryPtr->length == len) &&
            (!strncasecmp( entryPtr->str, str, len )))
        {
            TRACE("-- found %x\n", entry);
            return HANDLETOATOM( entry );
        }
        entry = entryPtr->next;
    }
    TRACE("-- not found\n");
    return 0;
}
So let's trace what happens when the program calls the FindAtom function:
FindAtom(MAKEINTATOM(12345));
MAKEINTATOM is a macro:
#define MAKEINTATOM(i) (LPTSTR)((DWORD)((WORD)(i)))
MAKEINTATOM(12345) returns the integer atom value cast to a string pointer, i.e. a "pointer" whose numeric value is 12345 (0x3039) and whose HIWORD is 0.
Calling FindAtom(MAKEINTATOM(12345))
Checking the condition if (ATOM_IsIntAtomA( str, &iatom ))
static BOOL ATOM_IsIntAtomA(LPCSTR atomstr,WORD *atomid)
{
    UINT atom = 0;
    if (!HIWORD(atomstr)) atom = LOWORD(atomstr);
    else
    {
        if (*atomstr++ != '#') return FALSE;
        while (*atomstr >= '0' && *atomstr <= '9')
        {
            atom = atom * 10 + *atomstr - '0';
            atomstr++;
        }
        if (*atomstr) return FALSE;
    }
    if (atom >= MAXINTATOM)
    {
        SetLastError( ERROR_INVALID_PARAMETER );
        atom = 0;
    }
    *atomid = atom;
    return TRUE;
}
As you can see, when this function is called with an argument that represents an integer atom, it stores the atom value in *atomid and returns TRUE.
FindAtom16 then simply executes
return iatom;
So when you call cout << FindAtom (MAKEINTATOM (12345)); you get 12345 as output.
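To see the difference in practice, here is a small self-contained illustration (FindAtom and MAKEINTATOM are the real Win32 APIs; the string name is made up): an integer atom is echoed straight back without any table lookup, while a string lookup can actually fail:
#include <windows.h>
#include <iostream>

int main() {
    // Integer atom: the ATOM_IsIntAtomA-style path returns the value itself.
    std::cout << FindAtom(MAKEINTATOM(12345)) << "\n";            // prints 12345
    // String atom: really searched in the local atom table, so this prints 0.
    std::cout << FindAtom(TEXT("some_name_never_added")) << "\n"; // prints 0 (not found)
    return 0;
}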